
Practical file
Deep learning
Submitted by:
Harmanjeet Singh
B. Tech CSE (7)
1803448

Submitted to:
Ms. Mamta

Index

S.NO.  CONTENTS

1.  Creating a basic network and analysing its performance
2.  Deploy the confusion matrix and simulate for overfitting
3.  Visualizing a neural network
4.  Demo: Object detection with pre-trained RetinaNet with Keras
5.  Neural recommender systems with explicit feedback
6.  Backpropagation in neural networks using numpy
7.  Text classification and word vectors
8.  Fully convolutional networks
9.  ConvNets for classification and localization
10. Character level language model

PRACTICAL 1
AIM: Creating a basic network and analysing its performance.


>> [x,t]=wine_dataset;        % load the built-in wine classification dataset (inputs x, targets t)
>> size(x)                    % attributes-by-samples input matrix
>> size(t)                    % classes-by-samples one-hot target matrix

>> net=patternnet(10);        % pattern-recognition network with 10 hidden neurons
>> view(net)                  % display the network diagram
>> [net,tr] = train(net,x,t); % train the network; tr holds the training record
>> nntraintool                % open the training progress window

PRACTICAL 2
AIM: Deploy the confusion matrix and simulate for overfitting.


>> net=patternnet(10);             % pattern-recognition network with 10 hidden neurons
>> view(net)
>> [net,tr] = train(net,x,t);      % train on the wine data (x, t) from Practical 1
>> nntraintool
>> nntraintool('close')            % close the training progress window
>> plotperform(tr)                 % training/validation/test performance curves
>> testX = x(:,tr.testInd);        % inputs held out for testing
>> testT = t(:,tr.testInd);        % targets held out for testing
>> testY=net(testX);               % network outputs on the test set
>> testIndices = vec2ind(testY)    % convert one-hot outputs to class indices
>> plotconfusion(testT,testY)      % plot the confusion matrix
>> [c,cm]= confusion(testT,testY)  % overall error rate c and confusion matrix cm

OUTPUT:

PRACTICAL 3
AIM: Visualizing a neural network.


>> [x,t]=wine_dataset;   % load the wine classification dataset
>> size(x)
>> size(t)
>> net=patternnet(10);   % create a pattern-recognition network with 10 hidden neurons
>> view(net)             % open a diagram of the network's layers and connections

OUTPUT:

PRACTICAL 4
AIM: Demo: Object Detection with pre-trained RetinaNet with Keras.


# plot one photograph and mask

from os import listdir
from xml.etree import ElementTree
from numpy import zeros
from numpy import asarray
from mrcnn.utils import Dataset
from matplotlib import pyplot

# class that defines and loads the kangaroo dataset
class KangarooDataset(Dataset):
    # load the dataset definitions
    def load_dataset(self, dataset_dir, is_train=True):
        # define one class
        self.add_class("dataset", 1, "kangaroo")
        # define data locations
        images_dir = dataset_dir + '/images/'
        annotations_dir = dataset_dir + '/annots/'
        # find all images
        for filename in listdir(images_dir):
            # extract image id
            image_id = filename[:-4]
            # skip bad images
            if image_id in ['00090']:
                continue
            # skip all images after 150 if we are building the train set
            if is_train and int(image_id) >= 150:
                continue
            # skip all images before 150 if we are building the test/val set
            if not is_train and int(image_id) < 150:
                continue
            img_path = images_dir + filename
            ann_path = annotations_dir + image_id + '.xml'
            # add to dataset
            self.add_image('dataset', image_id=image_id, path=img_path, annotation=ann_path)

    # extract bounding boxes from an annotation file
    def extract_boxes(self, filename):
        # load and parse the file
        tree = ElementTree.parse(filename)
        # get the root of the document
        root = tree.getroot()
        # extract each bounding box
        boxes = list()
        for box in root.findall('.//bndbox'):
            xmin = int(box.find('xmin').text)
            ymin = int(box.find('ymin').text)
            xmax = int(box.find('xmax').text)
            ymax = int(box.find('ymax').text)
            coors = [xmin, ymin, xmax, ymax]
            boxes.append(coors)
        # extract image dimensions
        width = int(root.find('.//size/width').text)
        height = int(root.find('.//size/height').text)
        return boxes, width, height

    # load the masks for an image
    def load_mask(self, image_id):
        # get details of image
        info = self.image_info[image_id]
        # define box file location
        path = info['annotation']
        # load XML
        boxes, w, h = self.extract_boxes(path)
        # create one array for all masks, each on a different channel
        masks = zeros([h, w, len(boxes)], dtype='uint8')
        # create masks
        class_ids = list()
        for i in range(len(boxes)):
            box = boxes[i]
            row_s, row_e = box[1], box[3]
            col_s, col_e = box[0], box[2]
            masks[row_s:row_e, col_s:col_e, i] = 1
            class_ids.append(self.class_names.index('kangaroo'))
        return masks, asarray(class_ids, dtype='int32')

    # load an image reference
    def image_reference(self, image_id):
        info = self.image_info[image_id]
        return info['path']

# train set
train_set = KangarooDataset()
train_set.load_dataset('kangaroo', is_train=True)
train_set.prepare()
# load an image
image_id = 0
image = train_set.load_image(image_id)
print(image.shape)
# load image mask
mask, class_ids = train_set.load_mask(image_id)
print(mask.shape)
# plot image
pyplot.imshow(image)
# plot mask
pyplot.imshow(mask[:, :, 0], cmap='gray', alpha=0.5)
pyplot.show()

OUTPUT:


PRACTICAL 5


AIM: Neural Recommender Systems with Explicit Feedback.


# code
import numpy as np
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
  
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
  
ratings = pd.read_csv("ratings.csv")   # MovieLens ratings file; the original path is not shown in the source
ratings.head()

movies = pd.read_csv("movies.csv")     # MovieLens movies file; the original path is not shown in the source
movies.head()
  
n_ratings = len(ratings)
n_movies = len(ratings['movieId'].unique())
n_users = len(ratings['userId'].unique())
  
print(f"Number of ratings: {n_ratings}")
print(f"Number of unique movieId's: {n_movies}")
print(f"Number of unique users: {n_users}")
print(f"Average ratings per user: {round(n_ratings/n_users, 2)}")
print(f"Average ratings per movie: {round(n_ratings/n_movies, 2)}")
  
user_freq = ratings[['userId', 'movieId']].groupby('userId').count().reset_index()
user_freq.columns = ['userId', 'n_ratings']
user_freq.head()
  
  
# Find Lowest and Highest rated movies:
mean_rating = ratings.groupby('movieId')[['rating']].mean()
# Lowest rated movies
lowest_rated = mean_rating['rating'].idxmin()
movies.loc[movies['movieId'] == lowest_rated]
# Highest rated movies
highest_rated = mean_rating['rating'].idxmax()
movies.loc[movies['movieId'] == highest_rated]
# show the people who rated the highest rated movie
ratings[ratings['movieId']==highest_rated]
# show the people who rated the lowest rated movie
ratings[ratings['movieId']==lowest_rated]

## the above movies have very few ratings, so a Bayesian average is more informative (a sketch follows the aggregation below)


movie_stats = ratings.groupby('movieId')[['rating']].agg(['count', 'mean'])


movie_stats.columns = movie_stats.columns.droplevel()
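# The Bayesian average mentioned above is not actually computed in the original snippet;
# the lines below are an illustrative sketch only. The prior weight C (mean number of
# ratings per movie) and the global mean m are assumptions, not values from the source.
C = movie_stats['count'].mean()   # prior weight: average number of ratings per movie
m = ratings['rating'].mean()      # global mean rating, used as the prior
movie_stats['bayesian_avg'] = (C * m + movie_stats['count'] * movie_stats['mean']) / (C + movie_stats['count'])
movie_stats.sort_values('bayesian_avg', ascending=False).head()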
  
# Now, we create user-item matrix using scipy csr matrix
from scipy.sparse import csr_matrix
  
def create_matrix(df):
    N = len(df['userId'].unique())
    M = len(df['movieId'].unique())
      
    # Map Ids to indices
    user_mapper = dict(zip(np.unique(df["userId"]), list(range(N))))
    movie_mapper = dict(zip(np.unique(df["movieId"]), list(range(M))))
      
    # Map indices to IDs
    user_inv_mapper = dict(zip(list(range(N)), np.unique(df["userId"])))
    movie_inv_mapper = dict(zip(list(range(M)), np.unique(df["movieId"])))
      
    user_index = [user_mapper[i] for i in df['userId']]
    movie_index = [movie_mapper[i] for i in df['movieId']]
  
    X = csr_matrix((df["rating"], (movie_index, user_index)), shape=(M, N))
      
    return X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper
  
X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper = create_matrix(ratings)
  
from sklearn.neighbors import NearestNeighbors
"""
Find similar movies using KNN
"""
def find_similar_movies(movie_id, X, k, metric='cosine', show_distance=False):
      
    neighbour_ids = []
      
    movie_ind = movie_mapper[movie_id]
    movie_vec = X[movie_ind]
    k+=1
    kNN = NearestNeighbors(n_neighbors=k, algorithm="brute", metric=metric)
    kNN.fit(X)
    movie_vec = movie_vec.reshape(1,-1)
    neighbour = kNN.kneighbors(movie_vec, return_distance=show_distance)
    for i in range(0,k):
        n = neighbour.item(i)
        neighbour_ids.append(movie_inv_mapper[n])
    neighbour_ids.pop(0)


    return neighbour_ids
    
movie_titles = dict(zip(movies['movieId'], movies['title']))
  
movie_id = 3
  
similar_ids = find_similar_movies(movie_id, X, k=10)
movie_title = movie_titles[movie_id]
  
print(f"Since you watched {movie_title}")
for i in similar_ids:
    print(movie_titles[i])

OUTPUT:
Number of ratings: 100836

Number of unique movieId's: 9724

Number of unique users: 610

Average number of ratings per user: 165.3

Average number of ratings per movie: 10.37

# lowest rated

movieId title genres

2689 3604 Gypsy (1962) Musical

# highest rated

movieId title genres

48 53 Lamerica (1994) Adventure|Drama

# who rated the highest rated movie

userId movieId rating timestamp

13368 85 53 5.0 889468268

96115 603 53 5.0 963180003


# who rated the lowest rated movie

userId movieId rating timestamp

13633 89 3604 0.5 1520408880

Since you watched Grumpier Old Men (1995)

Grumpy Old Men (1993)

Striptease (1996)

Nutty Professor, The (1996)

Twister (1996)

Father of the Bride Part II (1995)

Broken Arrow (1996)

Bio-Dome (1996)

Truth About Cats & Dogs, The (1996)

Sabrina (1995)

Birdcage, The (1996)

PRACTICAL 6


AIM: Backpropagation in Neural Networks using numpy.


First, we import all the packages that we will need: numpy, h5py (for loading datasets stored in H5 files), and matplotlib (for plotting).

import numpy as np
import matplotlib.pyplot as plt
import h5py
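h5py is not actually used by the nn_backprop.py script below, which reads plain comma-separated text files instead. Purely as an illustration of the loading step described above (the file name data.h5 and the dataset key train_x are hypothetical, not files used in this practical):

# illustrative only: read an array from an HDF5 file with h5py
with h5py.File("data.h5", "r") as f:    # hypothetical HDF5 file
    train_x = np.array(f["train_x"])    # hypothetical dataset key
print(train_x.shape)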

nn_backprop.py:

import numpy as np
import random
import math
import sys

# helper functions

def loadFile(df):
    # load a comma-delimited text file into a float32 matrix
    resultList = []
    f = open(df, 'r')
    for line in f:
        line = line.rstrip('\n')              # "1.0,2.0,3.0"
        sVals = line.split(',')               # ["1.0", "2.0", "3.0"]
        fVals = list(map(np.float32, sVals))  # [1.0, 2.0, 3.0]
        resultList.append(fVals)              # [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    f.close()
    return np.asarray(resultList, dtype=np.float32)  # dtype cast not strictly necessary
# end loadFile


def showVector(v, dec):
    # print a vector with dec decimal places
    fmt = "%." + str(dec) + "f"  # like %.4f
    for i in range(len(v)):
        x = v[i]
        if x >= 0.0: print(' ', end='')
        print(fmt % x + ' ', end='')
    print('')

def showMatrix(m, dec):
    # print a matrix with dec decimal places
    fmt = "%." + str(dec) + "f"  # like %.4f
    for i in range(len(m)):
        for j in range(len(m[i])):
            x = m[i,j]
            if x >= 0.0: print(' ', end='')
            print(fmt % x + ' ', end='')
        print('')

def showMatrixPartial(m, numRows, dec, indices):
    # print the first numRows rows and the last row of a matrix
    fmt = "%." + str(dec) + "f"  # like %.4f
    lastRow = len(m) - 1
    width = len(str(lastRow))
    for i in range(numRows):
        if indices == True:
            print("[", end='')
            print(str(i).rjust(width), end='')
            print("] ", end='')
        for j in range(len(m[i])):
            x = m[i,j]
            if x >= 0.0: print(' ', end='')
            print(fmt % x + ' ', end='')
        print('')
    print(" . . . ")
    if indices == True:
        print("[", end='')
        print(str(lastRow).rjust(width), end='')
        print("] ", end='')
    for j in range(len(m[lastRow])):
        x = m[lastRow,j]
        if x >= 0.0: print(' ', end='')
        print(fmt % x + ' ', end='')
    print('')

class NeuralNetwork:

    def __init__(self, numInput, numHidden, numOutput, seed):
        self.ni = numInput
        self.nh = numHidden
        self.no = numOutput

        self.iNodes = np.zeros(shape=[self.ni], dtype=np.float32)
        self.hNodes = np.zeros(shape=[self.nh], dtype=np.float32)
        self.oNodes = np.zeros(shape=[self.no], dtype=np.float32)

        self.ihWeights = np.zeros(shape=[self.ni,self.nh], dtype=np.float32)
        self.hoWeights = np.zeros(shape=[self.nh,self.no], dtype=np.float32)

        self.hBiases = np.zeros(shape=[self.nh], dtype=np.float32)
        self.oBiases = np.zeros(shape=[self.no], dtype=np.float32)

        self.rnd = random.Random(seed)  # allows multiple instances
        self.initializeWeights()

    def setWeights(self, weights):
        # copy a flat weight vector into the weight and bias matrices
        if len(weights) != self.totalWeights(self.ni, self.nh, self.no):
            print("Warning: len(weights) error in setWeights()")
        idx = 0
        for i in range(self.ni):
            for j in range(self.nh):
                self.ihWeights[i,j] = weights[idx]
                idx += 1
        for j in range(self.nh):
            self.hBiases[j] = weights[idx]
            idx += 1
        for j in range(self.nh):
            for k in range(self.no):
                self.hoWeights[j,k] = weights[idx]
                idx += 1
        for k in range(self.no):
            self.oBiases[k] = weights[idx]
            idx += 1

    def getWeights(self):
        # return all weights and biases as one flat vector
        tw = self.totalWeights(self.ni, self.nh, self.no)
        result = np.zeros(shape=[tw], dtype=np.float32)
        idx = 0  # points into result
        for i in range(self.ni):
            for j in range(self.nh):
                result[idx] = self.ihWeights[i,j]
                idx += 1
        for j in range(self.nh):
            result[idx] = self.hBiases[j]
            idx += 1
        for j in range(self.nh):
            for k in range(self.no):
                result[idx] = self.hoWeights[j,k]
                idx += 1
        for k in range(self.no):
            result[idx] = self.oBiases[k]
            idx += 1
        return result

    def initializeWeights(self):
        # initialize all weights and biases to small random values
        numWts = self.totalWeights(self.ni, self.nh, self.no)
        wts = np.zeros(shape=[numWts], dtype=np.float32)
        lo = -0.01; hi = 0.01
        for idx in range(len(wts)):
            wts[idx] = (hi - lo) * self.rnd.random() + lo
        self.setWeights(wts)

    def computeOutputs(self, xValues):
        # feed-forward pass: inputs -> tanh hidden layer -> softmax output layer
        hSums = np.zeros(shape=[self.nh], dtype=np.float32)
        oSums = np.zeros(shape=[self.no], dtype=np.float32)

        for i in range(self.ni):
            self.iNodes[i] = xValues[i]

        for j in range(self.nh):
            for i in range(self.ni):
                hSums[j] += self.iNodes[i] * self.ihWeights[i,j]

        for j in range(self.nh):
            hSums[j] += self.hBiases[j]

        for j in range(self.nh):
            self.hNodes[j] = self.hypertan(hSums[j])

        for k in range(self.no):
            for j in range(self.nh):
                oSums[k] += self.hNodes[j] * self.hoWeights[j,k]

        for k in range(self.no):
            oSums[k] += self.oBiases[k]

        softOut = self.softmax(oSums)
        for k in range(self.no):
            self.oNodes[k] = softOut[k]

        result = np.zeros(shape=self.no, dtype=np.float32)
        for k in range(self.no):
            result[k] = self.oNodes[k]
        return result

    def train(self, trainData, maxEpochs, learnRate):
        hoGrads = np.zeros(shape=[self.nh, self.no], dtype=np.float32)  # hidden-to-output weights gradients
        obGrads = np.zeros(shape=[self.no], dtype=np.float32)           # output node biases gradients
        ihGrads = np.zeros(shape=[self.ni, self.nh], dtype=np.float32)  # input-to-hidden weights gradients
        hbGrads = np.zeros(shape=[self.nh], dtype=np.float32)           # hidden biases gradients

        oSignals = np.zeros(shape=[self.no], dtype=np.float32)  # output signals: gradients w/o assoc. input terms
        hSignals = np.zeros(shape=[self.nh], dtype=np.float32)  # hidden signals: gradients w/o assoc. input terms

        epoch = 0
        x_values = np.zeros(shape=[self.ni], dtype=np.float32)
        t_values = np.zeros(shape=[self.no], dtype=np.float32)
        numTrainItems = len(trainData)
        indices = np.arange(numTrainItems)  # [0, 1, 2, . . n-1]

        while epoch < maxEpochs:
            self.rnd.shuffle(indices)  # scramble order of training items
            for ii in range(numTrainItems):
                idx = indices[ii]

                for j in range(self.ni):
                    x_values[j] = trainData[idx, j]  # get the input values
                for j in range(self.no):
                    t_values[j] = trainData[idx, j+self.ni]  # get the target values
                self.computeOutputs(x_values)  # results stored internally

                # 1. compute output node signals
                for k in range(self.no):
                    derivative = (1 - self.oNodes[k]) * self.oNodes[k]          # softmax
                    oSignals[k] = derivative * (self.oNodes[k] - t_values[k])   # E=(t-o)^2 so E'=(o-t)

                # 2. compute hidden-to-output weight gradients using output signals
                for j in range(self.nh):
                    for k in range(self.no):
                        hoGrads[j, k] = oSignals[k] * self.hNodes[j]

                # 3. compute output node bias gradients using output signals
                for k in range(self.no):
                    obGrads[k] = oSignals[k] * 1.0  # 1.0 dummy input can be dropped

                # 4. compute hidden node signals
                for j in range(self.nh):
                    sum = 0.0
                    for k in range(self.no):
                        sum += oSignals[k] * self.hoWeights[j,k]
                    derivative = (1 - self.hNodes[j]) * (1 + self.hNodes[j])  # tanh activation
                    hSignals[j] = derivative * sum

                # 5. compute input-to-hidden weight gradients using hidden signals
                for i in range(self.ni):
                    for j in range(self.nh):
                        ihGrads[i, j] = hSignals[j] * self.iNodes[i]

                # 6. compute hidden node bias gradients using hidden signals
                for j in range(self.nh):
                    hbGrads[j] = hSignals[j] * 1.0  # 1.0 dummy input can be dropped

                # update weights and biases using the gradients

                # 1. update input-to-hidden weights
                for i in range(self.ni):
                    for j in range(self.nh):
                        delta = -1.0 * learnRate * ihGrads[i,j]
                        self.ihWeights[i, j] += delta

                # 2. update hidden node biases
                for j in range(self.nh):
                    delta = -1.0 * learnRate * hbGrads[j]
                    self.hBiases[j] += delta

                # 3. update hidden-to-output weights
                for j in range(self.nh):
                    for k in range(self.no):
                        delta = -1.0 * learnRate * hoGrads[j,k]
                        self.hoWeights[j, k] += delta

                # 4. update output node biases
                for k in range(self.no):
                    delta = -1.0 * learnRate * obGrads[k]
                    self.oBiases[k] += delta

            epoch += 1
            if epoch % 10 == 0:
                mse = self.meanSquaredError(trainData)
                print("epoch = " + str(epoch) + " ms error = %0.4f " % mse)
        # end while

        result = self.getWeights()
        return result
    # end train

    def accuracy(self, tdata):  # train or test data matrix
        num_correct = 0; num_wrong = 0
        x_values = np.zeros(shape=[self.ni], dtype=np.float32)
        t_values = np.zeros(shape=[self.no], dtype=np.float32)

        for i in range(len(tdata)):  # walk thru each data item
            for j in range(self.ni):  # peel off input values from curr data row
                x_values[j] = tdata[i,j]
            for j in range(self.no):  # peel off target values from curr data row
                t_values[j] = tdata[i, j+self.ni]

            y_values = self.computeOutputs(x_values)  # computed output values
            max_index = np.argmax(y_values)  # index of largest output value

            if abs(t_values[max_index] - 1.0) < 1.0e-5:
                num_correct += 1
            else:
                num_wrong += 1

        return (num_correct * 1.0) / (num_correct + num_wrong)

    def meanSquaredError(self, tdata):  # on train or test data matrix
        sumSquaredError = 0.0
        x_values = np.zeros(shape=[self.ni], dtype=np.float32)
        t_values = np.zeros(shape=[self.no], dtype=np.float32)

        for ii in range(len(tdata)):  # walk thru each data item
            for jj in range(self.ni):  # peel off input values from curr data row
                x_values[jj] = tdata[ii, jj]
            for jj in range(self.no):  # peel off target values from curr data row
                t_values[jj] = tdata[ii, jj+self.ni]

            y_values = self.computeOutputs(x_values)  # computed output values
            for j in range(self.no):
                err = t_values[j] - y_values[j]
                sumSquaredError += err * err  # (t-o)^2

        return sumSquaredError / len(tdata)

    @staticmethod
    def hypertan(x):
        # tanh clamped to avoid overflow for large |x|
        if x < -20.0:
            return -1.0
        elif x > 20.0:
            return 1.0
        else:
            return math.tanh(x)

    @staticmethod
    def softmax(oSums):
        # numerically stable softmax: subtract the max before exponentiating
        result = np.zeros(shape=[len(oSums)], dtype=np.float32)
        m = max(oSums)
        divisor = 0.0
        for k in range(len(oSums)):
            divisor += math.exp(oSums[k] - m)
        for k in range(len(result)):
            result[k] = math.exp(oSums[k] - m) / divisor
        return result

    @staticmethod
    def totalWeights(nInput, nHidden, nOutput):
        # total number of weights and biases for an nInput-nHidden-nOutput network
        tw = (nInput * nHidden) + (nHidden * nOutput) + nHidden + nOutput
        return tw

# end class NeuralNetwork

def main():
    print("\nBegin NN back-propagation demo \n")
    pv = sys.version
    npv = np.version.version
    print("Using Python version " + str(pv) +
          "\n and NumPy version " + str(npv))

    numInput = 4
    numHidden = 5
    numOutput = 3
    print("\nCreating a %d-%d-%d neural network " %
          (numInput, numHidden, numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed=3)

    print("\nLoading Iris training and test data ")
    trainDataPath = "irisTrainData.txt"
    trainDataMatrix = loadFile(trainDataPath)
    print("\nTraining data: ")
    showMatrixPartial(trainDataMatrix, 4, 1, True)
    testDataPath = "irisTestData.txt"
    testDataMatrix = loadFile(testDataPath)

    maxEpochs = 50
    learnRate = 0.05
    print("\nSetting maxEpochs = " + str(maxEpochs))
    print("Setting learning rate = %0.3f " % learnRate)

    print("\nStarting training")
    nn.train(trainDataMatrix, maxEpochs, learnRate)
    print("Training complete")

    accTrain = nn.accuracy(trainDataMatrix)
    accTest = nn.accuracy(testDataMatrix)
    print("\nAccuracy on 120-item train data = %0.4f " % accTrain)
    print("Accuracy on 30-item test data = %0.4f " % accTest)

    print("\nEnd demo \n")

if __name__ == "__main__":
    main()
# end script
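Each row of irisTrainData.txt and irisTestData.txt is expected to hold the four numeric inputs followed by a three-value one-hot target, all comma-separated (4 + 3 = 7 columns, matching the 4-5-3 network). As a quick sanity check of that layout, a tiny made-up file (illustrative values only, not the real Iris data) can be written and read back with loadFile:

# illustrative smoke test of the expected data layout (values are made up)
rows = [
    "5.1,3.5,1.4,0.2,1,0,0",   # 4 inputs, then one-hot class 0
    "7.0,3.2,4.7,1.4,0,1,0",   # one-hot class 1
    "6.3,3.3,6.0,2.5,0,0,1",   # one-hot class 2
]
with open("tinyData.txt", "w") as f:
    f.write("\n".join(rows) + "\n")
print(loadFile("tinyData.txt").shape)   # expected: (3, 7)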


PRACTICAL 7
AIM: Text classification and word vectors.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer()
#
# Create sample set of documents
#
docs = np.array(['Mirabai has won a silver medal in weight lifting in Tokyo olympics 2021',
                 'Sindhu has won a bronze medal in badminton in Tokyo olympics',
                 'Indian hockey team is in top four team in Tokyo olympics 2021 after 40 years'])
#
# Fit the bag-of-words model
#
bag = vectorizer.fit_transform(docs)
#
# Get unique words / tokens found in all the documents. The unique words / tokens represent the features
#
print(vectorizer.get_feature_names())
#
# Associate the indices with each unique word
#
print(vectorizer.vocabulary_)
#
# Print the numerical feature vector
#
print(bag.toarray())
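The snippet above stops at the bag-of-words features. As an illustration of the classification half of the aim (the labels and the test sentence below are invented for the example, not part of the original practical), the same features can be fed to a simple scikit-learn classifier:

from sklearn.naive_bayes import MultinomialNB

# invented labels for the three sample documents: 0 = individual sport, 1 = team sport
labels = np.array([0, 0, 1])
clf = MultinomialNB()
clf.fit(bag, labels)            # train on the bag-of-words vectors
test_vec = vectorizer.transform(['India won a medal in hockey at the Tokyo olympics'])
print(clf.predict(test_vec))    # predicted label for the new sentence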


OUTPUT:


PRACTICAL 8
AIM: Fully convolutional networks.
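No code or output was recorded for this practical in the file. Purely as an illustrative sketch (the layer sizes, input shape and number of classes are assumptions, not the actual experiment), a fully convolutional classifier in Keras replaces the final dense layers with a 1x1 convolution and global pooling, so it contains no Flatten or Dense layers:

import tensorflow as tf
from tensorflow.keras import layers, models

def build_fcn(num_classes=10):
    # height and width are left unspecified: an FCN can accept variable-sized inputs
    inputs = layers.Input(shape=(None, None, 3))
    x = layers.Conv2D(32, 3, padding='same', activation='relu')(inputs)
    x = layers.MaxPooling2D()(x)
    x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)
    x = layers.MaxPooling2D()(x)
    x = layers.Conv2D(num_classes, 1)(x)          # 1x1 convolution acts as the classifier
    outputs = layers.GlobalAveragePooling2D()(x)  # one score per class
    return models.Model(inputs, outputs)

model = build_fcn()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()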


OUTPUT:


PRACTICAL 9
AIM: ConvNets for classification and localization.
import cv2
import os
import numpy as np
from random import shuffle
from tqdm import tqdm

TRAIN_DIR = 'E:/dataset / Cats_vs_Dogs / train'
TEST_DIR = 'E:/dataset / Cats_vs_Dogs / test1'
IMG_SIZE = 50
LR = 1e-3
MODEL_NAME = 'dogsvscats-{}-{}.model'.format(LR, '6conv-basic')

# label an image from its file name: cat -> [1, 0], dog -> [0, 1]
def label_img(img):
    word_label = img.split('.')[-3]
    if word_label == 'cat': return [1, 0]
    elif word_label == 'dog': return [0, 1]

# build the training set as (image, one-hot label) pairs
def create_train_data():
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data

# build the test set as (image, image id) pairs
def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR, img)
        img_num = img.split('.')[0]
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), img_num])
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data
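The code above only prepares the data; the model implied by MODEL_NAME ('6conv-basic') is not included in the file. As an illustrative stand-in only (a Keras sketch with assumed layer sizes, not the model used in the original practical), a small ConvNet for the 50x50 grayscale images could look like this:

import tensorflow as tf
from tensorflow.keras import layers, models

# assumed architecture for IMG_SIZE x IMG_SIZE grayscale cat/dog images
model = models.Sequential([
    layers.Input(shape=(IMG_SIZE, IMG_SIZE, 1)),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(2, activation='softmax'),   # two outputs: [cat, dog]
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=LR),
              loss='categorical_crossentropy', metrics=['accuracy'])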

OUTPUT:


PRACTICAL 10
AIM: Character level language model.
gpu = gpuDevice();
fprintf('Using a %s GPU.\n', gpu.Name)
sizeOfDouble = 8; % Each double-precision number needs 8 bytes of storage
sizes = power(2, 14:28);
sendTimes = inf(size(sizes));
gatherTimes = inf(size(sizes));
for ii=1:numel(sizes)
numElements = sizes(ii)/sizeOfDouble;
hostData = randi([0 9], numElements, 1);
gpuData = randi([0 9], numElements, 1, 'gpuArray');
% Time sending to GPU
sendFcn = @() gpuArray(hostData);
sendTimes(ii) = gputimeit(sendFcn);
% Time gathering back from GPU
gatherFcn = @() gather(gpuData);
gatherTimes(ii) = gputimeit(gatherFcn);
end
sendBandwidth = (sizes./sendTimes)/1e9;
[maxSendBandwidth,maxSendIdx] = max(sendBandwidth);
fprintf('Achieved peak send speed of %g GB/s\n',maxSendBandwidth)
gatherBandwidth = (sizes./gatherTimes)/1e9;
[maxGatherBandwidth,maxGatherIdx] = max(gatherBandwidth);
fprintf('Achieved peak gather speed of %g GB/s\n',max(gatherBandwidth))

OUTPUT:
