Professional Documents
Culture Documents
Class: BE
Subject: AISC
Experiment No. 09
Title: Implement object detection using a Convolutional Neural Network (deep learning)
Rubric (marks):        Below Expectation | Average | Good
Knowledge (max 4):             2         |    3    |  4
Performance (max 5):           2         |    3    |  5
Marks Obtained:
Signature of Faculty:
EXPERIMENT NO 9
OBJECT DETECTION
CODE:
# import libraries
import os

import numpy as np
import pandas as pd
# NOTE(review): `sub` is not defined anywhere in this file — presumably a
# submission DataFrame loaded in a cell missing from this copy; confirm
# before re-enabling. Left commented out so the script can run.
# sub.head()

# Paths of the train/test directories where the cat and dog images were
# saved after unzipping the input files.
original_dataset_train_dir = './train'
original_dataset_test_dir = './test'
# Sanity check — confirm each copied directory contains the expected
# number of images.
# NOTE(review): train_cats_dir / validation_cats_dir / etc. are not defined
# anywhere in this file; presumably built with os.path.join(base_dir, ...)
# in a cell missing from this copy — confirm.
print("train_cats_dir:", len(os.listdir(train_cats_dir)))
print("validation_cats_dir:", len(os.listdir(validation_cats_dir)))
print("test_cats_dir:", len(os.listdir(test_cats_dir)))
print("train_dogs_dir:", len(os.listdir(train_dogs_dir)))
print("validation_dogs_dir:", len(os.listdir(validation_dogs_dir)))
print("test_dogs_dir:", len(os.listdir(test_dogs_dir)))
# Stream 150x150 images in batches of 20 with binary (cat/dog) labels.
# fix: the original had `validation_generator =` dangling on its own line
# (a SyntaxError from line-wrapping); the assignment is rejoined here.
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
# Since image generators yield batches indefinitely, break after
# inspecting the first batch's shapes.
# fix: the loop body was unindented in the original copy (SyntaxError).
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch.shape)
    break
# Epoch-count presets used by the training runs below.
EPOCH_1 = 1      # single-epoch smoke test
EPOCH_30 = 30    # frozen-base feature-extraction run
EPOCH_100 = 100  # full training / fine-tuning budget
# Baseline dense classifier: flatten the pixels, two 512-unit ReLU hidden
# layers, and a single sigmoid output for binary cat-vs-dog prediction.
model = Sequential([
    Flatten(),
    Dense(512, activation='relu'),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])
# Plot the loss of the model over the training and validation data.
# NOTE(review): `history` must come from a model.fit(...) call that is not
# visible in this copy — confirm training ran before this cell.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
# fix: `epochs` was used below but never defined anywhere in the file.
epochs = range(1, len(acc) + 1)
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Augmentation generator used to visualize transformed samples
# (note: no rescale here, since the images are displayed, not trained on).
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# NOTE(review): `fnames` is not defined in this file — presumably a list of
# training-image paths built in a missing cell; confirm.
img_path = fnames[3]
# Load one image and reshape it into a single-image batch (1, 150, 150, C)
# suitable for datagen.flow.
img = image.load_img(img_path, target_size=(150, 150))
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)
plt.figure(figsize = (12,8))
i=0
# NOTE(review): the loop that iterates datagen.flow(x) and plots the
# augmented variants appears to be missing from this copy — confirm
# against the original notebook.
plt.show()
# Same dense classifier as `model`, with 30% dropout after each hidden
# layer to overcome overfitting.
model_1 = Sequential([
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.3),
    Dense(512, activation='relu'),
    Dropout(0.3),
    Dense(1, activation='sigmoid'),
])
# Augmented training pipeline: rescale plus random rotations, shifts,
# shear, zoom and horizontal flips.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')
# Validation data is NOT augmented — it flows through test_datagen.
# fix: the original had `validation_generator =` dangling on its own line
# (a SyntaxError from line-wrapping); the assignment is rejoined here.
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')
# Train the dropout model on the augmented generators.
# NOTE(review): fit_generator is deprecated in TF 2.x — model_1.fit accepts
# the same arguments; confirm which library version this targets.
history = model_1.fit_generator(
    train_generator,
    validation_data=validation_generator,
    validation_steps=32,
    epochs=EPOCH_100,
    steps_per_epoch=63)
# Plot the loss of the dropout model over the training and validation data.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
# fix: `epochs` was used below but never defined anywhere in the file.
epochs = range(1, len(acc) + 1)
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Print the architecture of the pre-trained convolutional base.
# NOTE(review): `conv_base` is created in a cell not visible in this copy —
# presumably VGG16(weights='imagenet', include_top=False); confirm.
conv_base.summary()
#base_dir = '/Users/fchollet/Downloads/cats_and_dogs_small'
#train_dir = os.path.join(base_dir, 'train')
#validation_dir = os.path.join(base_dir, 'validation')
#test_dir = os.path.join(base_dir, 'test')
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20

# fix: the original copy had the loop and `return features, labels` at top
# level — the `def extract_features(...)` header and the feature/label
# buffers were lost in extraction. Reconstructed from the standard
# VGG16 feature-extraction listing; the (4, 4, 512) shape is the conv
# base's output for 150x150 inputs — confirm against conv_base.summary().
def extract_features(directory, sample_count):
    """Run `sample_count` images from `directory` through the frozen conv
    base and return (features, labels) numpy arrays."""
    features = np.zeros(shape=(sample_count, 4, 4, 512))
    labels = np.zeros(shape=(sample_count,))
    generator = datagen.flow_from_directory(
        directory,
        target_size=(150, 150),
        batch_size=batch_size,
        class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        # The generator yields indefinitely; stop once we have enough.
        if i * batch_size >= sample_count:
            break
    return features, labels
# Loss curves for the current run.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
model.summary()
print('This is the number of trainable weights before freezing the conv base:',
      len(model.trainable_weights))
# fix: without freezing between the two prints, both report the same
# count — the `conv_base.trainable = False` line was lost in this copy.
conv_base.trainable = False
print('This is the number of trainable weights after freezing the conv base:',
      len(model.trainable_weights))
# Validation/test images are only rescaled, never augmented.
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
# fix: the original had `validation_generator =` dangling on its own line
# and the learning rate split as `2e-` / `5` across lines (SyntaxErrors
# from line-wrapping); both are rejoined here.
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
# Low learning rate: only the classifier head trains on the frozen base.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=EPOCH_30,
    validation_data=validation_generator,
    validation_steps=50)
# Loss curves for the frozen-base training run.
# NOTE(review): relies on `epochs`, `loss`, `val_loss` still holding values
# from an earlier plotting cell — confirm they were recomputed from the
# latest `history` before this cell runs.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Fine-tuning: unfreeze the conv base, then re-freeze every layer up to
# 'block5_conv1' so only the last conv block trains.
conv_base.trainable = True
set_trainable = False
# fix: the `for layer in conv_base.layers:` header and the name check were
# lost in this copy — without them `layer` is undefined. Reconstructed
# from the standard VGG16 fine-tuning recipe; confirm the boundary layer
# name matches conv_base.summary().
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
# Very low learning rate so fine-tuning does not destroy the pre-trained
# weights. fix: `1e-` / `5` and `validation` / `_data` were split across
# lines in the original copy (SyntaxErrors); rejoined here.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-5),
              metrics=['acc'])
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=EPOCH_100,
    validation_data=validation_generator,
    validation_steps=50)
plt.show()
OUTPUT: