Question No 01:
Code:
# --- Dataset location and loader settings ---
data_dir = './Assignment4/Dataset'
img_width, img_height = 64, 64
# NOTE(review): batch_size is defined but never used below — the generators
# use samples_per_class * 8 instead; confirm which was intended.
batch_size = 32
train_samples_per_class = 120
val_samples_per_class = 30

# Training images get light augmentation on top of the 0-1 rescale.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
# Validation images are only rescaled (no augmentation).
val_datagen = ImageDataGenerator(rescale=1. / 255)

# NOTE(review): both generators read from the same directory — no separate
# validation split is visible here; confirm this is intended.
train_generator = train_datagen.flow_from_directory(
    data_dir,
    target_size=(img_width, img_height),
    batch_size=train_samples_per_class * 8,  # one batch = all 8 classes' samples
    class_mode='categorical',
)
val_generator = val_datagen.flow_from_directory(
    data_dir,
    target_size=(img_width, img_height),
    batch_size=val_samples_per_class * 8,
    class_mode='categorical',
)
print("Starting training")

# CNN: four conv/pool stages (32 -> 64 -> 128 -> 128 filters), then a dense head.
model = Sequential()
# First conv stage carries the input shape; the rest infer it.
model.add(Conv2D(32, (3, 3), activation='relu',
                 input_shape=(img_width, img_height, 3)))
model.add(MaxPooling2D((2, 2)))
for n_filters in (64, 128, 128):
    model.add(Conv2D(n_filters, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
# 8-way softmax output — matches the 8 classes in the dataset directory.
model.add(Dense(8, activation='softmax'))
Output:
Question No 02:
Code:
import numpy as np
import cv2
import os
import tensorflow as tf
from skimage.feature import local_binary_pattern
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.preprocessing import LabelEncoder
# NOTE(review): this aliases MobileNet as "SqueezeNet" — it is NOT the real
# SqueezeNet architecture. Every result labelled "SqueezeNet" below actually
# comes from MobileNet features.
from keras.applications import MobileNet as SqueezeNet
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
# Mount Google Drive so the dataset is reachable from this Colab session.
from google.colab import drive
drive.mount('/content/drive')
def extract_features(img, modelname):
    """Extract a flat deep-feature vector from ``img``.

    Parameters
    ----------
    img : np.ndarray
        Image array as loaded by cv2 (H x W x 3).
    modelname : str
        One of 'googlenet', 'squeezenet', or 'googleandsqueeze'.

    Returns
    -------
    np.ndarray
        1-D feature vector; for 'googleandsqueeze' it is the concatenation
        of the two individual feature vectors.

    Raises
    ------
    ValueError
        If ``modelname`` is not one of the three supported values.

    Relies on the module-level globals ``modelGoogle``, ``modelSqueeze`` and
    ``input_size`` being defined before the first call.
    """
    if modelname == 'googlenet':
        # InceptionV3 ("GoogleNet") requires at least 75x75 input.
        img = cv2.resize(img, (75, 75))
        img = tf.keras.applications.inception_v3.preprocess_input(img)
        deep_features = modelGoogle.predict(
            np.expand_dims(img, axis=0)).flatten()
    elif modelname == 'squeezenet':
        img = cv2.resize(img, input_size)
        img = tf.keras.applications.mobilenet.preprocess_input(img)
        deep_features = modelSqueeze.predict(
            np.expand_dims(img, axis=0)).flatten()
    elif modelname == 'googleandsqueeze':
        # GoogleNet branch.
        img_google = cv2.resize(img, (75, 75))
        img_google = tf.keras.applications.inception_v3.preprocess_input(
            img_google)
        deep_features_google = modelGoogle.predict(
            np.expand_dims(img_google, axis=0)).flatten()
        # SqueezeNet (MobileNet) branch — resized from the ORIGINAL image.
        img_squeeze = cv2.resize(img, input_size)
        img_squeeze = tf.keras.applications.mobilenet.preprocess_input(
            img_squeeze)
        deep_features_squeeze = modelSqueeze.predict(
            np.expand_dims(img_squeeze, axis=0)).flatten()
        # Concatenate the two deep-feature vectors.
        deep_features = np.concatenate(
            (deep_features_google, deep_features_squeeze))
    else:
        # Fail loudly instead of hitting UnboundLocalError below.
        raise ValueError(f"unknown modelname: {modelname!r}")
    # BUG FIX: the original returned the undefined name `features`.
    return deep_features
# Split the dataset into training and testing sets for each feature set.
# The same test_size/random_state on every call means all three splits use
# the identical train/test partition of `labels`, so accuracies are comparable.
# (Reconstructed: the original had the unpacking targets split across lines
# without parentheses — a syntax error.)
(train_features_googleandLBP, test_features_googleandLBP,
 train_labels_googleandLBP, test_labels_googleandLBP) = train_test_split(
    featuresGoogleandLBP, labels, test_size=0.3, random_state=42)

# GoogleNet-free split: SqueezeNet (MobileNet) + LBP features.
(train_features_squeezeandLBP, test_features_squeezeandLBP,
 train_labels_squeezeandLBP, test_labels_squeezeandLBP) = train_test_split(
    featuresSqueezeandLBP, labels, test_size=0.3, random_state=42)

# Combined split: GoogleNet + SqueezeNet + LBP features.
(train_features_googleandsqueezeandLBP,
 test_features_googleandsqueezeandLBP,
 train_labels_googleandsqueezeandLBP,
 test_labels_googleandsqueezeandLBP) = train_test_split(
    featuresGoogleandSqueezeandLBP, labels, test_size=0.3, random_state=42)
# --- GoogleNet + LBP features ---
# Linear SVM, one-vs-one decomposition.
svm_ovo = OneVsOneClassifier(SVC(kernel='linear', random_state=42))
svm_ovo.fit(train_features_googleandLBP, train_labels_googleandLBP)
# Evaluate on the held-out split.
predovogoogleandLBP = svm_ovo.predict(test_features_googleandLBP)
accovogoogleandLBP = accuracy_score(
    test_labels_googleandLBP, predovogoogleandLBP)
print('Accuracy (one-vs-one) for Googlenet and LBP:', accovogoogleandLBP)

# Linear SVM, one-vs-rest decomposition, same features.
svm_ova = OneVsRestClassifier(SVC(kernel='linear', random_state=42))
svm_ova.fit(train_features_googleandLBP, train_labels_googleandLBP)
predovagoogleandLBP = svm_ova.predict(test_features_googleandLBP)
accovagoogleandLBP = accuracy_score(
    test_labels_googleandLBP, predovagoogleandLBP)
print('Accuracy (one-vs-all) for Googlenet and LBP:', accovagoogleandLBP)
# --- SqueezeNet (MobileNet) + LBP features ---
# Same two SVM setups as for GoogleNet+LBP; the fitted classifiers are
# rebound to the same svm_ovo / svm_ova names.
svm_ovo = OneVsOneClassifier(SVC(kernel='linear', random_state=42))
svm_ovo.fit(train_features_squeezeandLBP, train_labels_squeezeandLBP)
predovosqueezeandLBP = svm_ovo.predict(test_features_squeezeandLBP)
accovosqueezeandLBP = accuracy_score(
    test_labels_squeezeandLBP, predovosqueezeandLBP)
print('Accuracy (one-vs-one) for SqueezeNet and LBP:', accovosqueezeandLBP)

svm_ova = OneVsRestClassifier(SVC(kernel='linear', random_state=42))
svm_ova.fit(train_features_squeezeandLBP, train_labels_squeezeandLBP)
predovasqueezeandLBP = svm_ova.predict(test_features_squeezeandLBP)
accovasqueezeandLBP = accuracy_score(
    test_labels_squeezeandLBP, predovasqueezeandLBP)
print('Accuracy (one-vs-all) for SqueezeNet and LBP:', accovasqueezeandLBP)
# --- GoogleNet + SqueezeNet + LBP features ---
# BUG FIX: the original reused svm_ovo / svm_ova that were fitted on the
# SqueezeNet+LBP feature matrix and predicted on the (wider) combined test
# matrix — a feature-dimension mismatch; the combined model was never
# trained. Fit fresh classifiers on the combined training features first.
svm_ovo = OneVsOneClassifier(SVC(kernel='linear', random_state=42))
svm_ovo.fit(train_features_googleandsqueezeandLBP,
            train_labels_googleandsqueezeandLBP)
predovogoogleandsqueezeandLBP = svm_ovo.predict(
    test_features_googleandsqueezeandLBP)
accovogoogleandsqueezeandLBP = accuracy_score(
    test_labels_googleandsqueezeandLBP, predovogoogleandsqueezeandLBP)
print('Accuracy (one-vs-one) for GoogleNet and SqueezeNet and LBP:',
      accovogoogleandsqueezeandLBP)

svm_ova = OneVsRestClassifier(SVC(kernel='linear', random_state=42))
svm_ova.fit(train_features_googleandsqueezeandLBP,
            train_labels_googleandsqueezeandLBP)
predovagoogleandsqueezeandLBP = svm_ova.predict(
    test_features_googleandsqueezeandLBP)
accovagoogleandsqueezeandLBP = accuracy_score(
    test_labels_googleandsqueezeandLBP, predovagoogleandsqueezeandLBP)
print('Accuracy (one-vs-all) for GoogleNet and SqueezeNet and LBP:',
      accovagoogleandsqueezeandLBP)
# Collect the six accuracies into a table-shaped dict: one row per feature
# combination, one column per SVM decomposition strategy.
results = {
    'Model': [
        'Googlenet+LBP',
        'SqueezeNet+LBP',
        'Googlenet+SqueezeNet+LBP',
    ],
    'Accuracy (one-vs-one)': [
        accovogoogleandLBP,
        accovosqueezeandLBP,
        accovogoogleandsqueezeandLBP,
    ],
    'Accuracy (one-vs-all)': [
        accovagoogleandLBP,
        accovasqueezeandLBP,
        accovagoogleandsqueezeandLBP,
    ],
}