
WLeaf Disease Classification_ResNet50.ipynb

Objective
- To classify collected wheat leaf images as Healthy, Strip_rust, Leaf_rust, Powdery, Septoria, or TanSpot
- Multiclass classification problem

Data Sources
- The dataset was primarily collected from the research field at the Kulumsa Agricultural Research Institute (KARC).
- A smaller portion of the dataset was previously collected by researchers/experts at KARC.

DataSet_Folder structure

DataSet
    Leaf_rust
        LR1.jpg
        LR2.jpg
        ...
    Strip_rust
        SR1.jpg
        SR2.jpg
        ...
    Powdery
        Powd1.jpg
        Powd2.jpg
        ...
    Septoria
        Sept1.jpg
        Sept2.jpg
        ...
    TanSpot
        Tan1.jpg
        Tan2.jpg
        ...
    Healthy
        Hlthy1.jpg
        Hlthy2.jpg
        ...
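As a quick sanity check on this layout, the short sketch below counts the images in each class folder with pathlib. It is only a sketch: it assumes the same data_dir path that is set later in the notebook and that the images are .jpg files as shown above.

import pathlib

# Assumed to match the directory used later in the notebook.
data_dir = "/content/drive/MyDrive/My_Recent_Data/WheatLeaf_Label_Resiz_Augm"

root = pathlib.Path(data_dir)
for class_dir in sorted(p for p in root.iterdir() if p.is_dir()):
    n_images = len(list(class_dir.glob("*.jpg")))   # assumes .jpg files per the layout above
    print(f"{class_dir.name}: {n_images} images")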

Dependencies

import os
import numpy as np
import seaborn as sns
import pathlib
import PIL
from glob import glob
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import confusion_matrix, classification_report
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dense,Flatten,Dropout,Conv2D, MaxPooling2D, GlobalAveragePooling2D,Input
from tensorflow.keras.applications import ResNet50, resnet50, Xception, xception
from tensorflow.keras.applications.xception import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

from tensorflow.keras.losses import CategoricalCrossentropy
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img, img_to_array

print(tf.__version__)
############ settings ############

2.9.2

from google.colab import drive


drive.mount('/content/drive')

Mounted at /content/drive

data_dir = "/content/drive/MyDrive/My_Recent_Data/WheatLeaf_Label_Resiz_Augm"

batch_size = 32
img_height = 200
img_width = 200

train_path = pathlib.Path(data_dir)
image_count = len(list(train_path.glob('*/*')))
print(image_count)

4554

Data preprocessing

- Loading and splitting the data
- Train: 80%, Validation: 20%

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

Found 4554 files belonging to 6 classes.


Using 3644 files for training.

val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

Found 4554 files belonging to 6 classes.


Using 910 files for validation.

EDA

def plot_distribution(y, title):
    # Class counts; the label order below follows the alphabetical class
    # ordering used by image_dataset_from_directory.
    a = Counter(y)
    y = ['Healthy', 'Leaf_rust', 'Powdery_Mildew', 'Septoria_tritic_blotch', 'Strip_rust', 'Tanspot']
    x = [a[i] for i in range(6)]

    plt.figure(figsize=(8, 3))
    plt.barh(y, x, color='Green')
    plt.title(title)
    plt.show()

# Target distribution

y_train = np.concatenate([y for x, y in train_ds], axis=0)


y_val = np.concatenate([y for x, y in val_ds], axis=0)


plot_distribution(y_train, title="Train images")


plot_distribution(y_val, title = "Validation images")

# Sample images
class_names = train_ds.class_names

plt.figure(figsize=(17, 17))
for images, labels in train_ds.take(1):
    for i in range(6):
        ax = plt.subplot(4, 6, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")

Model Training
# Configure the dataset for performance
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# Model architecture

num_classes = 6

model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(img_height, img_width, 3)),
    tf.keras.layers.experimental.preprocessing.Rescaling(1./255),

    tf.keras.layers.Conv2D(16, 3, activation='relu'),
    tf.keras.layers.MaxPooling2D(),

    tf.keras.layers.Conv2D(32, 3, activation='relu'),
    tf.keras.layers.MaxPooling2D(),

    tf.keras.layers.Conv2D(64, 3, activation='relu'),
    tf.keras.layers.MaxPooling2D(),

    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(num_classes, activation='softmax')
])

model.summary()

from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D, Dropout
from tensorflow.keras.applications.resnet import ResNet50

# Defining the pretrained base model
base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(200, 200, 3))

base_model.trainable = False
input_img = Input(shape=(200, 200, 3), name='input_image')
resnet = base_model(input_img, training=False)

resnet = GlobalAveragePooling2D()(resnet)
resnet = Dense(6, activation='softmax', name='classif_layer')(resnet)
# Note: placing Dropout after the softmax output does not regularize the head;
# the fine-tuning section below puts Dropout before the Dense layer instead.
resnet = Dropout(rate=0.2, name='drop_out')(resnet)

resnet_model = Model(inputs=input_img, outputs=resnet, name='WLD_Prediction_Model')
resnet_model.summary()

Model: "WLD_Prediction_Model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_image (InputLayer) [(None, 200, 200, 3)] 0

resnet50 (Functional) (None, 7, 7, 2048) 23587712

global_average_pooling2d_2 (None, 2048) 0


(GlobalAveragePooling2D)

classif_layer (Dense) (None, 6) 12294

=================================================================
Total params: 23,600,006
Trainable params: 12,294
Non-trainable params: 23,587,712
_________________________________________________________________
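Worth noting: the frozen ResNet50 base here receives raw 0-255 pixels, whereas Keras application models are normally paired with their own preprocess_input. Below is a minimal sketch (not part of the original run) of how the ResNet50-specific preprocessing could be inserted ahead of the base model; preprocessed_model is a hypothetical name used only for illustration.

from tensorflow.keras.applications.resnet50 import preprocess_input as resnet_preprocess

# Sketch only: same head as above, but with ResNet50's own preprocessing
# applied to the raw-pixel inputs before the frozen base model.
input_img = Input(shape=(200, 200, 3), name='input_image')
x = tf.keras.layers.Lambda(resnet_preprocess, name='resnet_preprocess')(input_img)
x = base_model(x, training=False)
x = GlobalAveragePooling2D()(x)
output = Dense(6, activation='softmax', name='classif_layer')(x)
preprocessed_model = Model(inputs=input_img, outputs=output)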

from tensorflow.keras.applications.vgg19 import VGG19

# Defining the pretrained base model
base_model = VGG19(include_top=False, weights='imagenet', input_shape=(200, 200, 3), pooling='avg')

# Making sure the existing weights are not trained
for layer in base_model.layers:
    layer.trainable = False

x = Flatten()(base_model.output)
#x = base_model.output
#x = GlobalAveragePooling2D()(x)
# Defining the head of the model where the prediction is conducted
pred = Dense(6, activation='softmax')(x)
# Combining base and head
model = Model(inputs=base_model.input, outputs=pred, name='My_Vgg19')

model.summary()


Transfer learning with Xception

# Model building
base_model = tf.keras.applications.Xception(include_top=False,
                                            input_shape=(200, 200, 3),
                                            #pooling='avg',
                                            weights='imagenet')
base_model.trainable = False

input_img = Input(shape=(200, 200, 3), name='input_image')
xception = base_model(input_img, training=False)

xception = GlobalAveragePooling2D()(xception)
#xception = Dense(512, activation='relu')(xception)
xception = Dropout(rate=0.2, name='drop_out')(xception)
xception = Dense(6, activation='softmax', name='classif_layer')(xception)

xception_model = Model(inputs=input_img, outputs=xception, name='WLD_Prediction_Model')
xception_model.summary()

Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels


83683744/83683744 [==============================] - 0s 0us/step
Model: "WLD_Prediction_Model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_image (InputLayer) [(None, 200, 200, 3)] 0

xception (Functional) (None, 7, 7, 2048) 20861480

global_average_pooling2d (G (None, 2048) 0


lobalAveragePooling2D)

https://colab.research.google.com/drive/1zGDYxQMqj0aijE6TtdVQHbRVSvfn0AdS#scrollTo=NmQLaeywDyD-&printMode=true 4/12
2/8/23, 2:13 AM WLeaf Disease Classification_ResNet50.ipynb - Colaboratory

drop_out (Dropout) (None, 2048) 0

classif_layer (Dense) (None, 6) 12294

=================================================================
Total params: 20,873,774
Trainable params: 12,294
Non-trainable params: 20,861,480
_________________________________________________________________

base_learning_rate = 0.001
resnet_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy'])

earlystop_callback = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                                      min_delta=0.0001,
                                                      # patience=5
                                                      )

history = resnet_model.fit(train_ds,
                           validation_data=val_ds,
                           epochs=20,
                           # callbacks=[earlystop_callback]
                           )

Epoch 1/20
114/114 [==============================] - 1917s 16s/step - loss: 1.0149 - accuracy: 0.6358 - val_loss: 0.6820 - val_accuracy: 0.7758
Epoch 2/20
114/114 [==============================] - 13s 116ms/step - loss: 0.5432 - accuracy: 0.8304 - val_loss: 0.5335 - val_accuracy: 0.8374
Epoch 3/20
114/114 [==============================] - 13s 114ms/step - loss: 0.4157 - accuracy: 0.8729 - val_loss: 0.4643 - val_accuracy: 0.8549
Epoch 4/20
114/114 [==============================] - 13s 113ms/step - loss: 0.3393 - accuracy: 0.9018 - val_loss: 0.4235 - val_accuracy: 0.8670
Epoch 5/20
114/114 [==============================] - 13s 112ms/step - loss: 0.2863 - accuracy: 0.9229 - val_loss: 0.3959 - val_accuracy: 0.8758
Epoch 6/20
114/114 [==============================] - 13s 113ms/step - loss: 0.2470 - accuracy: 0.9341 - val_loss: 0.3758 - val_accuracy: 0.8802
Epoch 7/20
114/114 [==============================] - 13s 113ms/step - loss: 0.2163 - accuracy: 0.9457 - val_loss: 0.3604 - val_accuracy: 0.8879
Epoch 8/20
114/114 [==============================] - 13s 113ms/step - loss: 0.1918 - accuracy: 0.9517 - val_loss: 0.3484 - val_accuracy: 0.8934
Epoch 9/20
114/114 [==============================] - 13s 113ms/step - loss: 0.1716 - accuracy: 0.9621 - val_loss: 0.3389 - val_accuracy: 0.8989
Epoch 10/20
114/114 [==============================] - 13s 112ms/step - loss: 0.1547 - accuracy: 0.9682 - val_loss: 0.3314 - val_accuracy: 0.9022
Epoch 11/20
114/114 [==============================] - 13s 112ms/step - loss: 0.1404 - accuracy: 0.9731 - val_loss: 0.3252 - val_accuracy: 0.9022
Epoch 12/20
114/114 [==============================] - 13s 112ms/step - loss: 0.1281 - accuracy: 0.9789 - val_loss: 0.3201 - val_accuracy: 0.9033
Epoch 13/20
114/114 [==============================] - 13s 112ms/step - loss: 0.1174 - accuracy: 0.9808 - val_loss: 0.3159 - val_accuracy: 0.9033
Epoch 14/20
114/114 [==============================] - 13s 113ms/step - loss: 0.1081 - accuracy: 0.9827 - val_loss: 0.3123 - val_accuracy: 0.9055
Epoch 15/20
114/114 [==============================] - 13s 116ms/step - loss: 0.0999 - accuracy: 0.9849 - val_loss: 0.3093 - val_accuracy: 0.9077
Epoch 16/20
114/114 [==============================] - 13s 114ms/step - loss: 0.0926 - accuracy: 0.9863 - val_loss: 0.3068 - val_accuracy: 0.9055
Epoch 17/20
114/114 [==============================] - 13s 112ms/step - loss: 0.0861 - accuracy: 0.9877 - val_loss: 0.3048 - val_accuracy: 0.9055
Epoch 18/20
114/114 [==============================] - 13s 114ms/step - loss: 0.0803 - accuracy: 0.9879 - val_loss: 0.3033 - val_accuracy: 0.9077
Epoch 19/20
114/114 [==============================] - 13s 116ms/step - loss: 0.0751 - accuracy: 0.9896 - val_loss: 0.3022 - val_accuracy: 0.9088
Epoch 20/20
114/114 [==============================] - 13s 115ms/step - loss: 0.0705 - accuracy: 0.9904 - val_loss: 0.3015 - val_accuracy: 0.9099
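The EarlyStopping and ModelCheckpoint callbacks imported at the top are left commented out in the fit call above. Below is a minimal sketch (not from the original run) of how they could be wired in; the checkpoint filename "resnet_best.h5" is a placeholder.

# Sketch only: early stopping on validation accuracy plus checkpointing of the
# best weights. "resnet_best.h5" is a placeholder filename.
earlystop_callback = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                                      min_delta=0.0001,
                                                      patience=5,
                                                      restore_best_weights=True)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint('resnet_best.h5',
                                                         monitor='val_accuracy',
                                                         save_best_only=True)

history = resnet_model.fit(train_ds,
                           validation_data=val_ds,
                           epochs=20,
                           callbacks=[earlystop_callback, checkpoint_callback])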

loss, acc = resnet_model.evaluate(val_ds, batch_size=batch_size)


print("validation accuracy :", round(acc, 2))
print("validation loss :", round(loss,2))

29/29 [==============================] - 3s 93ms/step - loss: 0.3015 - accuracy: 0.9099


validation accuracy : 0.91
validation loss : 0.3

loss, acc = resnet_model.evaluate(train_ds, batch_size=batch_size)
print("Train accuracy :", round(acc, 2))
print("Train loss :", round(loss,2))

114/114 [==============================] - 10s 90ms/step - loss: 0.0669 - accuracy: 0.9901


Train accuracy : 0.99
Train loss : 0.07

Model Evaluation
Feature Extraction

train_loss = history.history['loss']
train_acc = history.history['accuracy']
valid_loss = history.history['val_loss']
valid_acc = history.history['val_accuracy']

# Accuracy plots
plt.figure(figsize=(8, 4))
plt.plot(train_acc, color='green', linestyle='-', label='train accuracy')
plt.plot(valid_acc, color='blue', linestyle='-', label='val accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# loss plots
plt.figure(figsize=(8, 4))
plt.plot(train_loss, color='orange', linestyle='-', label='train loss')
plt.plot(valid_loss, color='red', linestyle='-', label='val loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

y_pred = []  # store predicted labels
y_true = []  # store true labels

# iterate over the validation dataset, batch by batch
for image_batch, label_batch in val_ds:
    # append true labels
    y_true.append(label_batch)
    # compute predictions
    preds = resnet_model.predict(image_batch)
    # append predicted labels
    y_pred.append(np.argmax(preds, axis=-1))

# convert the true and predicted labels into tensors
correct_labels = tf.concat([item for item in y_true], axis=0)
predicted_labels = tf.concat([item for item in y_pred], axis=0)

1/1 [==============================] - 1s 1s/step
1/1 [==============================] - 0s 31ms/step
...
1/1 [==============================] - 1s 802ms/step
(one progress line per validation batch, 29 batches in total)

cm = confusion_matrix(correct_labels, predicted_labels, normalize='true')

sns.heatmap(cm, annot=True, cmap='viridis', cbar=None)
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=60, fontsize=10, fontweight='bold')
plt.yticks(tick_marks, class_names, rotation=0, fontsize=10, fontweight='bold')
plt.title("Confusion Matrix", fontsize=20, fontweight='bold')
plt.ylabel("Actual Value", fontsize=16)
plt.xlabel("Predicted Value", fontsize=16)
plt.show()

print(classification_report(correct_labels, predicted_labels,target_names=class_names))

                         precision    recall  f1-score   support

               Healthy       0.93      0.99      0.96       141
             Leaf_rust       0.89      0.86      0.87       163
        Powdery_Mildew       0.91      0.99      0.95       151
Septoria_tritic_blotch       0.90      0.86      0.88       163
            Strip_rust       0.87      0.86      0.86       153
               Tanspot       0.96      0.91      0.94       139

              accuracy                           0.91       910
             macro avg       0.91      0.91      0.91       910
          weighted avg       0.91      0.91      0.91       910

Fine Tuning

# Defining the pretrained base model
base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(200, 200, 3))

base_model.trainable = False
input_img = Input(shape=(200, 200, 3), name='input_image')
resnet = base_model(input_img, training=False)

resnet = GlobalAveragePooling2D()(resnet)
resnet = Dropout(rate=0.2, name='drop_out')(resnet)
resnet = Dense(6, activation='softmax', name='classif_layer')(resnet)

resnet_model = Model(inputs=input_img, outputs=resnet, name='WLD_Prediction_Model')
resnet_model.summary()

Model: "WLD_Prediction_Model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_image (InputLayer) [(None, 200, 200, 3)] 0
Automatic saving failed. This file was updated remotely or in another tab. Show diff
resnet50 (Functional) (None, 7, 7, 2048) 23587712

global_average_pooling2d_8 (None, 2048) 0


(GlobalAveragePooling2D)

drop_out (Dropout) (None, 2048) 0

classif_layer (Dense) (None, 6) 12294

=================================================================
Total params: 23,600,006
Trainable params: 12,294
Non-trainable params: 23,587,712
_________________________________________________________________

base_model.trainable = True

# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))

# Fine-tune from this layer onwards
fine_tune_at = 150

# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False
#resnet_model = Dropout(rate=0.2, name='drop_out')(resnet_model)

Number of layers in the base model:  175
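As a quick sanity check (not in the original notebook), the layers left trainable after this freeze can be counted directly; with fine_tune_at = 150 and 175 layers in total, the last 25 base-model layers plus the classification head are updated during fine-tuning.

# Sketch only: count how many ResNet50 layers remain trainable after the freeze.
trainable_layers = [layer.name for layer in base_model.layers if layer.trainable]
print("Trainable base-model layers:", len(trainable_layers))   # expected: 25 with the settings above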

resnet_model.summary()

Model: "WLD_Prediction_Model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_image (InputLayer) [(None, 200, 200, 3)] 0

resnet50 (Functional) (None, 7, 7, 2048) 23587712

global_average_pooling2d_8 (None, 2048) 0


(GlobalAveragePooling2D)

drop_out (Dropout) (None, 2048) 0

classif_layer (Dense) (None, 6) 12294

https://colab.research.google.com/drive/1zGDYxQMqj0aijE6TtdVQHbRVSvfn0AdS#scrollTo=NmQLaeywDyD-&printMode=true 8/12
2/8/23, 2:13 AM WLeaf Disease Classification_ResNet50.ipynb - Colaboratory
=================================================================
Total params: 23,600,006
Trainable params: 10,002,438
Non-trainable params: 13,597,568
_________________________________________________________________

base_learning_rate = 0.0001
resnet_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
                     loss='sparse_categorical_crossentropy',
                     metrics=['accuracy'])

earlystop_callback = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                                      min_delta=0.0001,
                                                      # patience=5
                                                      )

history = resnet_model.fit(train_ds,
                           validation_data=val_ds,
                           epochs=20,
                           # callbacks=[earlystop_callback]
                           )

Epoch 1/20
114/114 [==============================] - 22s 156ms/step - loss: 0.7808 - accuracy: 0.7234 - val_loss: 0.3717 - val_accuracy: 0.8692
Epoch 2/20
114/114 [==============================] - 17s 150ms/step - loss: 0.2477 - accuracy: 0.9160 - val_loss: 0.2919 - val_accuracy: 0.8934
Epoch 3/20
114/114 [==============================] - 18s 152ms/step - loss: 0.1239 - accuracy: 0.9569 - val_loss: 0.3970 - val_accuracy: 0.8780
Epoch 4/20
114/114 [==============================] - 18s 151ms/step - loss: 0.0723 - accuracy: 0.9767 - val_loss: 0.2983 - val_accuracy: 0.9154
Epoch 5/20
114/114 [==============================] - 18s 151ms/step - loss: 0.0630 - accuracy: 0.9800 - val_loss: 0.2993 - val_accuracy: 0.9077
Epoch 6/20
114/114 [==============================] - 17s 149ms/step - loss: 0.0519 - accuracy: 0.9816 - val_loss: 0.3018 - val_accuracy: 0.9330
Epoch 7/20
114/114 [==============================] - 17s 150ms/step - loss: 0.0271 - accuracy: 0.9901 - val_loss: 0.3588 - val_accuracy: 0.9077
Epoch 8/20
114/114 [==============================] - 19s 167ms/step - loss: 0.0189 - accuracy: 0.9909 - val_loss: 0.3371 - val_accuracy: 0.9341
Epoch 9/20
114/114 [==============================] - 17s 147ms/step - loss: 0.0166 - accuracy: 0.9912 - val_loss: 0.3597 - val_accuracy: 0.9286
Epoch 10/20
114/114 [==============================] - 17s 149ms/step - loss: 0.0270 - accuracy: 0.9904 - val_loss: 0.4720 - val_accuracy: 0.9110
Epoch 11/20
114/114 [==============================] - 18s 151ms/step - loss: 0.0690 - accuracy: 0.9737 - val_loss: 0.3185 - val_accuracy: 0.9264
Epoch 12/20
114/114 [==============================] - 17s 148ms/step - loss: 0.0787 - accuracy: 0.9731 - val_loss: 0.4264 - val_accuracy: 0.8989
Epoch 13/20
114/114 [==============================] - 17s 148ms/step - loss: 0.0306 - accuracy: 0.9871 - val_loss: 0.3037 - val_accuracy: 0.9253
Epoch 14/20
114/114 [==============================] - 17s 150ms/step - loss: 0.0340 - accuracy: 0.9868 - val_loss: 0.3596 - val_accuracy: 0.9286
Epoch 15/20
114/114 [==============================] - 18s 151ms/step - loss: 0.0389 - accuracy: 0.9874 - val_loss: 0.3287 - val_accuracy: 0.9209
Epoch 16/20
114/114 [==============================] - 18s 151ms/step - loss: 0.0359 - accuracy: 0.9855 - val_loss: 0.3251 - val_accuracy: 0.9330
Epoch 17/20
114/114 [==============================] - 17s 149ms/step - loss: 0.0451 - accuracy: 0.9868 - val_loss: 0.4635 - val_accuracy: 0.9209
Epoch 18/20
114/114 [==============================] - 17s 150ms/step - loss: 0.0329 - accuracy: 0.9871 - val_loss: 0.3353 - val_accuracy: 0.9253
Epoch 19/20
114/114 [==============================] - 17s 149ms/step - loss: 0.0168 - accuracy: 0.9912 - val_loss: 0.3786 - val_accuracy: 0.9308
Epoch 20/20
114/114 [==============================] - 18s 152ms/step - loss: 0.0117 - accuracy: 0.9931 - val_loss: 0.3196 - val_accuracy: 0.9407

score_val = resnet_model.evaluate(val_ds, verbose=1)


accuracy_val = 100*score_val[1]
print('Validate accuracy = %.4f%% ' % accuracy_val)

29/29 [==============================] - 3s 97ms/step - loss: 0.3196 - accuracy: 0.9407


Validate accuracy = 94.0659%

score_train = resnet_model.evaluate(train_ds, verbose=1)


accuracy_train = 100*score_train[1]
print('Train accuracy = %.4f%% ' % accuracy_train)

114/114 [==============================] - 11s 95ms/step - loss: 0.0078 - accuracy: 0.9953


Train accuracy = 99.5335%

resnet_model.save("ResNet_Model_9953_9407.h5")


Model Evaluation After Fine Tuning

Training and Validation Plots

train_loss = history.history['loss']
train_acc = history.history['accuracy']
valid_loss = history.history['val_loss']
valid_acc = history.history['val_accuracy']

# Accuracy plots
plt.figure(figsize=(8, 4))
plt.plot(train_acc, color='green', linestyle='-', label='train accuracy')
plt.plot(valid_acc, color='blue', linestyle='-', label='val accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# loss plots
plt.figure(figsize=(8, 4))
plt.plot(train_loss, color='orange', linestyle='-', label='train loss')
plt.plot(valid_loss, color='red', linestyle='-', label='val loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

y_pred = []  # store predicted labels
y_true = []  # store true labels

# iterate over the validation dataset, batch by batch
for image_batch, label_batch in val_ds:
    # append true labels
    y_true.append(label_batch)
    # compute predictions
    preds = resnet_model.predict(image_batch)
    # append predicted labels
    y_pred.append(np.argmax(preds, axis=-1))

# convert the true and predicted labels into tensors
correct_labels = tf.concat([item for item in y_true], axis=0)
predicted_labels = tf.concat([item for item in y_pred], axis=0)


1/1 [==============================] - 1s 837ms/step
1/1 [==============================] - 0s 31ms/step
...
1/1 [==============================] - 1s 1s/step
(one progress line per validation batch, 29 batches in total)

cm = confusion_matrix(correct_labels, predicted_labels, normalize='true')


sns.heatmap(cm, annot=True, cmap='viridis', cbar=None)
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=60, fontsize=10, fontweight='bold')
plt.yticks(tick_marks, class_names, rotation=0, fontsize=10, fontweight='bold')
plt.title("Confusion Matrix", fontsize=20, fontweight='bold')
plt.ylabel("Actual Value", fontsize=16)
plt.xlabel("Predicted Value", fontsize=16)
plt.show()

print(classification_report(correct_labels, predicted_labels,target_names=class_names))

                         precision    recall  f1-score   support

               Healthy       0.97      0.99      0.98       141
             Leaf_rust       0.90      0.89      0.90       163
        Powdery_Mildew       0.95      1.00      0.97       151
Septoria_tritic_blotch       0.95      0.91      0.93       163
            Strip_rust       0.92      0.90      0.91       153
               Tanspot       0.96      0.96      0.96       139

              accuracy                           0.94       910
             macro avg       0.94      0.94      0.94       910
          weighted avg       0.94      0.94      0.94       910


Testing Trained model
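The exported notebook ends before this section's cells, so the following is only a sketch of how the saved model could be tested on a single image. The path "sample_leaf.jpg" is a placeholder; load_img and img_to_array are the helpers already imported above, and the image is fed as raw 0-255 pixels, matching how the model was trained.

# Sketch only: load the saved fine-tuned model and classify one image.
# "sample_leaf.jpg" is a placeholder path, not a file from the original notebook.
loaded_model = tf.keras.models.load_model("ResNet_Model_9953_9407.h5")

img = load_img("sample_leaf.jpg", target_size=(img_height, img_width))
img_array = img_to_array(img)                  # shape (200, 200, 3), raw pixel values
img_array = np.expand_dims(img_array, axis=0)  # add a batch dimension

probs = loaded_model.predict(img_array)[0]
print("Predicted class:", class_names[np.argmax(probs)])
print("Confidence:", round(float(np.max(probs)), 3))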
