
Augmentation 100

        Loss                          Accuracy
Epoch   Adam     RMS      Adadelta    Adam     RMS      Adadelta
1       1.7079   1.6215   1.7226      0.1883   0.1818   0.1883
2       1.8406   1.5968   1.9851      0.1883   0.2078   0.1948
3       1.9398   1.7613   1.9613      0.1883   0.1818   0.2143
4       1.9362   1.9491   1.8240      0.1818   0.1818   0.1818
5       1.9649   1.8562   1.9678      0.2013   0.2468   0.1753
6       1.7034   1.7610   2.2223      0.2987   0.2727   0.1623
7       1.3954   1.4752   2.2228      0.4545   0.4156   0.2013
8       1.1738   1.6158   2.1599      0.5584   0.3831   0.1364
9       1.0914   1.2059   2.2313      0.5714   0.5455   0.1753
10      1.1499   1.2138   2.2412      0.5584   0.5779   0.2273

Augmentation 10

        Loss                          Accuracy
Epoch   Adam     RMS      Adadelta    Adam     RMS      Adadelta
1       0.2018   1.6500   1.6459      0.0964   0.3976   0.2195
2       0.4510   1.5387   1.7372      0.1446   0.3855   0.1341
3       0.5490   1.6755   1.8160      0.1807   0.3253   0.1341
4       0.6409   1.4515   1.8585      0.1084   0.4819   0.1341
5       0.7033   1.4744   1.8474      0.1687   0.5060   0.1463
6       0.7596   1.4210   1.7919      0.2530   0.5542   0.1585
7       0.7923   1.5128   1.7363      0.2651   0.4940   0.1829
8       0.8398   1.4759   1.7541      0.2530   0.4337   0.1707
9       0.8249   1.5641   1.8411      0.2651   0.5181   0.1707
10      0.8932   1.5813   1.8648      0.3253   0.4699   0.2317
Augmentation 1000

        Loss                          Accuracy
Epoch   Adam     RMS      Adadelta    Adam     RMS      Adadelta
1       1.2757   1.7075   1.9729      0.5217   0.3204   0.2117
2       0.7366   0.7899   2.1887      0.7277   0.7243   0.2071
3       0.4222   0.7991   2.1763      0.8513   0.6968   0.2071
4       0.3950   0.4571   2.1909      0.8650   0.8547   0.2025
5       0.3216   0.3783   2.1721      0.8982   0.8810   0.2094
6       0.2953   0.3424   2.1935      0.8936   0.8924   0.2059
7       0.2849   0.5169   2.1659      0.9118   0.8295   0.2059
8       0.4779   0.2362   2.1820      0.8444   0.9233   0.2151
9       0.2367   0.6119   2.1538      0.9336   0.7632   0.2174
10      0.6955   0.5560   2.1534      0.7712   0.8272   0.2071


Augmentation 100

        Loss                          Accuracy
Epoch   Adam     RMS      Adadelta    Adam     RMS      Adadelta
1       1.6582   1.6135   1.6265      0.1765   0.1830   0.1948
2       1.7062   1.6055   1.6631      0.1830   0.2352   0.1948
3       1.7428   1.6151   1.7436      0.1830   0.2222   0.1818
4       1.7622   1.6752   1.7980      0.1830   0.2222   0.1688
5       1.7042   1.7381   1.7891      0.1830   0.2026   0.1364
6       1.7580   1.7381   1.7416      0.1895   0.1830   0.1623
7       1.7778   1.7426   1.7489      0.1569   0.2157   0.1948
8       1.8197   1.9058   1.8183      0.2157   0.1830   0.2078
9       1.9210   2.0161   1.8640      0.2418   0.1961   0.1818
10      1.7341   1.9107   1.9569      0.2876   0.2549   0.2208

Augmentation 10

        Loss                          Accuracy
Epoch   Adam     RMS      Adadelta    Adam     RMS      Adadelta
1       1.6563   1.5976   1.5536      0.1325   0.2195   0.4699
2       1.5842   1.5927   1.5174      0.1325   0.3293   0.4940
3       1.7744   1.5453   1.5200      0.1325   0.2439   0.4940
4       1.8561   1.5125   1.5149      0.1325   0.3415   0.4940
5       1.9239   1.5365   1.5149      0.1325   0.3537   0.4819
6       2.0022   1.5078   1.5281      0.1325   0.4024   0.4940
7       2.0933   1.4690   1.5414      0.1566   0.3902   0.5060
8       2.0952   1.4531   1.5416      0.1446   0.4146   0.5060
9       2.0318   1.2826   1.5406      0.1687   0.5122   0.5181
10      1.8636   1.3862   1.5408      0.1687   0.4512   0.5060
Augmentation 1000

        Loss                          Accuracy
Epoch   Adam     RMS      Adadelta    Adam     RMS      Adadelta
1       2.1696   2.0924   1.6781      0.1977   0.2002   0.2185
2       1.8438   1.8836   1.9805      0.2903   0.3444   0.1819
3       0.6541   0.5715   2.1301      0.7589   0.8146   0.2037
4       0.3937   0.2810   2.2271      0.8594   0.9130   0.2048
5       0.2672   0.5184   2.1739      0.9143   0.8204   0.2185
6       0.1917   0.3023   2.1718      0.9371   0.8959   0.2265
7       0.1839   0.5667   2.1705      0.9394   0.8124   0.2174
8       0.3926   0.6449   2.1582      0.8629   0.7860   0.2162
9       0.1942   0.3838   2.1500      0.9371   0.8822   0.2208
10      0.2694   0.2463   2.1512      0.9109   0.9188   0.2185


By accuracy

Statistic            Adadelta    RMS            Adam
Mean                 0.242728    0.476968943    0.41183
Standard Error       0.015141    0.032842239    0.040217347
Median               0.2065      0.408536583    0.2474
Mode                 0.1948      0.1818         0.1325
Standard Deviation   0.11728     0.254394889    0.311522227
Sample Variance      0.013755    0.06471676     0.097046098
Kurtosis             1.124483    -1.17664266    -1.23867344
Skewness             1.646075    0.521392647    0.734981109
Range                0.384       0.7415         0.843
Minimum              0.1341      0.1818         0.0964
Maximum              0.5181      0.9233         0.9394
Sum                  14.5637     28.61813655    24.7098
Count                60          60             60

By loss

Statistic            Adadelta    RMS         Adam
Mean                 1.900595    1.295531    1.178765
Standard Error       0.03206     0.070902    0.088018
Median               1.86125     1.51266     1.22475
Mode                 1.5149      1.7381      #N/A
Standard Deviation   0.248334    0.549201    0.681781
Sample Variance      0.06167     0.301622    0.464825
Kurtosis             -1.41065    -0.8699     -1.64846
Skewness             -0.11068    -0.76554    -0.13948
Range                0.7263      1.8562      1.9857
Minimum              1.5149      0.2362      0.1839
Maximum              2.2412      2.0924      2.1696
Sum                  114.0357    77.73188    70.7259
Count                60          60          60
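For reference, the Excel-style summary statistics above can be reproduced in pandas. A minimal sketch, assuming the 60 per-epoch accuracy (or loss) values per optimizer have been collected into one DataFrame column apiece (the acc_df name and its columns are hypothetical, not part of the original notebook):

import pandas as pd

# Hypothetical input: acc_df = pd.DataFrame({'Adam': [...], 'RMS': [...], 'Adadelta': [...]})
def describe_column(s: pd.Series) -> pd.Series:
    return pd.Series({
        'Mean': s.mean(),
        'Standard Error': s.sem(),      # standard error of the mean
        'Median': s.median(),
        'Mode': s.mode().iloc[0],       # Excel shows #N/A when no value repeats; pandas still returns one
        'Standard Deviation': s.std(),  # sample (n-1) standard deviation
        'Sample Variance': s.var(),
        'Kurtosis': s.kurt(),           # excess kurtosis, same formula as Excel's KURT
        'Skewness': s.skew(),
        'Range': s.max() - s.min(),
        'Minimum': s.min(),
        'Maximum': s.max(),
        'Sum': s.sum(),
        'Count': s.count(),
    })

# stats_table = acc_df.apply(describe_column)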
!pip install tensorflow==2.9.1
# Commonly used libraries
from PIL import Image
import pandas as pd
import numpy as np
import random
from random import sample, shuffle
import cv2, os, itertools, shutil
from tqdm.notebook import tqdm as tq
from google.colab import files

# Libraries for visualization
%matplotlib inline
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import seaborn as sns

# Libraries for image data processing
import cv2
from PIL import Image
import skimage
from skimage import io
from skimage.transform import resize
from skimage.transform import rotate, AffineTransform, warp
from skimage import img_as_ubyte
from skimage.exposure import adjust_gamma
from skimage.util import random_noise

# Libraries for modeling and evaluation
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.applications import EfficientNetB7
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras import Model, layers

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
# Create a dictionary storing the images for each class in the training data
Nutrient_dataset = {}

# Define source path
path = "NutrientDataset/"
for i in os.listdir(path):
    Nutrient_dataset[i] = os.listdir(os.path.join(path, i))

# Randomly display 5 images from each category in the training data.
# You will see different images each time.
fig, axs = plt.subplots(len(Nutrient_dataset.keys()), 5, figsize=(15, 15))
for i, item in enumerate(os.listdir(path)):
    images = sample(Nutrient_dataset[item], 5)
    for j, image in enumerate(images):
        img = Image.open(os.path.join(path, item, image))
        axs[i, j].imshow(img)
        axs[i, j].set(xlabel=item, xticks=[], yticks=[])

fig.tight_layout()

# Define source path
path = "NutrientDataset/"

# Create lists to store the filenames, full paths, and labels
filename = []
labels = []
fullpath = []

# Collect the image filenames, file paths, and labels one by one in a loop, and store them in a dataframe
for path, subdirs, files in os.walk(path):
    for name in files:
        fullpath.append(os.path.join(path, name))
        labels.append(path.split('/')[-1])
        filename.append(name)

distribution_train = pd.DataFrame({"path": fullpath, 'file_name': filename, "labels": labels})

# Plot the distribution of images across the classes
Label = distribution_train['labels']
# plt.figure(figsize = (6,6))
# sns.set_style("darkgrid")
# plot_data = sns.countplot(Label)
# Create a function to perform anticlockwise rotation
def anticlockwise_rotation(img):
    img = cv2.cvtColor(img, 0)
    img = cv2.resize(img, (224, 224))
    angle = random.randint(0, 180)
    return rotate(img, angle)

# Create a function to perform clockwise rotation
def clockwise_rotation(img):
    img = cv2.cvtColor(img, 0)
    img = cv2.resize(img, (224, 224))
    angle = random.randint(0, 180)
    return rotate(img, -angle)

# Create a function to flip images up and down
def flip_up_down(img):
    img = cv2.cvtColor(img, 0)
    img = cv2.resize(img, (224, 224))
    return np.flipud(img)

# Create a function to brighten the image via gamma adjustment
def add_brightness(img):
    img = cv2.cvtColor(img, 0)
    img = cv2.resize(img, (224, 224))
    img = adjust_gamma(img, gamma=0.5, gain=1)
    return img

# Create a function to blur the image
def blur_image(img):
    img = cv2.cvtColor(img, 0)
    img = cv2.resize(img, (224, 224))
    return cv2.GaussianBlur(img, (9, 9), 0)

# Create a function to give the image a sheared effect
def sheared(img):
    img = cv2.cvtColor(img, 0)
    img = cv2.resize(img, (224, 224))
    transform = AffineTransform(shear=0.2)
    shear_image = warp(img, transform, mode="wrap")
    return shear_image

# Create a function to perform warp shifts
def warp_shift(img):
    img = cv2.cvtColor(img, 0)
    img = cv2.resize(img, (224, 224))
    transform = AffineTransform(translation=(0, 40))
    warp_image = warp(img, transform, mode="wrap")
    return warp_image
# Create a transformations dictionary that holds all the preprocessing functions defined above
transformations = {'rotate anticlockwise': anticlockwise_rotation,
                   'rotate clockwise': clockwise_rotation,
                   'warp shift': warp_shift,
                   'blurring image': blur_image,
                   'add brightness': add_brightness,
                   'flip up down': flip_up_down,
                   'shear image': sheared}

images_path="NutrientDataset/Iron" # Path for the original image


augmented_path="NutrientDataset/Iron" # Path to put the augmented image
images=[] # To save images that have been preprocessed from the folder

# Read image name from folder and add path into "images" array
for im in os.listdir(images_path):
images.append(os.path.join(images_path,im))

# The number of images that will be added with the results of the augmentation transformatio
n, the number is adjusted according to needs
# Variable to iterate up to a predefined number of images_to_generate
images_to_generate=1000
i=1

while i<=images_to_generate:
image=random.choice(images)
try:
original_image = io.imread(image)
transformed_image=None
n=0 # Variables to iterate up to the number of transformations to apply
transformation_count = random.randint(1, len(transformations)) # Choose the number of
random transformations to apply to the image
while n <= transformation_count:
key = random.choice(list(transformations)) # Randomly select and call methods
transformed_image = transformations[key](original_image)
n=n+1

new_image_path= "%s/augmented_image_%s.jpg" %(augmented_path, i)


transformed_image = img_as_ubyte(transformed_image) # Convert images to unsigned
byte format, with values in [0, 255]
cv2.imwrite(new_image_path, transformed_image) # Save the result of the augmentatio
n transformation on the image into the specified path
i =i+1
except ValueError as e:
print('could not read the',image ,':',e,'hence skipping it.')
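The same augmentation loop is repeated below for each remaining class folder. A minimal sketch that consolidates it into one reusable helper (augment_class is a hypothetical name; it assumes the transformations dict above and the same source-equals-destination folder layout):

def augment_class(class_dir, images_to_generate=1000):
    # The class folder is both the source of originals and the destination for augmented copies
    images = [os.path.join(class_dir, im) for im in os.listdir(class_dir)]
    i = 1
    while i <= images_to_generate:
        image = random.choice(images)
        try:
            original_image = io.imread(image)
            transformed_image = None
            # Apply a random number of randomly chosen transformations to the original image
            for _ in range(random.randint(1, len(transformations))):
                key = random.choice(list(transformations))
                transformed_image = transformations[key](original_image)
            cv2.imwrite("%s/augmented_image_%s.jpg" % (class_dir, i),
                        img_as_ubyte(transformed_image))
            i += 1
        except ValueError as e:
            print('could not read the', image, ':', e, 'hence skipping it.')

# e.g. for c in ['Iron', 'Magnesium', 'Nitrogen', 'Potassium', 'Zinc']:
#          augment_class(os.path.join("NutrientDataset", c))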
# Create a transformations dictionary that holds all the preprocessing functions defined above
transformations = {'rotate anticlockwise': anticlockwise_rotation,
                   'rotate clockwise': clockwise_rotation,
                   'warp shift': warp_shift,
                   'blurring image': blur_image,
                   'add brightness': add_brightness,
                   'flip up down': flip_up_down,
                   'shear image': sheared}

images_path = "NutrientDataset/Magnesium"      # Path to the original images
augmented_path = "NutrientDataset/Magnesium"   # Path for the augmented images
images = []                                    # Holds the image paths read from the folder

# Read image names from the folder and add their paths to the "images" list
for im in os.listdir(images_path):
    images.append(os.path.join(images_path, im))

# Number of augmented images to generate; adjust as needed
images_to_generate = 1000
i = 1

while i <= images_to_generate:
    image = random.choice(images)
    try:
        original_image = io.imread(image)
        transformed_image = None
        n = 0  # Counts the transformations applied so far
        transformation_count = random.randint(1, len(transformations))  # Number of random transformations to apply
        while n <= transformation_count:
            key = random.choice(list(transformations))  # Randomly select a transformation and apply it to the original image
            transformed_image = transformations[key](original_image)
            n = n + 1

        new_image_path = "%s/augmented_image_%s.jpg" % (augmented_path, i)
        transformed_image = img_as_ubyte(transformed_image)  # Convert the image to unsigned byte format, values in [0, 255]
        cv2.imwrite(new_image_path, transformed_image)       # Save the augmented image to the specified path
        i = i + 1
    except ValueError as e:
        print('could not read the', image, ':', e, 'hence skipping it.')
# Remove image files in .webp format from the Magnesium directory
directory = "NutrientDataset/Magnesium"

for file_name in os.listdir(directory):
    if file_name.endswith(".webp"):
        os.remove(os.path.join(directory, file_name))
# Create a transformations dictionary that holds all the preprocessing functions defined above
transformations = {'rotate anticlockwise': anticlockwise_rotation,
                   'rotate clockwise': clockwise_rotation,
                   'warp shift': warp_shift,
                   'blurring image': blur_image,
                   'add brightness': add_brightness,
                   'flip up down': flip_up_down,
                   'shear image': sheared}

images_path = "NutrientDataset/Nitrogen"      # Path to the original images
augmented_path = "NutrientDataset/Nitrogen"   # Path for the augmented images
images = []                                   # Holds the image paths read from the folder

# Read image names from the folder and add their paths to the "images" list
for im in os.listdir(images_path):
    images.append(os.path.join(images_path, im))

# Number of augmented images to generate; adjust as needed
images_to_generate = 1000
i = 1

while i <= images_to_generate:
    image = random.choice(images)
    try:
        original_image = io.imread(image)
        transformed_image = None
        n = 0  # Counts the transformations applied so far
        transformation_count = random.randint(1, len(transformations))  # Number of random transformations to apply
        while n <= transformation_count:
            key = random.choice(list(transformations))  # Randomly select a transformation and apply it to the original image
            transformed_image = transformations[key](original_image)
            n = n + 1

        new_image_path = "%s/augmented_image_%s.jpg" % (augmented_path, i)
        transformed_image = img_as_ubyte(transformed_image)  # Convert the image to unsigned byte format, values in [0, 255]
        cv2.imwrite(new_image_path, transformed_image)       # Save the augmented image to the specified path
        i = i + 1
    except ValueError as e:
        print('could not read the', image, ':', e, 'hence skipping it.')
# Create a transformations dictionary that holds all the preprocessing functions defined above
transformations = {'rotate anticlockwise': anticlockwise_rotation,
                   'rotate clockwise': clockwise_rotation,
                   'warp shift': warp_shift,
                   'blurring image': blur_image,
                   'add brightness': add_brightness,
                   'flip up down': flip_up_down,
                   'shear image': sheared}

images_path = "NutrientDataset/Potassium"      # Path to the original images
augmented_path = "NutrientDataset/Potassium"   # Path for the augmented images
images = []                                    # Holds the image paths read from the folder

# Read image names from the folder and add their paths to the "images" list
for im in os.listdir(images_path):
    images.append(os.path.join(images_path, im))

# Number of augmented images to generate; adjust as needed
images_to_generate = 1000
i = 1

while i <= images_to_generate:
    image = random.choice(images)
    try:
        original_image = io.imread(image)
        transformed_image = None
        n = 0  # Counts the transformations applied so far
        transformation_count = random.randint(1, len(transformations))  # Number of random transformations to apply
        while n <= transformation_count:
            key = random.choice(list(transformations))  # Randomly select a transformation and apply it to the original image
            transformed_image = transformations[key](original_image)
            n = n + 1

        new_image_path = "%s/augmented_image_%s.jpg" % (augmented_path, i)
        transformed_image = img_as_ubyte(transformed_image)  # Convert the image to unsigned byte format, values in [0, 255]
        cv2.imwrite(new_image_path, transformed_image)       # Save the augmented image to the specified path
        i = i + 1
    except ValueError as e:
        print('could not read the', image, ':', e, 'hence skipping it.')
# Create a transformations dictionary that holds all the preprocessing functions defined above
transformations = {'rotate anticlockwise': anticlockwise_rotation,
                   'rotate clockwise': clockwise_rotation,
                   'warp shift': warp_shift,
                   'blurring image': blur_image,
                   'add brightness': add_brightness,
                   'flip up down': flip_up_down,
                   'shear image': sheared}

images_path = "NutrientDataset/Zinc"      # Path to the original images
augmented_path = "NutrientDataset/Zinc"   # Path for the augmented images
images = []                               # Holds the image paths read from the folder

# Read image names from the folder and add their paths to the "images" list
for im in os.listdir(images_path):
    images.append(os.path.join(images_path, im))

# Number of augmented images to generate; adjust as needed
images_to_generate = 1000
i = 1

while i <= images_to_generate:
    image = random.choice(images)
    try:
        original_image = io.imread(image)
        transformed_image = None
        n = 0  # Counts the transformations applied so far
        transformation_count = random.randint(1, len(transformations))  # Number of random transformations to apply
        while n <= transformation_count:
            key = random.choice(list(transformations))  # Randomly select a transformation and apply it to the original image
            transformed_image = transformations[key](original_image)
            n = n + 1

        new_image_path = "%s/augmented_image_%s.jpg" % (augmented_path, i)
        transformed_image = img_as_ubyte(transformed_image)  # Convert the image to unsigned byte format, values in [0, 255]
        cv2.imwrite(new_image_path, transformed_image)       # Save the augmented image to the specified path
        i = i + 1
    except ValueError as e:
        print('could not read the', image, ':', e, 'hence skipping it.')
# Define source path
path = "NutrientDataset/"

# Collect the image filenames, file paths, and labels one by one in a loop
filename = []
labels = []
fullpath = []
for path, subdirs, files in os.walk(path):
    for name in files:
        fullpath.append(os.path.join(path, name))
        labels.append(path.split('/')[-1])
        filename.append(name)

# Put the variables collected in the loop above into a dataframe
df_nutrient = pd.DataFrame({"path": fullpath, 'file_name': filename, "labels": labels})
# Check the number of images under each label
df_nutrient.groupby(['labels']).size()
# Variables used for the split: X = image paths, y = image labels
X_nutrient = df_nutrient['path']
y_nutrient = df_nutrient['labels']

# Split the initial dataset into training and testing data
# Use a proportion of 80% for training data and 20% for testing data
X_train, X_test, y_train, y_test = train_test_split(
    X_nutrient, y_nutrient, test_size=0.2, random_state=42)
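If class balance matters at evaluation time, a stratified split (an optional variant, not used for the runs above) keeps the label proportions identical in both sets:

# Optional stratified variant of the split above
X_train, X_test, y_train, y_test = train_test_split(
    X_nutrient, y_nutrient, test_size=0.2, random_state=42,
    stratify=y_nutrient)  # preserve per-class proportions in train and test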
# Insert X_train/y_train and X_test/y_test, obtained from the dataset split, into their own dataframes
df_train_nutrient = pd.DataFrame({'path': X_train, 'labels': y_train, 'set': 'train'})
df_test_nutrient = pd.DataFrame({'path': X_test, 'labels': y_test, 'set': 'test'})
# Print the lengths of the training and testing sets
print('Number of training samples', len(df_train_nutrient))
print('Number of testing samples', len(df_test_nutrient))
# Look at the proportions in each set to decide whether they need adjusting
df_all_nutrient = pd.concat([df_train_nutrient, df_test_nutrient]).reset_index(drop=True)

print('===================================================== \n')
print(df_all_nutrient.groupby(['set', 'labels']).size(), '\n')
print('===================================================== \n')

# Check a sample of the data
df_all_nutrient.sample(5)
# Source dataset directory that contains all image data grouped by label
ORIGINAL_DIR = "NutrientDataset/"
# Destination directory that will store the data after it has been split into training and testing sets
FINAL_DIR = "dataset-final/"
for index, row in tq(df_all_nutrient.iterrows()):
    # Filepath detection
    file_path = row['path']
    if os.path.exists(file_path) == False:
        file_path = os.path.join(ORIGINAL_DIR, row['labels'], row['path'].split('/')[-1])

    # Create the destination directory if it does not exist yet
    if os.path.exists(os.path.join(FINAL_DIR, row['set'], row['labels'])) == False:
        os.makedirs(os.path.join(FINAL_DIR, row['set'], row['labels']))

    # Define the file destination
    destination_file_name = file_path.split('/')[-1]
    file_dest = os.path.join(FINAL_DIR, row['set'], row['labels'], destination_file_name)
    # Copy the file from source to destination
    if os.path.exists(file_dest) == False:
        shutil.copy2(file_path, file_dest)
# Define training and testing directories
TRAINING_DIR = "dataset-final/train/"
TESTING_DIR = "dataset-final/test/"

# Default image height and width
img_width, img_height = 224, 224

# Number of epochs for the training process
epochs = 10

# Batch size used by the ImageDataGenerator and model prediction
batch_size = 16

# Image channels: 1 = grayscale, 3 = RGB
channel = 3
# Create an ImageDataGenerator for the training and validation data
# Validation data comes from a 20% share of the training data
datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)
# Create an ImageDataGenerator for the testing data
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = datagen.flow_from_directory(TRAINING_DIR,
                                              batch_size=batch_size,
                                              color_mode='rgb',
                                              target_size=(img_width, img_height),
                                              class_mode='categorical',
                                              subset='training',
                                              shuffle=True)

validation_generator = datagen.flow_from_directory(TRAINING_DIR,
                                                   batch_size=batch_size,
                                                   color_mode='rgb',
                                                   target_size=(img_width, img_height),
                                                   class_mode='categorical',
                                                   subset='validation',
                                                   shuffle=False)

test_generator = test_datagen.flow_from_directory(TESTING_DIR,
                                                  batch_size=batch_size,
                                                  color_mode='rgb',
                                                  target_size=(img_width, img_height),
                                                  class_mode='categorical',
                                                  shuffle=False)
conv_base_effnet = EfficientNetB7(include_top=False, weights='imagenet',
                                  input_shape=(img_width, img_height, channel))

# Make every layer of the pretrained base trainable (full fine-tuning)
for layer in conv_base_effnet.layers:
    layer.trainable = True

x = conv_base_effnet.output
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(64, activation='relu')(x)
x = BatchNormalization()(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(64, activation='relu')(x)
x = BatchNormalization()(x)
predictions = layers.Dense(5, activation='softmax')(x)
model_effnet = Model(conv_base_effnet.input, predictions)
# Compile the model
model_effnet.compile(loss='categorical_crossentropy',
                     optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                     metrics=['accuracy'])

# Summary of the model architecture
model_effnet.summary()
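The result tables at the top compare Adam, RMS (RMSprop), and Adadelta, while the notebook shows only the Adam compile. A minimal sketch of how the other runs could be produced (build_model is a hypothetical helper; the RMSprop learning rate and the Adadelta defaults are assumptions, not taken from the original):

def build_model():
    # Rebuild the network so each optimizer starts from fresh ImageNet weights
    base = EfficientNetB7(include_top=False, weights='imagenet',
                          input_shape=(img_width, img_height, channel))
    x = layers.GlobalAveragePooling2D()(base.output)
    x = layers.Dense(64, activation='relu')(x)
    x = BatchNormalization()(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(64, activation='relu')(x)
    x = BatchNormalization()(x)
    return Model(base.input, layers.Dense(5, activation='softmax')(x))

histories = {}
for name, opt in {'Adam': tf.keras.optimizers.Adam(learning_rate=0.0001),
                  'RMS': tf.keras.optimizers.RMSprop(learning_rate=0.0001),  # assumed lr
                  'Adadelta': tf.keras.optimizers.Adadelta()}.items():       # library defaults
    m = build_model()
    m.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    histories[name] = m.fit(train_generator, epochs=epochs,
                            validation_data=validation_generator)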
%%time

# Early stopping callback (not used for these runs)
# early_stop = EarlyStopping(monitor='val_loss', patience=2)

# Fit / train the model; the generators already yield batches, so no batch_size is passed here
history_effnet = model_effnet.fit(train_generator,
                                  epochs=epochs,
                                  # callbacks=[early_stop],
                                  validation_data=validation_generator)
# Plot the training/validation accuracy and training/validation loss
acc = history_effnet.history['accuracy']
val_acc = history_effnet.history['val_accuracy']
loss = history_effnet.history['loss']
val_loss = history_effnet.history['val_loss']
epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation EfficientNetB7 accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.figure()

plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation EfficientNetB7 loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend()
plt.show()
# Prediction using testing data
test_generator.reset()
preds_effnet = model_effnet.predict(test_generator,verbose=0)
test_labels = [np.argmax(pred) for pred in preds_effnet]

cm = pd.DataFrame(data=confusion_matrix(test_generator.classes, test_labels,
                                        labels=[0, 1, 2, 3, 4]),
                  index=['Actual Iron', 'Actual Magnesium',
                         'Actual Nitrogen', 'Actual Potassium',
                         'Actual Zinc'],
                  columns=['Predicted Iron', 'Predicted Magnesium',
                           'Predicted Nitrogen', 'Predicted Potassium',
                           'Predicted Zinc'])
sns.heatmap(cm,annot=True,fmt="d")
plt.show()
# Print Classification Report
print(classification_report(y_true=test_generator.classes, y_pred=test_labels,
                            target_names=['Iron', 'Magnesium', 'Nitrogen',
                                          'Potassium', 'Zinc'], digits=4))
# Save model
model_effnet.save('Nutrient-Model.h5')
# Display prediction results on the testing data
probabilities = model_effnet.predict(test_generator, 30, verbose=0)
class_names = list(test_generator.class_indices.keys())

for index, probability in enumerate(probabilities):
    image_path = TESTING_DIR + "/" + test_generator.filenames[index]
    img = mpimg.imread(image_path)
    plt.imshow(img)

    # Get the top predicted class and its probability
    top_class_index = np.argmax(probability)
    top_class_prob = probability[top_class_index]
    top_class_name = class_names[top_class_index]
    # Set the title to display the top predicted class and its probability
    plt.title(f"{top_class_name}: {top_class_prob:.2f}")
    plt.show()
# Display prediction results on new data
model = tf.keras.models.load_model('Nutrient-Model.h5')

uploaded = files.upload()

for fn in uploaded.keys():
    path = fn
    img = image.load_img(path, color_mode="rgb", target_size=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = img / 255

    images = np.vstack([img])

    classes = list(train_generator.class_indices.keys())
    pred = model.predict(images, verbose=0)
    index = np.argmax(pred[0])
    probability = round(pred[0][index] * 100, 2)

    plt.figure(figsize=(6, 6))
    plt.axis('off')
    title = f'Predict : {probability}% {classes[index]}'
    plt.title(title)
    plt.imshow(np.squeeze(images))
    plt.show()
