Professional Documents
Culture Documents
Adam RMS Adadelta Adam RMS Adadelta Adam RMS Adadelta Adam RMS Adadelta
1
1.7079 1.6215 1.7226 0.1883 0.1818 0.1883 0.2018 1.6500 1.6459 0.0964 0.3976 0.2195
2
1.8406 1.5968 1.9851 0.1883 0.2078 0.1948 0.4510 1.5387 1.7372 0.1446 0.3855 0.1341
3
1.9398 1.7613 1.9613 0.1883 0.1818 0.2143 0.5490 1.6755 1.8160 0.1807 0.3253 0.1341
4
1.9362 1.9491 1.8240 0.1818 0.1818 0.1818 0.6409 1.4515 1.8585 0.1084 0.4819 0.1341
5
1.9649 1.8562 1.9678 0.2013 0.2468 0.1753 0.7033 1.4744 1.8474 0.1687 0.5060 0.1463
6
1.7034 1.7610 2.2223 0.2987 0.2727 0.1623 0.7596 1.4210 1.7919 0.2530 0.5542 0.1585
7
1.3954 1.4752 2.2228 0.4545 0.4156 0.2013 0.7923 1.5128 1.7363 0.2651 0.4940 0.1829
8
1.1738 1.6158 2.1599 0.5584 0.3831 0.1364 0.8398 1.4759 1.7541 0.2530 0.4337 0.1707
9
1.0914 1.2059 2.2313 0.5714 0.5455 0.1753 0.8249 1.5641 1.8411 0.2651 0.5181 0.1707
10
1.1499 1.2138 2.2412 0.5584 0.5779 0.2273 0.8932 1.5813 1.8648 0.3253 0.4699 0.2317
Augmentasi 1000
Loss Accuracy
Epochs
Adam RMS Adadelta Adam RMS Adadelta
1
1.2757 1.7075 1.9729 0.5217 0.3204 0.2117
2
0.7366 0.7899 2.1887 0.7277 0.7243 0.2071
3
0.4222 0.7991 2.1763 0.8513 0.6968 0.2071
4
0.3950 0.4571 2.1909 0.8650 0.8547 0.2025
5
0.3216 0.3783 2.1721 0.8982 0.8810 0.2094
6
7 0.2953 0.3424 2.1935 0.8936 0.8924 0.2059
Adam RMS Adadelta Adam RMS Adadelta Adam RMS Adadelta Adam RMS Adadelta
1
1.6582 1.6135 1.6265 0.1765 0.1830 0.1948 1.6563 1.5976 1.5536 0.1325 0.2195 0.4699
2
1.7062 1.6055 1.6631 0.1830 0.2352 0.1948 1.5842 1.5927 1.5174 0.1325 0.3293 0.4940
3
1.7428 1.6151 1.7436 0.1830 0.2222 0.1818 1.7744 1.5453 1.5200 0.1325 0.2439 0.4940
4
1.7622 1.6752 1.7980 0.1830 0.2222 0.1688 1.8561 1.5125 1.5149 0.1325 0.3415 0.4940
5
1.7042 1.7381 1.7891 0.1830 0.2026 0.1364 1.9239 1.5365 1.5149 0.1325 0.3537 0.4819
6
1.7580 1.7381 1.7416 0.1895 0.1830 0.1623 2.0022 1.5078 1.5281 0.1325 0.4024 0.4940
7
1.7778 1.7426 1.7489 0.1569 0.2157 0.1948 2.0933 1.4690 1.5414 0.1566 0.3902 0.5060
8
1.8197 1.9058 1.8183 0.2157 0.1830 0.2078 2.0952 1.4531 1.5416 0.1446 0.4146 0.5060
9
1.9210 2.0161 1.8640 0.2418 0.1961 0.1818 2.0318 1.2826 1.5406 0.1687 0.5122 0.5181
10
1.7341 1.9107 1.9569 0.2876 0.2549 0.2208 1.8636 1.3862 1.5408 0.1687 0.4512 0.5060
Augmentasi 1000
Loss Accuracy
Epochs
Adam RMS Adadelta Adam RMS Adadelta
1
2.1696 2.0924 1.6781 0.1977 0.2002 0.2185
2
1.8438 1.8836 1.9805 0.2903 0.3444 0.1819
3
0.6541 0.5715 2.1301 0.7589 0.8146 0.2037
4
0.3937 0.2810 2.2271 0.8594 0.9130 0.2048
5
0.2672 0.5184 2.1739 0.9143 0.8204 0.2185
6
7 0.1917 0.3023 2.1718 0.9371 0.8959 0.2265
Berdasarkan loss
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
# Create a dictionary mapping each class in the training data to its images.
# NOTE(review): the code that populates Nutrient_dataset appears to be missing
# from this excerpt — as written the dict is empty, so the subplot grid below
# would have zero rows and sample() would fail. Confirm against the original
# notebook.
Nutrient_dataset = {}
# Randomly display 5 images under each of the 6 categories from the training
# data. You will see different images each time.
# (Indentation of this loop was lost in extraction and has been restored.)
fig, axs = plt.subplots(len(Nutrient_dataset.keys()), 5, figsize=(15, 15))
for i, item in enumerate(os.listdir(path)):
    # Pick 5 random image filenames for this class.
    images = sample(Nutrient_dataset[item], 5)
    for j, image in enumerate(images):
        img = Image.open(os.path.join(path, item, image))
        axs[i, j].imshow(img)
        # Label the column with the class name; hide axis ticks.
        axs[i, j].set(xlabel=item, xticks=[], yticks=[])
fig.tight_layout()
# Lists collecting, for every image in the training tree: its filename, its
# full path, and its class label (taken from the parent directory name).
filename = []
labels = []
fullpath = []
# Get image filenames, filepaths and labels one by one by walking the
# directory tree, then store them as a dataframe.
# (The original comment was line-wrapped by extraction, leaving a stray
# `rame` statement that would raise NameError; fixed here.)
for path, subdirs, files in os.walk(path):
    for name in files:
        fullpath.append(os.path.join(path, name))
        # Label = last path component; assumes '/' separators (POSIX) —
        # TODO confirm behaviour on Windows paths.
        labels.append(path.split('/')[-1])
        filename.append(name)
distribution_train = pd.DataFrame({"path": fullpath, 'file_name': filename, "labels": labels})
# --- Image augmentation loop ---
# NOTE(review): this fragment is damaged by text extraction: indentation was
# lost, wrapped comment text spilled onto code lines, and the try: block is
# truncated (no except clause, no save of the transformed image, no increment
# of i). Restore from the original notebook before running.
# Read image name from folder and add path into "images" array
for im in os.listdir(images_path):
images.append(os.path.join(images_path,im))
# The number of images that will be added with the results of the augmentation transformatio
n, the number is adjusted according to needs
# NOTE(review): the bare `n, ...` line above is comment spillover, not code.
# Variable to iterate up to a predefined number of images_to_generate
images_to_generate=1000
i=1
while i<=images_to_generate:
image=random.choice(images)
try:
original_image = io.imread(image)
transformed_image=None
n=0 # Variables to iterate up to the number of transformations to apply
transformation_count = random.randint(1, len(transformations)) # Choose the number of
random transformations to apply to the image
# NOTE(review): the `random transformations ...` line above is spillover
# from the wrapped comment on the preceding line, not code.
while n <= transformation_count:
key = random.choice(list(transformations)) # Randomly select and call methods
transformed_image = transformations[key](original_image)
n=n+1
# NOTE(review): the following lines repeat the augmentation fragment above
# four more times, each truncated progressively earlier. This is almost
# certainly a duplication artifact of the PDF/text extraction, not real code.
# Verify against the original notebook; do not run as-is.
# Read image name from folder and add path into "images" array
for im in os.listdir(images_path):
images.append(os.path.join(images_path,im))
# The number of images that will be added with the results of the augmentation transformatio
n, the number is adjusted according to needs
# Variable to iterate up to a predefined number of images_to_generate
images_to_generate=1000
i=1
while i<=images_to_generate:
image=random.choice(images)
try:
original_image = io.imread(image)
transformed_image=None
n=0 # Variables to iterate up to the number of transformations to apply
transformation_count = random.randint(1, len(transformations)) # Choose the number of
random transformations to apply to the image
# NOTE(review): duplicate fragment (extraction artifact).
# Read image name from folder and add path into "images" array
for im in os.listdir(images_path):
images.append(os.path.join(images_path,im))
# The number of images that will be added with the results of the augmentation transformatio
n, the number is adjusted according to needs
# Variable to iterate up to a predefined number of images_to_generate
images_to_generate=1000
i=1
while i<=images_to_generate:
image=random.choice(images)
try:
original_image = io.imread(image)
transformed_image=None
n=0 # Variables to iterate up to the number of transformations to apply
transformation_count = random.randint(1, len(transformations)) # Choose the number of
random transformations to apply to the image
# NOTE(review): duplicate fragment (extraction artifact).
# Read image name from folder and add path into "images" array
for im in os.listdir(images_path):
images.append(os.path.join(images_path,im))
# The number of images that will be added with the results of the augmentation transformatio
n, the number is adjusted according to needs
# Variable to iterate up to a predefined number of images_to_generate
images_to_generate=1000
i=1
while i<=images_to_generate:
image=random.choice(images)
try:
original_image = io.imread(image)
transformed_image=None
n=0 # Variables to iterate up to the number of transformations to apply
transformation_count = random.randint(1, len(transformations)) # Choose the number of
random transformations to apply to the image
# NOTE(review): duplicate fragment (extraction artifact).
# Read image name from folder and add path into "images" array
for im in os.listdir(images_path):
images.append(os.path.join(images_path,im))
# The number of images that will be added with the results of the augmentation transformatio
n, the number is adjusted according to needs
# Variable to iterate up to a predefined number of images_to_generate
images_to_generate=1000
i=1
while i<=images_to_generate:
image=random.choice(images)
try:
original_image = io.imread(image)
transformed_image=None
n=0 # Variables to iterate up to the number of transformations to apply
transformation_count = random.randint(1, len(transformations)) # Choose the number of
random transformations to apply to the image
# Get image filenames, filepaths and labels one by one by walking the
# directory tree, then store them as a dataframe.
# (The original comments were line-wrapped by extraction, leaving stray
# `rame` / `me` statements that would raise NameError; fixed here.)
filename = []
labels = []
fullpath = []
for path, subdirs, files in os.walk(path):
    for name in files:
        fullpath.append(os.path.join(path, name))
        # Label = last path component; assumes '/' separators (POSIX) —
        # TODO confirm behaviour on Windows paths.
        labels.append(path.split('/')[-1])
        filename.append(name)
# Put the variables collected in the loop above into a dataframe.
df_nutrient = pd.DataFrame({"path": fullpath, 'file_name': filename, "labels": labels})
# Inspect the number of images under each label.
df_nutrient.groupby(['labels']).size()
# Variables used in this data separation where variable x = data path and y = data labels
X_nutrient = df_nutrient['path']
y_nutrient = df_nutrient['labels']
print('===================================================== \n')
# NOTE(review): df_all_nutrient is not defined in this excerpt — presumably a
# combined frame with a 'set' (train/val/test) column; confirm against the
# full notebook.
print(df_all_nutrient.groupby(['set','labels']).size(),'\n')
print('===================================================== \n')
# Filepath detection: fall back to reconstructing the path from the original
# data directory when the recorded path no longer exists.
# NOTE(review): `row` and ORIGINAL_DIR come from surrounding code not shown
# in this excerpt (likely a DataFrame iteration) — confirm.
file_path = row['path']
if not os.path.exists(file_path):  # idiomatic form of `== False`
    # NOTE(review): split('.')[0] drops the file extension — verify this is
    # intentional for the reconstructed path.
    file_path = os.path.join(ORIGINAL_DIR, row['labels'], row['image'].split('.')[0])
# Batch size used for the ImageDataGenerator flows and for model prediction.
batch_size = 16

# Keyword arguments shared by all three directory flows below.
_flow_kwargs = dict(
    batch_size=batch_size,
    color_mode='rgb',
    target_size=(img_width, img_height),
    class_mode='categorical',
)

# Training split: shuffled so each epoch sees the images in a new order.
train_generator = datagen.flow_from_directory(
    TRAINING_DIR, subset='training', shuffle=True, **_flow_kwargs)

# Validation split: unshuffled so predictions stay aligned with labels.
validation_generator = datagen.flow_from_directory(
    TRAINING_DIR, subset='validation', shuffle=False, **_flow_kwargs)

# Test data: separate directory/generator, unshuffled for evaluation.
test_generator = test_datagen.flow_from_directory(
    TESTING_DIR, shuffle=False, **_flow_kwargs)
# EfficientNetB7 convolutional base pre-trained on ImageNet; the classifier
# head is excluded so a custom head can be attached for the 5 nutrient
# classes. (The identifier `img_width` was split across lines by extraction,
# producing a SyntaxError; rejoined here.)
conv_base_effnet = EfficientNetB7(include_top=False, weights='imagenet',
                                  input_shape=(img_width, img_height, channel))
# Confusion matrix for the five nutrient-deficiency classes, wrapped in a
# labelled DataFrame so the heatmap axes are readable.
_class_names = ['Iron', 'Magnesium', 'Nitrogen', 'Potassium', 'Zinc']
cm = pd.DataFrame(
    data=confusion_matrix(test_generator.classes, test_labels,
                          labels=[0, 1, 2, 3, 4]),
    index=[f'Actual {c}' for c in _class_names],
    columns=[f'Predicted {c}' for c in _class_names],
)
# Annotate each cell with its integer count.
sns.heatmap(cm, annot=True, fmt="d")
plt.show()
# Print per-class precision/recall/F1 with 4-decimal precision.
print(classification_report(
    y_true=test_generator.classes,
    y_pred=test_labels,
    target_names=['Iron', 'Magnesium', 'Nitrogen',
                  'Potassium', 'Zinc'],
    digits=4,
))
# Persist the trained EfficientNet model to disk.
model_effnet.save('Nutrient-Model.h5')
# Display result of prediction on the testing data: collect per-class
# probabilities for every test sample.
probabilities = model_effnet.predict(test_generator, batch_size=30, verbose=0)
# Class labels in index order (iterating the dict yields its keys).
class_names = [*test_generator.class_indices]
plt.show()
# Display result of prediction on newly uploaded data.
# (Loop-body indentation was lost in extraction — a SyntaxError as written —
# and has been restored here.)
model = tf.keras.models.load_model('Nutrient-Model.h5')
uploaded = files.upload()
for fn in uploaded:  # iterate the dict directly instead of .keys()
    # Load and preprocess the uploaded image the same way as training data.
    path = fn
    img = image.load_img(path, color_mode="rgb", target_size=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = img / 255  # rescale to [0, 1] to match the training generator
    images = np.vstack([img])
    classes = list(train_generator.class_indices.keys())
    # Predict and pick the most probable class.
    pred = model.predict(images, verbose=0)
    index = np.argmax(pred[0])
    probability = round(pred[0][index] * 100, 2)
    # Show the image with the predicted class and confidence as the title.
    plt.figure(figsize=(6, 6))
    plt.axis('off')
    title = f'Predict : {probability}% {classes[index]}'
    plt.title(title)
    plt.imshow(np.squeeze(images))
    plt.show()
# Display result of prediction on newly uploaded data.
# NOTE(review): this cell duplicates the upload-and-predict cell above —
# likely an extraction artifact; confirm against the original notebook.
# (Loop-body indentation was lost in extraction and has been restored here.)
model = tf.keras.models.load_model('Nutrient-Model.h5')
uploaded = files.upload()
for fn in uploaded:  # iterate the dict directly instead of .keys()
    # Load and preprocess the uploaded image the same way as training data.
    path = fn
    img = image.load_img(path, color_mode="rgb", target_size=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = img / 255  # rescale to [0, 1] to match the training generator
    images = np.vstack([img])
    classes = list(train_generator.class_indices.keys())
    # Predict and pick the most probable class.
    pred = model.predict(images, verbose=0)
    index = np.argmax(pred[0])
    probability = round(pred[0][index] * 100, 2)
    # Show the image with the predicted class and confidence as the title.
    plt.figure(figsize=(6, 6))
    plt.axis('off')
    title = f'Predict : {probability}% {classes[index]}'
    plt.title(title)
    plt.imshow(np.squeeze(images))
    plt.show()
# Display result of prediction on newly uploaded data.
# NOTE(review): third copy of the upload-and-predict cell — likely an
# extraction artifact; confirm against the original notebook.
# (Loop-body indentation was lost in extraction and has been restored here.)
model = tf.keras.models.load_model('Nutrient-Model.h5')
uploaded = files.upload()
for fn in uploaded:  # iterate the dict directly instead of .keys()
    # Load and preprocess the uploaded image the same way as training data.
    path = fn
    img = image.load_img(path, color_mode="rgb", target_size=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = img / 255  # rescale to [0, 1] to match the training generator
    images = np.vstack([img])
    classes = list(train_generator.class_indices.keys())
    # Predict and pick the most probable class.
    pred = model.predict(images, verbose=0)
    index = np.argmax(pred[0])
    probability = round(pred[0][index] * 100, 2)
    # Show the image with the predicted class and confidence as the title.
    plt.figure(figsize=(6, 6))
    plt.axis('off')
    title = f'Predict : {probability}% {classes[index]}'
    plt.title(title)
    plt.imshow(np.squeeze(images))
    plt.show()