You are on page 1 of 3

import numpy as np # linear algebra

import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)


import os
import itertools
import os, stat, time
from os.path import dirname as up
import shutil
import glob
from PIL import Image
from glob import glob
# Collect every histopathology patch path in the dataset (recursive glob).
data = glob('/kaggle/input/breast-histopathology-images/**/*.png', recursive=True)
import cv2
import matplotlib.pyplot as plt

# Quick visual sanity check: display the first five patches.
# Fix: the loop body was not indented in the original, which is a SyntaxError.
for i in data[:5]:
    img = cv2.imread(i)
    img_1 = cv2.resize(img, (200, 200))
    # NOTE(review): cv2.imread returns BGR but matplotlib expects RGB, so the
    # preview colours are channel-swapped; cmap='binary' is ignored for
    # 3-channel images. Harmless for a sanity check — confirm if colours matter.
    plt.imshow(img_1, cmap='binary')
    plt.show()

# Build the training tensors from the first 15,000 patch images.
# File names end in "...class0.png" / "...class1.png", so the character five
# positions from the end is the class label ('0' or '1').
# Fix: the loop and if bodies were not indented in the original (SyntaxError).
images = []
labels = []
for i in data[:15000]:
    if i.endswith('.png'):
        label = i[-5]
        img = cv2.imread(i)
        img_1 = cv2.resize(img, (100, 100))
        images.append(img_1)
        labels.append(label)
x = np.stack(images)
from tensorflow.keras.utils import to_categorical
# One-hot encode the '0'/'1' labels -> shape (n, 2).
y = to_categorical(labels)
# Normalize pixel values to [0, 1].
x = x / 255
from sklearn.model_selection import train_test_split
# Hold out 20% as a test set; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0, test_size=0.2)
# Fix: this import statement was broken across two lines without parentheses
# or a continuation, which is a SyntaxError. All original imports are kept.
from tensorflow.keras.layers import (
    Dense, Dropout, BatchNormalization, Conv2D, MaxPool2D, Flatten)
from tensorflow.keras.models import Sequential
from tensorflow.keras import regularizers
from tensorflow.keras import layers
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras.models import Sequential
from tensorflow.keras import models, layers
def modelEfficientNetB0():
    """Build the transfer-learning classifier.

    EfficientNetB0 backbone (ImageNet weights, classification top removed)
    followed by global average pooling and a 2-unit sigmoid head matching
    the one-hot benign/malignant labels.

    Returns:
        A tf.keras Sequential model (uncompiled).
    """
    # Fix: the function body was not indented in the original (SyntaxError).
    model = models.Sequential()
    model.add(EfficientNetB0(include_top=False, weights="imagenet",
                             input_shape=(100, 100, 3)))
    model.add(layers.GlobalAveragePooling2D())
    # NOTE(review): with 2 one-hot outputs, 'softmax' is the conventional
    # pairing for categorical targets; 'sigmoid' is kept here to preserve
    # the original behaviour — confirm which was intended.
    model.add(layers.Dense(2, activation="sigmoid"))
    return model

model = modelEfficientNetB0()
model.summary()
Downloading data from https://storage.googleapis.com/keras-
applications/efficientnetb0_notop.h5
16711680/16705208 [==============================] - 0s 0us/step
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
efficientnetb0 (Functional) (None, 4, 4, 1280) 4049571
_________________________________________________________________
global_average_pooling2d (Gl (None, 1280) 0
_________________________________________________________________
dense (Dense) (None, 2) 2562
=================================================================
Total params: 4,052,133
Trainable params: 4,010,110
Non-trainable params: 42,023
_________________________________________________________________
# Fix 1: import the optimizer from tensorflow.keras — mixing the standalone
# `keras` package with a `tensorflow.keras` model fails in TF2.
# Fix 2: `lr` is deprecated (use `learning_rate`); `decay=0.0` and
# `epsilon=None` are legacy-keras arguments and are dropped (TF2 defaults apply).
from tensorflow.keras.optimizers import Adam
opt = Adam(learning_rate=0.00001, beta_1=0.9, beta_2=0.999, amsgrad=False)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['acc'])
# Fix 3: train on the training split only — the original fit on the full
# `x`, `y`, which leaks the held-out `x_test` samples into training and
# inflates the evaluation below. validation_split still carves a validation
# set out of the training data.
history = model.fit(x_train, y_train, epochs=8, validation_split=0.4, batch_size=56)
Epoch 1/8
161/161 [==============================] - 43s 150ms/step - loss: 0.6082 - acc:
0.7280 - val_loss: 0.6973 - val_acc: 0.6933
Epoch 2/8
161/161 [==============================] - 20s 121ms/step - loss: 0.4739 - acc:
0.8212 - val_loss: 0.6674 - val_acc: 0.6792
Epoch 3/8
161/161 [==============================] - 19s 121ms/step - loss: 0.4062 - acc:
0.8434 - val_loss: 0.6426 - val_acc: 0.6930
Epoch 4/8
161/161 [==============================] - 20s 123ms/step - loss: 0.3651 - acc:
0.8559 - val_loss: 0.6125 - val_acc: 0.6997
Epoch 5/8
161/161 [==============================] - 20s 123ms/step - loss: 0.3221 - acc:
0.8741 - val_loss: 0.5842 - val_acc: 0.6960
Epoch 6/8
161/161 [==============================] - 20s 123ms/step - loss: 0.3066 - acc:
0.8774 - val_loss: 0.5893 - val_acc: 0.6948
Epoch 7/8
161/161 [==============================] - 20s 124ms/step - loss: 0.2772 - acc:
0.8927 - val_loss: 0.5601 - val_acc: 0.7118
Epoch 8/8
161/161 [==============================] - 19s 120ms/step - loss: 0.2672 - acc:
0.8941 - val_loss: 0.6020 - val_acc: 0.6815
# Final evaluation on the 20% held-out test split.
loss,accuracy=model.evaluate(x_test,y_test)
94/94 [==============================] - 2s 20ms/step - loss: 0.5895 - acc: 0.6980
# Per-epoch loss curves from training history.
# Fix: the second series is the validation split carved out by model.fit,
# not the held-out test set — the original legend mislabeled it 'test_data'.
plt.figure(figsize=(12, 5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(['train', 'validation'])
plt.title('loss analysis')
plt.show()

# Per-epoch accuracy curves from training history.
# Fix: as with the loss plot, the second series is the fit-time validation
# split, not the test set — relabel the legend accordingly.
plt.figure(figsize=(12, 5))
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend(['train', 'validation'])
plt.title('accuracy analysis')
plt.show()

# Single-image inference demo on a known class-0 patch.
# Fix 1: the path literal was split across two lines in the original (SyntaxError).
# Fix 2: predict_classes() is deprecated (removed after 2021-01-01, per the
#        captured warning); use np.argmax over model.predict instead.
# Fix 3: the original called predict_classes three times redundantly.
IMG = Image.open('../input/breast-histopathology-images/10253/0/10253_idx5_x1001_y1101_class0.png')
print(type(IMG))
IMG = IMG.resize((100, 100))
IMG = np.array(IMG)
print('po array = {}'.format(IMG.shape))
IMG = np.true_divide(IMG, 255)      # scale to [0, 1], matching training preprocessing
IMG = IMG.reshape(-1, 100, 100, 3)  # add the batch dimension -> (1, 100, 100, 3)
print(type(IMG), IMG.shape)
predictions = model.predict(IMG)                # per-class scores, shape (1, 2)
predictions_c = np.argmax(predictions, axis=-1) # predicted class index, shape (1,)
print(predictions, predictions_c)
print(predictions_c)
<class 'PIL.PngImagePlugin.PngImageFile'>
po array = (100, 100, 3)
<class 'numpy.ndarray'> (1, 100, 100, 3)
/opt/conda/lib/python3.7/site-
packages/tensorflow/python/keras/engine/sequential.py:450: UserWarning:
`model.predict_classes()` is deprecated and will be removed after 2021-01-01.
Please use instead:* `np.argmax(model.predict(x), axis=-1)`, if your model does
multi-class classification (e.g. if it uses a `softmax` last-layer activation).*
`(model.predict(x) > 0.5).astype("int32")`, if your model does binary
classification (e.g. if it uses a `sigmoid` last-layer activation).
warnings.warn('`model.predict_classes()` is deprecated and '
<tensorflow.python.keras.engine.sequential.Sequential object at 0x7f332565b450>
[0] [0]
[0]
# Human-readable names for the two classes: index 0 = benign, 1 = malignant.
# Both splits share the same label vocabulary, so build the map with a
# comprehension instead of spelling the list out twice.
classes = {
    split: ['Non Malignant (No Cancer)', 'Malignant']
    for split in ('TRAIN', 'TEST')
}

label_index = predictions_c[0]
predicted_class = classes['TRAIN'][label_index]
print('We think that is {}.'.format(predicted_class.lower()))
We think that is non malignant (no cancer).

You might also like