# Professional Documents / Culture Documents
# (Scrape artifact: the two lines below were a garbled, duplicated rendering of
#  the imports that follow — "import os; from google.colab import drive;
#  drive.mount('/content/drive', force_remount=True)".)
import os  # file-system helpers (restored: scrape dropped the "import" keyword)
from google.colab import drive

# Mount Google Drive; force_remount avoids a stale mount when the cell re-runs.
drive.mount("/content/drive", force_remount=True)

path = "/content/drive/MyDrive/IA/Imagenes"
# NOTE(review): os.path.join(path) with one argument is a no-op; kept the
# variable name for the rest of the script.
categorias_names = path
print(os.listdir(categorias_names))

# Report how many images each category folder contains.
# FIX: the original statement was broken mid-identifier by the scrape
# ("n\name_img"); also use os.path.join(path, i) instead of string "+".
for i in os.listdir(categorias_names):
    name_img = os.path.join(path, i)
    print("El numero de imagenes con etiqueta {} son: {}".format(i, len(os.listdir(name_img))))
##Crear una CNN
import tensorflow as tf
import numpy as np

# Five Conv2D + MaxPooling stages progressively shrink the 300x300x3 input
# while deepening the feature maps, followed by a dense classifier head.
modelo = tf.keras.Sequential([
    # Primera Convolución y Maxpooling
    tf.keras.layers.Conv2D(16, (3, 3), activation="relu", input_shape=(300, 300, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Segunda Convolución y Maxpooling
    tf.keras.layers.Conv2D(32, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Tercera Convolución y Maxpooling
    tf.keras.layers.Conv2D(64, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Cuarta Convolución y Maxpooling
    tf.keras.layers.Conv2D(64, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Quinta Convolución y Maxpooling
    tf.keras.layers.Conv2D(64, (3, 3), activation="relu"),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation="relu"),
    # BUG FIX: the model is compiled with categorical_crossentropy (see the
    # compile call below), which expects the outputs to form a probability
    # distribution over the 2 classes -> softmax, not sigmoid.
    tf.keras.layers.Dense(2, activation="softmax"),
])
modelo.summary()
from tensorflow.keras.optimizers import RMSprop

# Compile for one-hot (2-class) labels — matches class_mode="categorical".
# FIX: the original statement was split mid-number by the scrape
# ("learning_rate=0.\n001"); rejoined into a valid float literal.
modelo.compile(
    loss="categorical_crossentropy",
    optimizer=RMSprop(learning_rate=0.001),
    metrics=["accuracy"],
)
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Stream batches straight from the class folders, normalizing pixels to [0, 1].
train_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(
    path,
    target_size=(300, 300),
    batch_size=50,
    class_mode="categorical",
)

# One epoch = every batch the generator yields.
pasos_max = len(train_generator)
print(pasos_max)

modelo.fit(
    train_generator,
    steps_per_epoch=pasos_max,
    epochs=20,
    verbose=1,
)
#Validacion de Modelo
from tensorflow.keras.preprocessing import image

path = "/content/drive/MyDrive/CNN/Perros y Gatos/Perros/dog.180.jpg"
img = image.load_img(path, target_size=(300, 300))
x = image.img_to_array(img)
# BUG FIX: the training pipeline rescaled pixels by 1/255
# (ImageDataGenerator(rescale=1/255)); inference input must be normalized the
# same way or the model sees values 255x larger than it was trained on.
x = x / 255.0
x = np.expand_dims(x, axis=0)
images = np.vstack([x])

prediccion = modelo.predict(images)
print(prediccion)

# NOTE(review): flow_from_directory assigns class indices in sorted folder
# order — presumably column 0 = "Gatos", column 1 = "Perros"; verify against
# train_generator.class_indices.
if prediccion[0][0] > 0.5:
    print("Es Gato")
else:
    print("No es gato")
if prediccion[0][1] > 0.5:
    print("Es Perro")
else:
    print("No es perro")
##Guardar una red neuronal en archivos *.json y *.h5
# Persist the architecture (JSON) and the trained weights (HDF5) separately so
# the model can be rebuilt later without re-training.
modelo_json = modelo.to_json()
path_json = "/content/drive/MyDrive/NN_Entrenadas/perros_gatos.json"
with open(path_json, "w") as json_file:
    json_file.write(modelo_json)
# FIX: removed the redundant json_file.close() — the with-statement already
# closes the file on exit.
path_h5 = "/content/drive/MyDrive/NN_Entrenadas/perros_gatos.h5"
modelo.save_weights(path_h5)
print("Modelo Guardado!!!")
## UTILIZAR ARCHIVO — section: load and use the saved model
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing import image
import os
from google.colab import drive

drive.mount("/content/drive", force_remount=True)

###Abrir una red neuronal guardada a partir de archivos json y h5
path_json = "/content/drive/MyDrive/NN_Entrenadas/perros_gatos.json"
path_h5 = "/content/drive/MyDrive/NN_Entrenadas/perros_gatos.h5"

# FIX: use a context manager instead of manual open/close so the file is
# closed even if reading raises.
with open(path_json, "r") as json_file:
    model_json = json_file.read()

# Rebuild the architecture from JSON, then restore the trained weights.
modelo_cargado = tf.keras.models.model_from_json(model_json)
modelo_cargado.load_weights(path_h5)
print("Se cargo el modelo !!")
path = "/content/drive/MyDrive/CNN/Perros y Gatos/Perros/dog.96.jpg"
img = image.load_img(path, target_size=(300, 300))
x = image.img_to_array(img)
# BUG FIX: the model was trained on inputs rescaled by 1/255; normalize the
# inference image the same way.
x = x / 255.0
x = np.expand_dims(x, axis=0)
images = np.vstack([x])

prediccion = modelo_cargado.predict(images)
print(prediccion)

# NOTE(review): column order follows flow_from_directory's sorted folder
# order — presumably 0 = "Gatos", 1 = "Perros"; verify against the training
# generator's class_indices.
if prediccion[0][0] > 0.5:
    print("Es Gato")
else:
    print("No es gato")
if prediccion[0][1] > 0.5:
    print("Es Perro")
else:
    print("No es perro")
# Example output captured from a run:
#   [[0. 1.]]
#   No es gato
#   Es Perro