
from google.colab import drive
drive.mount('/gdrive')

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input
from sklearn.model_selection import train_test_split
import numpy
import matplotlib.pyplot as plt

# load pima indians dataset

dataset = numpy.loadtxt("/gdrive/My Drive/DEEPLEARNINGLAB/diabetes.csv", delimiter=",")
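# Note: numpy.loadtxt expects purely numeric rows. If this copy of diabetes.csv
# happened to include a header line (an assumption, not confirmed by the listing
# above), the call would fail with a conversion error; a hedged variant that
# skips one header row:
# dataset = numpy.loadtxt("/gdrive/My Drive/DEEPLEARNINGLAB/diabetes.csv",
#                         delimiter=",", skiprows=1)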

# split into input (X) and output (Y) variables

X = dataset[:,0:8]

Y = dataset[:,8]

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20)
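# The split above is shuffled differently on every run. As a sketch (the value 42
# is an arbitrary choice, not from the original listing), passing random_state
# makes the train/test partition reproducible:
# X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20,
#                                                     random_state=42)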

# create model

model = Sequential()

model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
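# Optional check (not part of the original listing): print the layer output
# shapes and parameter counts of the Sequential model defined above.
model.summary()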

# alternative definition of a model with the Keras Functional API (not trained below)
from tensorflow.keras import layers, Model

inputs = Input(shape=(8,))              # 8 input features, matching the dataset
x1 = layers.Dense(8, activation='relu')(inputs)
x2 = layers.Dense(1, activation='sigmoid')(x1)   # sigmoid for binary output
modelf = Model(inputs, x2)
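# modelf is only defined, not trained, in this experiment. If it were used in
# place of the Sequential model, it would need its own compile/fit step, e.g.
# (a sketch mirroring the settings used below):
# modelf.compile(loss='binary_crossentropy',
#                optimizer=tf.keras.optimizers.SGD(0.0001),
#                metrics=['accuracy'])
# modelf.fit(X_train, y_train, validation_data=(X_test, y_test),
#            epochs=10, batch_size=10)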

# Compile model

model.compile(loss='binary_crossentropy',
              optimizer=tf.keras.optimizers.SGD(0.0001),
              metrics=['accuracy'])

# Fit the model

history=model.fit(X_train, y_train, validation_data=(X_test,y_test), epochs=10, batch_size=10)

# summarize history for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()

# summarize history for loss

plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.savefig("/gdrive/My Drive/DEEPLEARNINGLAB/Exp1loss.png")
plt.show()

# evaluate the model

scores = model.evaluate(X, Y)

print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
