
EXAMPLE_MLP_FROM_BOOK_BETTER
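The cells below rely on a shared setup that is not shown in this excerpt: imports, a fixed random seed, the blobs dataset, and a compiled model. The following is a minimal sketch of that setup, assuming a small Sequential MLP; the layer sizes and the seed value are assumptions, not taken from the original notebook.

# Assumed shared setup for the cells below (layer sizes and seed are assumptions).
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical

seed = 1  # assumed value

# three-class blobs problem with two input features
X, y = make_blobs(n_samples=1000, centers=3, n_features=2, cluster_std=2, random_state=seed)
y = to_categorical(y)

# small MLP classifier with a softmax output over the three classes
model = Sequential()
model.add(Dense(50, input_dim=2, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])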

n_samples = 1000

# manual hold-out split; n_train is not defined in this excerpt, so an 80/20
# split matching the train_test_split cells below is assumed here
n_train = int(0.8 * n_samples)
train_X, test_X = X[:n_train, :], X[n_train:, :]
train_y, test_y = y[:n_train], y[n_train:]

# fit with the hold-out set as validation data and a single full batch per epoch
history = model.fit(train_X, train_y, validation_data=(test_X, test_y),
                    batch_size=len(train_X), epochs=200, verbose=0)

# evaluate model
_, train_acc = model.evaluate(train_X, train_y)
_, test_acc = model.evaluate(test_X, test_y)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Train: 0.812, Test: 0.836
EXAMPLE_MLP_FROM_BOOK_BETTER-COPY1
n_samples = 1000

# 80/20 split with scikit-learn instead of manual slicing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)

history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
                    batch_size=len(X_train), epochs=200, verbose=0)

# evaluate model
_, train_acc = model.evaluate(X_train, y_train)
_, test_acc = model.evaluate(X_test, y_test)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Train: 0.804, Test: 0.830


EXAMPLE_MLP_FROM_BOOK_BETTER-COPY4
n_samples = 10000

# regenerate a larger blobs dataset and one-hot encode the labels
X, y = make_blobs(n_samples=n_samples, centers=3, n_features=2, cluster_std=2, random_state=seed)
y = to_categorical(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)

history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
                    batch_size=len(X_train), epochs=200, verbose=0)

# evaluate model
_, train_acc = model.evaluate(X_train, y_train)
_, test_acc = model.evaluate(X_test, y_test)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Train: 0.805, Test: 0.824


EXAMPLE_MLP_FROM_BOOK_BETTER-COPY2

n_samples = 1000

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)

# let Keras carve a validation set out of the training data (validation_split)
# instead of passing the hold-out test set as validation data
history = model.fit(X_train, y_train, validation_split=0.2,
                    batch_size=len(X_train), epochs=200, verbose=0)

# evaluate model
_, train_acc = model.evaluate(X_train, y_train)
_, test_acc = model.evaluate(X_test, y_test)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Train: 0.809, Test: 0.815
EXAMPLE_MLP_FROM_BOOK_BETTER-COPY3

n_samples = 10000

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)

history = model.fit(X_train, y_train, validation_split=0.2,
                    batch_size=len(X_train), epochs=200, verbose=0)

# evaluate model
_, train_acc = model.evaluate(X_train, y_train)
_, test_acc = model.evaluate(X_test, y_test)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Train: 0.798, Test: 0.816
EXAMPLE_MLP_FROM_BOOK_BETTER-COPY5

n_samples = 10000

X, y = make_blobs(n_samples=n_samples, centers=3, n_features=2, cluster_std=2, random_state=seed)
y = to_categorical(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)

# mini-batch training (batch_size=32) instead of a single full batch per epoch
history = model.fit(X_train, y_train, validation_split=0.2,
                    batch_size=32, epochs=200, verbose=0)

# evaluate model
_, train_acc = model.evaluate(X_train, y_train)
_, test_acc = model.evaluate(X_test, y_test)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
Train: 0.823, Test: 0.836
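
The history object returned by fit() is captured in every cell above but never used. Below is a minimal sketch for plotting the learning curves it records, assuming matplotlib is available; on older Keras versions the metric keys may be 'acc'/'val_acc' rather than 'accuracy'/'val_accuracy'.

import matplotlib.pyplot as plt

# plot training vs. validation accuracy per epoch from the most recent fit() call
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()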
