You are on page 1of 23

SKlearn ‫ مكتبة‬:‫القسم العاشر‬

A. Data Preparation 10. Ensemble Classifier


1. Data files from SKlearn 11. K Nearest Neighbors
2. Data cleaning 12. Naïve Bayes
3. Metrics module 13. LDA , QDA
4. Feature Selection 14. Hierarchical Clusters
5. Data Scaling 15. DbScan
6. Data Split 16. NLP
17. Apriori
B. ML Algorithms
1. Linear Regression C. Algorithm Evaluation :
2. Logistic Regression 1. Model Check
3. Neural Network 2. Grid Search
4. SVR 3. Pipeline
5. SVC 4. Model Save
6. K-means
7. PCA D. Time Series
8. Decision Tree
9. Ensemble Regression

1
‫‪2.3) Neural Network‬‬
‫و هي استخدام الشبكات العصبية سواء للتوقع او التصنيف‬

‫ولها العديد من الكالسات ‪ ,‬أهمها ‪:‬‬

‫‪2.3.1‬‬ ‫‪neural_network.MLPRegressor‬‬ ‫‪:‬‬ ‫التوقع باستخدام الشبكات العصبية‬


‫‪2.3.2‬‬ ‫‪neural_network.MLPClassifier‬‬ ‫‪:‬‬ ‫التصنيف باستخدام الشبكات العصبية‬

‫‪2‬‬
‫‪2.3.1) MLPRegressor‬‬
‫‪ ‬و هي الخاصة\ ببناء الشبكات العصبية للتوقع‬
‫‪ ‬يتم استخدامها عبر الموديول ‪neural_network.MLPRegressor‬‬
‫‪ ‬الـ ‪ parameters‬المستخدمة في الموديل ‪:‬‬

‫‪‬‬ ‫‪hidden_layer_sizes‬‬ ‫تصميم الطبقات الخفية‬


‫‪‬‬ ‫‪activation‬‬ ‫اختيار دالة األكتيفاشن‬
‫‪‬‬ ‫‪solver‬‬ ‫طريقة عمل االوبتيميزاشن‬
‫‪‬‬ ‫‪epsilon‬‬ ‫قيمة ابسلون اذا ما سيتم استخدام ادم في السولفر‬
‫‪‬‬ ‫‪alpha‬‬ ‫معامل التنعيم‬
‫‪‬‬ ‫‪batch_size‬‬ ‫حجم الميني باتش‬
‫‪‬‬ ‫‪learning_rate‬‬ ‫معامل التعلم‬
‫‪‬‬ ‫‪max_iter‬‬ ‫العدد األقصي للمحاوالت‬
‫‪‬‬ ‫‪shuffle‬‬ ‫اذا ما كان سيقوم بعمل عشوائية للبيانات ام ال‬
‫‪‬‬ ‫‪early_stopping‬‬ ‫اذا ما كان سيتم تنفيذ االيقاف المبكر‬
‫‪‬‬ ‫‪learning_rate_init‬‬ ‫معامل التعلم الابتدائي (كان مذكورًا learning_rate مرتين بالخطأ)‬
‫‪3‬‬
‫‪ ‬الـ ‪ attributes‬الخارجة\ من الموديل ‪:‬‬

‫‪‬‬ ‫_‪coefs‬‬ ‫قائمة بمصفوفات الأوزان التي تم تعلمها عبر الفيت (الاسم الصحيح coefs_ وليس coef_)‬


‫‪‬‬ ‫_‪intercepts‬‬ ‫قائمة بقيم البايس لكل طبقة (وليست التقاطع مع محور اكس)‬
‫‪‬‬ ‫_‪classes‬‬ ‫اسماء الفئات المستهدفة (موجودة في MLPClassifier فقط وليست في MLPRegressor)‬
‫‪‬‬ ‫_‪n_iter‬‬ ‫عدد المحاولات المستخدم‬
‫‪‬‬ ‫_‪n_layers‬‬ ‫عدد الطبقات المستخدمة‬

‫‪ ‬الـ ‪ methods‬المستخدمة مع الموديل ‪:‬‬

‫‪‬‬ ‫)‪fit(X, y‬‬ ‫لعمل الفيت‬


‫‪‬‬ ‫)‪predict(X‬‬ ‫لعمل التوقع‬
‫‪‬‬ ‫)‪score(X, y‬‬ ‫لمعرفة مدي كفاءة الخوارزم و درجته في التوقع‬

‫‪4‬‬
‫الصيغة العامة‬
# Import Libraries
from sklearn.neural_network import MLPRegressor
#----------------------------------------------------
#Applying MLPRegressor Model

# Full signature for reference (curly quotes from the PDF export fixed so it
# is copy-paste safe):
'''
sklearn.neural_network.MLPRegressor(hidden_layer_sizes=(100, ), activation='relu', solver='adam',
                                    alpha=0.0001, batch_size='auto', learning_rate='constant',
                                    learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True,
                                    random_state=None, tol=0.0001, verbose=False, warm_start=False,
                                    momentum=0.9, nesterovs_momentum=True, early_stopping=False,
                                    validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1E-08,
                                    n_iter_no_change=10)
'''

# Build the regressor.  NOTE: a stray page number ("5") from the PDF export
# used to sit inside this call and made it a SyntaxError — removed.
# Assumes X_train/X_test/y_train/y_test already exist (this is a template).
MLPRegressorModel = MLPRegressor(activation='tanh',         # can be also identity , logistic , relu
                                 solver='lbfgs',            # can be also sgd , adam
                                 learning_rate='constant',  # can be also invscaling , adaptive
                                 early_stopping=False,
                                 alpha=0.0001,
                                 hidden_layer_sizes=(100, 3),
                                 random_state=33)
MLPRegressorModel.fit(X_train, y_train)

#Calculating Details
print('MLPRegressorModel Train Score is : ' , MLPRegressorModel.score(X_train, y_train))
print('MLPRegressorModel Test Score is : ' , MLPRegressorModel.score(X_test, y_test))
print('MLPRegressorModel loss is : ' , MLPRegressorModel.loss_)
print('MLPRegressorModel No. of iterations is : ' , MLPRegressorModel.n_iter_)
print('MLPRegressorModel No. of layers is : ' , MLPRegressorModel.n_layers_)
print('MLPRegressorModel last activation is : ' , MLPRegressorModel.out_activation_)
#print('----------------------------------------------------')

#Calculating Prediction
y_pred = MLPRegressorModel.predict(X_test)
print('Predicted Value for MLPRegressorModel is : ' , y_pred[:10])

6
‫مثال‬
#Import Libraries
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
#----------------------------------------------------

#load boston data
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 — on modern versions replace with fetch_california_housing().
BostonData = load_boston()

#X Data
X = BostonData.data
#y Data
y = BostonData.target

#----------------------------------------------------
#Splitting data (stray "7" page number from the PDF export removed here)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=44, shuffle=True)

#----------------------------------------------------
#Applying MLPRegressor Model

'''
#sklearn.neural_network.MLPRegressor(hidden_layer_sizes=(100, ), activation='relu', solver='adam',
#                                    alpha=0.0001, batch_size='auto', learning_rate='constant',
#                                    learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True,
#                                    random_state=None, tol=0.0001, verbose=False, warm_start=False,
#                                    momentum=0.9, nesterovs_momentum=True, early_stopping=False,
#                                    validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1E-08,
#                                    n_iter_no_change=10)
'''

MLPRegressorModel = MLPRegressor(activation='tanh',         # can be also identity , logistic , relu
                                 solver='lbfgs',            # can be also sgd , adam
                                 learning_rate='constant',  # can be also invscaling , adaptive
                                 early_stopping=False,
                                 alpha=0.0001,
                                 hidden_layer_sizes=(100, 3),
                                 random_state=33)
MLPRegressorModel.fit(X_train, y_train)

#Calculating Details (stray "8" page number removed here)
print('MLPRegressorModel Train Score is : ' , MLPRegressorModel.score(X_train, y_train))
print('MLPRegressorModel Test Score is : ' , MLPRegressorModel.score(X_test, y_test))
print('MLPRegressorModel loss is : ' , MLPRegressorModel.loss_)
print('MLPRegressorModel No. of iterations is : ' , MLPRegressorModel.n_iter_)
print('MLPRegressorModel No. of layers is : ' , MLPRegressorModel.n_layers_)
print('MLPRegressorModel last activation is : ' , MLPRegressorModel.out_activation_)
print('----------------------------------------------------')

#Calculating Prediction
y_pred = MLPRegressorModel.predict(X_test)
print('Predicted Value for MLPRegressorModel is : ' , y_pred[:10])

#----------------------------------------------------
#Calculating Mean Absolute Error
MAEValue = mean_absolute_error(y_test, y_pred, multioutput='uniform_average') # it can be raw_values
print('Mean Absolute Error Value is : ', MAEValue)

#----------------------------------------------------
#Calculating Mean Squared Error (stray "9" page number removed here)
MSEValue = mean_squared_error(y_test, y_pred, multioutput='uniform_average') # it can be raw_values
print('Mean Squared Error Value is : ', MSEValue)

#----------------------------------------------------
#Calculating Median Absolute Error (label previously said "Squared" by mistake)
MdSEValue = median_absolute_error(y_test, y_pred)
print('Median Absolute Error Value is : ', MdSEValue)

10
: ‫مثال‬
# Toy example: fit an MLP regressor on a tiny hand-made dataset, inspect the
# learned weight matrices, then predict the target for one of the training rows.
from sklearn.neural_network import MLPRegressor

X = [
    [3, 6, 8],
    [4, 5, 6],
    [1, 5, 6],
    [4, 7, 4],
    [0, 5, 3],
    [5, 6, 9],
    [2, 4, 8],
    [0, 6, 8],
]
y = [6, 3, 9, 8, 5, 4, 2, 5]

clf = MLPRegressor(
    solver='lbfgs',
    alpha=1e-5,
    hidden_layer_sizes=(100, 3),
    random_state=1,
    learning_rate='constant',
    max_iter=100,
    activation='tanh',
)
clf.fit(X, y)

print('Coef = \n', clf.coefs_)
print('============================')

print('Prediction = ', clf.predict([[4, 7, 4]]))

11
: ‫مثال‬
‫ ثم عمل اختبار علي عشرين رقم‬, ‫ و رسمها باللون االزرق‬, sin ‫فكرتها تقوم علي عمل شبكة تقوم بالتدريب علي الف رقم الكس و واي والتي قيمتها‬
‫بينهم و رسم القيمة الناتجة علي نفس الجراف باللون االحمر لنري مدي التطابق‬

import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor

# 1000 training points of y = sin(2*pi*x) sampled on [0, 1)
x = np.arange(0.0, 1, 0.001).reshape(-1, 1)
y = np.sin(2 * np.pi * x).ravel()

print('x = \n' ,x.shape)
print('y = \n' ,y.shape)

nn = MLPRegressor(
    hidden_layer_sizes=(100,), activation='relu', solver='adam', alpha=0.001, batch_size='auto',
    learning_rate='constant', learning_rate_init=0.01, power_t=0.5, max_iter=1000, shuffle=True,
    random_state=0, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
    early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

# Fit on the training grid.  (A stray "12" page number from the PDF export and
# the unused binding `n = nn.fit(...)` were removed here.)
nn.fit(x, y)

# 20 unseen test points between the training samples
test_x = np.arange(0.0, 1, 0.05).reshape(-1, 1)
test_y = nn.predict(test_x)

# Blue squares: true curve; red dots: network predictions.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(x, y, s=1, c='b', marker="s", label='real')
ax1.scatter(test_x, test_y, s=10, c='r', marker="o", label='NN Prediction')
ax1.legend()  # labels were set but the legend was never displayed
plt.show()

13
‫‪2.3.2) MLPClassifier‬‬
‫‪ ‬و هي الخاصة\ ببناء الشبكات العصبية للتصنيف‬
‫‪ ‬يتم استخدامها عبر الموديول ‪neural_network.MLPClassifier‬‬
‫‪ ‬الـ ‪ parameters‬المستخدمة في الموديل ‪:‬‬

‫‪‬‬ ‫‪hidden_layer_sizes‬‬ ‫تصميم الطبقات الخفية‬


‫‪‬‬ ‫‪activation‬‬ ‫اختيار دالة األكتيفاشن‬
‫‪‬‬ ‫‪solver‬‬ ‫طريقة عمل االوبتيميزاشن‬
‫‪‬‬ ‫‪epsilon‬‬ ‫قيمة ابسلون اذا ما سيتم استخدام ادم في السولفر‬
‫‪‬‬ ‫‪alpha‬‬ ‫معامل التنعيم‬
‫‪‬‬ ‫‪batch_size‬‬ ‫حجم الميني باتش‬
‫‪‬‬ ‫‪learning_rate‬‬ ‫معامل التعلم‬
‫‪‬‬ ‫‪max_iter‬‬ ‫العدد األقصي للمحاوالت‬
‫‪‬‬ ‫‪shuffle‬‬ ‫اذا ما كان سيقوم بعمل عشوائية للبيانات ام ال‬
‫‪‬‬ ‫‪early_stopping‬‬ ‫اذا ما كان سيتم تنفيذ االيقاف المبكر‬
‫‪‬‬ ‫‪learning_rate_init‬‬ ‫معامل التعلم الابتدائي (كان مذكورًا learning_rate مرتين بالخطأ)‬
‫‪14‬‬
‫‪ ‬الـ ‪ attributes‬الخارجة\ من الموديل ‪:‬‬

‫‪‬‬ ‫_‪coefs‬‬ ‫قائمة بمصفوفات الأوزان التي تم تعلمها عبر الفيت (الاسم الصحيح coefs_ وليس coef_)‬


‫‪‬‬ ‫_‪intercepts‬‬ ‫قائمة بقيم البايس لكل طبقة (وليست التقاطع مع محور اكس)‬
‫‪‬‬ ‫_‪classes‬‬ ‫اسماء الفئات (التصنيفات) الموجودة في بيانات الهدف‬
‫‪‬‬ ‫_‪n_iter‬‬ ‫عدد المحاولات المستخدم‬
‫‪‬‬ ‫_‪n_layers‬‬ ‫عدد الطبقات المستخدمة‬

‫‪ ‬الـ ‪ methods‬المستخدمة مع الموديل ‪:‬‬

‫‪‬‬ ‫)‪fit(X, y‬‬ ‫لعمل الفيت‬


‫‪‬‬ ‫)‪predict(X‬‬ ‫لعمل التوقع‬
‫‪‬‬ ‫)‪score(X, y‬‬ ‫لمعرفة مدي كفاءة الخوارزم و درجته في التوقع‬

‫‪15‬‬
‫الصيغة العامة‬
#Import Libraries
from sklearn.neural_network import MLPClassifier
#----------------------------------------------------
#Applying MLPClassifier Model

# Full signature for reference (curly quotes from the PDF export fixed so it
# is copy-paste safe):
'''
sklearn.neural_network.MLPClassifier(hidden_layer_sizes=(100, ), activation='relu', solver='adam',
                                     alpha=0.0001, batch_size='auto', learning_rate='constant', momentum=0.9,
                                     learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True,
                                     random_state=None, tol=0.0001, verbose=False, warm_start=False,
                                     n_iter_no_change=10, nesterovs_momentum=True, early_stopping=False,
                                     validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1E-08)
'''

# Assumes X_train/X_test/y_train/y_test already exist (this is a template).
MLPClassifierModel = MLPClassifier(activation='tanh',         # can be also identity , logistic , relu
                                   solver='lbfgs',            # can be also sgd , adam
                                   learning_rate='constant',  # can be also invscaling , adaptive
                                   early_stopping=False,
                                   alpha=0.0001,
                                   hidden_layer_sizes=(100, 3),
                                   random_state=33)
MLPClassifierModel.fit(X_train, y_train)

#Calculating Details (stray "16" page number from the PDF export removed here)
print('MLPClassifierModel Train Score is : ' , MLPClassifierModel.score(X_train, y_train))
print('MLPClassifierModel Test Score is : ' , MLPClassifierModel.score(X_test, y_test))
print('MLPClassifierModel loss is : ' , MLPClassifierModel.loss_)
print('MLPClassifierModel No. of iterations is : ' , MLPClassifierModel.n_iter_)
print('MLPClassifierModel No. of layers is : ' , MLPClassifierModel.n_layers_)
print('MLPClassifierModel last activation is : ' , MLPClassifierModel.out_activation_)
print('----------------------------------------------------')

#Calculating Prediction
y_pred = MLPClassifierModel.predict(X_test)
y_pred_prob = MLPClassifierModel.predict_proba(X_test)
print('Predicted Value for MLPClassifierModel is : ' , y_pred[:10])
print('Prediction Probabilities Value for MLPClassifierModel is : ' , y_pred_prob[:10])

17
‫مثال‬
#Import Libraries
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
#----------------------------------------------------

#load iris data
IrisData = load_iris()

#X Data
X = IrisData.data

#y Data
y = IrisData.target

#----------------------------------------------------
#Splitting data (stray "18" page number from the PDF export removed here)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=44, shuffle=True)

#----------------------------------------------------
#Applying MLPClassifier Model
'''
sklearn.neural_network.MLPClassifier(hidden_layer_sizes=(100, ), activation='relu', solver='adam',
                                     alpha=0.0001, batch_size='auto', learning_rate='constant', momentum=0.9,
                                     learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True,
                                     random_state=None, tol=0.0001, verbose=False, warm_start=False,
                                     n_iter_no_change=10, nesterovs_momentum=True, early_stopping=False,
                                     validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1E-08)
'''

MLPClassifierModel = MLPClassifier(activation='tanh',         # can be also identity , logistic , relu
                                   solver='lbfgs',            # can be also sgd , adam
                                   learning_rate='constant',  # can be also invscaling , adaptive
                                   early_stopping=False,
                                   alpha=0.0001,
                                   hidden_layer_sizes=(100, 3),
                                   random_state=33)
MLPClassifierModel.fit(X_train, y_train)

#Calculating Details (stray "19" page number from the PDF export removed here)
print('MLPClassifierModel Train Score is : ' , MLPClassifierModel.score(X_train, y_train))
print('MLPClassifierModel Test Score is : ' , MLPClassifierModel.score(X_test, y_test))
print('MLPClassifierModel loss is : ' , MLPClassifierModel.loss_)
print('MLPClassifierModel No. of iterations is : ' , MLPClassifierModel.n_iter_)
print('MLPClassifierModel No. of layers is : ' , MLPClassifierModel.n_layers_)
print('MLPClassifierModel last activation is : ' , MLPClassifierModel.out_activation_)
print('----------------------------------------------------')

#Calculating Prediction
y_pred = MLPClassifierModel.predict(X_test)
y_pred_prob = MLPClassifierModel.predict_proba(X_test)
print('Predicted Value for MLPClassifierModel is : ' , y_pred[:10])
print('Prediction Probabilities Value for MLPClassifierModel is : ' , y_pred_prob[:10])

#----------------------------------------------------
#Calculating Confusion Matrix
CM = confusion_matrix(y_test, y_pred)
print('Confusion Matrix is : \n', CM)

# drawing confusion matrix
sns.heatmap(CM, center = True)
plt.show()
20
: ‫مثال‬
# Toy classification example: train an MLP on eight 3-feature samples with
# binary labels, inspect the learned weights, then classify two unseen points.
from sklearn.neural_network import MLPClassifier

X = [
    [3, 6, 8],
    [4, 5, 6],
    [1, 5, 6],
    [4, 7, 4],
    [0, 5, 3],
    [5, 6, 9],
    [2, 4, 8],
    [0, 6, 8],
]
y = [0, 1, 1, 1, 0, 0, 0, 1]

clf = MLPClassifier(
    solver='lbfgs',
    alpha=1e-5,
    hidden_layer_sizes=(100, 3),
    random_state=1,
    learning_rate='constant',
    max_iter=100,
    activation='tanh',
)
clf.fit(X, y)

print('Coef = \n', clf.coefs_)
print('============================')
print('Prediction = ', clf.predict([[10, 3, 10]]))
print('Prediction = ', clf.predict([[3, 7, 9]]))

21
: ‫مثال‬
#Import Libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix

# Load the spine dataset and drop the junk trailing column.
# (Bare notebook-style expressions like `df.head()`, `x_train`, `cm` had no
# effect when run as a script and were removed; a stray "22" page number from
# the PDF export was removed as well.)
df = pd.read_csv('Dataset_spine.csv')
df = df.drop(['Unnamed: 13'], axis=1)

# Keep only the first six feature columns plus the label
df = df.drop(['Col7', 'Col8', 'Col9', 'Col10', 'Col11', 'Col12'], axis=1)

y = df['Class_att']
x = df.drop(['Class_att'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=27)

# hidden_layer_sizes=(100) was an int, not a tuple; sklearn tolerates a scalar
# but (100,) states the intent (one hidden layer of 100 units) explicitly.
clf = MLPClassifier(hidden_layer_sizes=(100,), max_iter=500, alpha=0.0001, solver='sgd', verbose=10,
                    random_state=21, tol=0.000000001)

clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)

# The original computed these but discarded the results — report them.
print('Accuracy : ', accuracy_score(y_test, y_pred))

cm = confusion_matrix(y_test, y_pred)
print('Confusion Matrix : \n', cm)

sns.heatmap(cm, center=True)
plt.show()

23

You might also like