
# ANOVA test using python
import numpy as np
from scipy.stats import f_oneway

a = np.array([[9.87, 9.03, 5.81], [7.87, 7.03, 6.81], [8.87, 9.03, 7.81],
              [7.87, 6.03, 8.81], [9.87, 9.03, 6.81], [6.87, 7.03, 6.81]])
b = np.array([[5.87, 6.03, 7.81], [7.87, 9.03, 8.81], [4.87, 9.03, 7.81],
              [7.87, 6.03, 8.81], [9.87, 7.03, 6.81], [6.87, 8.03, 7.81]])
c = np.array([[6.87, 6.03, 5.81], [8.87, 7.03, 5.81], [6.87, 8.03, 8.81],
              [9.87, 9.03, 9.81], [8.87, 7.03, 7.81], [5.87, 6.03, 6.81]])

f, p = f_oneway(a, b, c)
print(f)
print(p)

OUTPUT
[1.15384615 0.39735099 0.73643411]
[0.34189339 0.67896701 0.49535353]

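As a cross-check (not part of the original listing), the same F statistic can be computed by hand from the between-group and within-group mean squares. The sketch below does this for the first column of a, b and c and compares it with f_oneway.

import numpy as np
from scipy.stats import f_oneway

# first column of a, b and c from the listing above
g1 = np.array([9.87, 7.87, 8.87, 7.87, 9.87, 6.87])
g2 = np.array([5.87, 7.87, 4.87, 7.87, 9.87, 6.87])
g3 = np.array([6.87, 8.87, 6.87, 9.87, 8.87, 5.87])
groups = [g1, g2, g3]

k = len(groups)                              # number of groups
n = sum(len(g) for g in groups)              # total number of observations
grand_mean = np.concatenate(groups).mean()

# between-group and within-group sums of squares
ss_between = sum(len(g) * (g.mean() - grand_mean) ** 2 for g in groups)
ss_within = sum(((g - g.mean()) ** 2).sum() for g in groups)

# F = MS_between / MS_within
f_manual = (ss_between / (k - 1)) / (ss_within / (n - k))
f_scipy, p_scipy = f_oneway(g1, g2, g3)
print(f_manual, f_scipy)   # the two F values agree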

ed_data,columns=df.columns)
OUTPUT

df=pd.DataFrame(data)

print(normalized_data)
Scaler=MinMaxScaler()
# generate grids in

print(normalized_df)
import pandas as pd
numpy [-4. -3. -2. -1. 0. 1. 2. 3. 4.]
[-5. -4. -3. -2. -1. 0. 1. 2. 3. 4. 5.]

MinMaxScaler
#MinMax
import numpy as np [[-4. -3. -2. -1. 0. 1. 2. 3. 4.]

print(df)
x = np.linspace(-4,4,9) [-4. -3. -2. -1. 0. 1. 2. 3. 4.]

[3,2,5]}
y = np.linspace(- [-4. -3. -2. -1. 0. 1. 2. 3. 4.]

f)
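For reference, min-max scaling is just (x - min) / (max - min) per column; a minimal sketch with the same weight/price values reproduces the scaler's output by hand.

import numpy as np

values = np.array([[300., 3.], [250., 2.], [800., 5.]])   # same weight/price data as above
col_min = values.min(axis=0)
col_max = values.max(axis=0)
manual = (values - col_min) / (col_max - col_min)          # min-max formula per column
print(manual)   # matches the MinMaxScaler output above
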
# generate grids in numpy
import numpy as np

x = np.linspace(-4,4,9)
y = np.linspace(-5,5,11)
print(x)
print(y)
xx,yy = np.meshgrid(x,y)
print(xx)

OUTPUT
[-4. -3. -2. -1.  0.  1.  2.  3.  4.]
[-5. -4. -3. -2. -1.  0.  1.  2.  3.  4.  5.]
[[-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]
 [-4. -3. -2. -1.  0.  1.  2.  3.  4.]]

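A typical follow-up (not in the original listing) is to evaluate a function over the grid; the sketch below computes z = x^2 + y^2 at every grid point and checks the array shapes.

import numpy as np

x = np.linspace(-4, 4, 9)
y = np.linspace(-5, 5, 11)
xx, yy = np.meshgrid(x, y)            # xx and yy are both 11 x 9
zz = xx**2 + yy**2                    # function evaluated at every grid point
print(xx.shape, yy.shape, zz.shape)   # (11, 9) (11, 9) (11, 9)
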
#iris dataset using decision tree algo
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score

# Loading the Iris dataset
data = load_iris()

# Extracting Attributes / Features
X = data.data

# Extracting Target / Class Labels
y = data.target

# Import Library for splitting data
from sklearn.model_selection import train_test_split

# Creating Train and Test datasets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 50, test_size = 0.25)

# Creating Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)

# Predict Accuracy Score
y_pred = clf.predict(X_test)
print("Train data accuracy:", accuracy_score(y_true=y_train, y_pred=clf.predict(X_train)))
print("Test data accuracy:", accuracy_score(y_true=y_test, y_pred=y_pred))

OUTPUT
Train data accuracy: 1.0
Test data accuracy: 0.947368421052

#logistic_Regression
import numpy as np
from sklearn import datasets
from sklearn import linear_model
from sklearn.svm import l1_min_c

iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max()

cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
clf = linear_model.LogisticRegression(penalty="l1", solver="liblinear", tol=1e-6,
                                      max_iter=int(1000), warm_start=True,
                                      intercept_scaling=10000.0)
coefs_ = []
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    coefs_.append(clf.coef_.ravel().copy())
coefs_ = np.array(coefs_)
print(coefs_)

OUTPUT
(16 x 4 array of L1-path coefficients; values omitted)

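The coefficients collected in coefs_ are usually visualized as a regularization path. A condensed, self-contained sketch is shown below; it assumes matplotlib is installed and simply re-runs the same fit loop before plotting.

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.svm import l1_min_c

iris = datasets.load_iris()
X, y = iris.data[iris.target != 2], iris.target[iris.target != 2]
X /= X.max()

cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16)
clf = linear_model.LogisticRegression(penalty="l1", solver="liblinear", tol=1e-6,
                                      max_iter=1000, warm_start=True,
                                      intercept_scaling=10000.0)
coefs_ = []
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    coefs_.append(clf.coef_.ravel().copy())

# one line per feature: coefficient value as a function of log10(C)
plt.plot(np.log10(cs), np.array(coefs_), marker="o")
plt.xlabel("log(C)")
plt.ylabel("Coefficients")
plt.title("Logistic Regression L1 regularization path")
plt.show()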

#KNN
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

data=load_iris()
X=data.data
y=data.target
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=50,test_size=0.3)
clf=KNeighborsClassifier()
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
print("Train accuracy")
print(accuracy_score(y_true=y_train,y_pred=clf.predict(X_train)))
print("Test accuracy")
print(accuracy_score(y_true=y_test,y_pred=y_pred))

OUTPUT
Train accuracy
0.9714285714285714
Test accuracy
0.9333333333333333

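The classifier above uses the default of 5 neighbours; a small sketch (same iris split, a few illustrative k values) shows how test accuracy can be compared across different n_neighbors.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

data = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, random_state=50, test_size=0.3)

# try a few illustrative neighbourhood sizes and report test accuracy for each
for k in (1, 3, 5, 7, 9):
    clf = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
    print(k, accuracy_score(y_test, clf.predict(X_test)))
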
#ZScore
import pandas as pd
import numpy as np
import scipy.stats as stats

data=np.array([5,6,6,7,7,12,12,13,16,19,21])
print(stats.zscore(data))

OUTPUT
[-1.17746754 -0.98975533 -0.98975533 -0.80204311 -0.80204311  0.13651798
  0.13651798  0.32423019  0.88736685  1.4505035   1.82592793]

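stats.zscore uses the population standard deviation (ddof=0) by default, so the same numbers can be reproduced directly from the definition z = (x - mean) / std, as sketched below.

import numpy as np

data = np.array([5, 6, 6, 7, 7, 12, 12, 13, 16, 19, 21])
z = (data - data.mean()) / data.std()   # numpy's std() also defaults to ddof=0
print(z)   # matches stats.zscore(data) above
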
#gen and plot class. data using scikit-learn
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt

# Creating the classification dataset with one informative feature and one cluster per class
X, y = make_classification(n_features=2, n_redundant=0, n_informative=1, n_clusters_per_class=1)

# Plotting the dataset
plt.figure(figsize=(7.50, 3.50))
plt.subplots_adjust(bottom=0.05, top=0.9, left=0.05, right=0.95)
plt.subplot(111)
plt.title("Classification dataset with one informative feature and one cluster per class", fontsize="12")
plt.scatter(X[:, 0], X[:, 1], marker="o", c=y, s=40, edgecolor="k")
plt.show()

OUTPUT
(scatter plot of the generated two-feature dataset)
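The generated dataset can also be fed to one of the classifiers used earlier; a small sketch (with random_state fixed so the run is reproducible, an assumption not in the original) fits a KNN model on it.

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score

# same kind of dataset as above, with a fixed seed for reproducibility
X, y = make_classification(n_features=2, n_redundant=0, n_informative=1,
                           n_clusters_per_class=1, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
clf = KNeighborsClassifier().fit(X_train, y_train)
print("Test accuracy:", accuracy_score(y_test, clf.predict(X_test)))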

#BIAS AND VARIANCE
# pip install mlxtend in cmd
import pandas as pd
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from mlxtend.evaluate import bias_variance_decomp

# load dataset
dataframe = read_csv('housing.csv')

# separate into inputs and outputs
data = dataframe.values
X, y = data[:, :-1], data[:, -1]

# split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)

# define the model
model = LinearRegression()

# estimate bias and variance
mse, bias, var = bias_variance_decomp(model, X_train, y_train, X_test, y_test,
                                      loss='mse', num_rounds=200, random_seed=1)

# summarize results
print('MSE: %.3f' % mse)
print('Bias: %.3f' % bias)
print('Variance: %.3f' % var)

OUTPUT
MSE: 22.487
Bias: 20.726
Variance: 1.761
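For contrast (assuming the same housing.csv file is available), the same decomposition can be run on a more flexible model such as a decision tree regressor, which typically trades lower bias for higher variance.

from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from mlxtend.evaluate import bias_variance_decomp

# same data preparation as above
dataframe = read_csv('housing.csv')
data = dataframe.values
X, y = data[:, :-1], data[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)

# decompose the expected test error of a decision tree regressor
model = DecisionTreeRegressor(random_state=1)
mse, bias, var = bias_variance_decomp(model, X_train, y_train, X_test, y_test,
                                      loss='mse', num_rounds=200, random_seed=1)
# a flexible tree usually shows lower bias but higher variance than the linear model above
print('MSE: %.3f' % mse)
print('Bias: %.3f' % bias)
print('Variance: %.3f' % var)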
