
In [15]:  import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
import time

In [3]:  df = pd.read_csv('Iris.csv')
df.head()

Out[3]:    Id  SepalLengthCm  SepalWidthCm  PetalLengthCm  PetalWidthCm      Species
        0   1            5.1           3.5            1.4           0.2  Iris-setosa
        1   2            4.9           3.0            1.4           0.2  Iris-setosa
        2   3            4.7           3.2            1.3           0.2  Iris-setosa
        3   4            4.6           3.1            1.5           0.2  Iris-setosa
        4   5            5.0           3.6            1.4           0.2  Iris-setosa
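The preprocessing module is imported above but not used anywhere in this notebook. As a sketch of how it could fit in, the string labels in the Species column can be mapped to integers with LabelEncoder (the SpeciesEncoded column name is just illustrative, not part of the original notebook):

le = preprocessing.LabelEncoder()
df['SpeciesEncoded'] = le.fit_transform(df['Species'])
print(le.classes_)  # the three species names, in sorted order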

In [12]:  iris = datasets.load_iris()


In [13]:  import matplotlib.pyplot as plt

_, ax = plt.subplots()
scatter = ax.scatter(iris.data[:, 0], iris.data[:, 1], c=iris.target)
ax.set(xlabel=iris.feature_names[0], ylabel=iris.feature_names[1])
_ = ax.legend(
    scatter.legend_elements()[0], iris.target_names, loc="lower right",
)
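Seaborn is imported above but unused. One way to extend this single two-feature view to all pairs of measurements would be a pairplot over the CSV dataframe (a sketch, dropping the Id column so it is not plotted as a feature):

# Pairwise scatter plots of the four measurements, colored by species
sns.pairplot(df.drop(columns='Id'), hue='Species')
plt.show()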


In [14]:  import mpl_toolkits.mplot3d
from sklearn.decomposition import PCA

fig = plt.figure(1, figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d", elev=-150, azim=110)

X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(
    X_reduced[:, 0],
    X_reduced[:, 1],
    X_reduced[:, 2],
    c=iris.target,
    s=40,
)

ax.set_title("First three PCA dimensions")
ax.set_xlabel("1st Eigenvector")
ax.xaxis.set_ticklabels([])
ax.set_ylabel("2nd Eigenvector")
ax.yaxis.set_ticklabels([])
ax.set_zlabel("3rd Eigenvector")
ax.zaxis.set_ticklabels([])

plt.show()
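The 3D plot shows the projected points but not how much structure each component captures. Keeping a reference to the fitted PCA object would let us inspect the explained variance ratios (a small sketch, not part of the original cell):

pca = PCA(n_components=3)
X_reduced = pca.fit_transform(iris.data)
# Fraction of total variance captured by each of the three components
print(pca.explained_variance_ratio_)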


In [27]:  X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.6)

# Create a classification model (here, Gaussian Naive Bayes)
model = GaussianNB()

# Train the model on the training data
model.fit(X_train, y_train)

# Use the trained model to make predictions on the test data
y_pred = model.predict(X_test)

# Evaluate the accuracy of the model
accuracy = accuracy_score(y_test, y_pred)

# Print the accuracy
print(f"Accuracy of the model: {accuracy:.2f}")

Accuracy of the model: 0.92
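The accuracy of a single train/test split depends on the split itself (no random_state is fixed above, so the 0.92 will vary from run to run). As a hedged check, 5-fold cross-validation gives a more stable estimate:

from sklearn.model_selection import cross_val_score

# Accuracy of GaussianNB averaged across 5 different train/test folds
scores = cross_val_score(GaussianNB(), X, y, cv=5)
print(f"Mean CV accuracy: {scores.mean():.2f} (+/- {scores.std():.2f})")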

In [28]:  from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred)

# True positives for each class lie on the diagonal
tp = cm.diagonal()

# Compute precision and recall per class from the confusion matrix
for i in range(len(tp)):
    class_label = iris.target_names[i]  # Class label name from iris
    fp = cm.sum(axis=0)[i] - tp[i]  # False positives: column total minus TP
    fn = cm.sum(axis=1)[i] - tp[i]  # False negatives: row total minus TP
    precision_class = tp[i] / (tp[i] + fp)
    recall_class = tp[i] / (tp[i] + fn)

    # Print precision and recall for each class
    print(f"Precision ({class_label}): {precision_class:.2f}")
    print(f"Recall ({class_label}): {recall_class:.2f}")

Precision (setosa): 1.00
Recall (setosa): 1.00
Precision (versicolor): 0.96
Recall (versicolor): 0.81
Precision (virginica): 0.81
Recall (virginica): 0.96
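The manual loop above can be cross-checked against scikit-learn's built-in report, which computes the same per-class precision and recall (plus F1 and support) in one call:

from sklearn.metrics import classification_report

print(classification_report(y_test, y_pred, target_names=iris.target_names))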

