LAB MANUAL
MACHINE LEARNING
Prepared By:
CERTIFICATE
Index
Sr. No. | Date | Practical | Page | Sign
import numpy as np

v1 = np.arange(1, 33)
print(v1)
print('----------------------------------')

# mean of the values 1..32
v2 = np.mean(v1)
print(v2)
print('----------------------------------')

# median of the values 1..10
v4 = np.arange(1, 11)
v3 = np.median(v4)
print(v3)
print('----------------------------------')

# mean of the values 1..10 computed manually with a loop
sum = 0
for i in range(1, 11):
    sum = sum + i
print(sum)        # 55
mean = sum / 10   # divide by the number of values (10)
print(mean)       # 5.5
Output:
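As a check, the result of the manual loop should match numpy's built-in mean; a quick verification sketch:

import numpy as np

# mean of 1..10 computed directly; should equal the loop result above (5.5)
print(np.mean(np.arange(1, 11)))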
import numpy as np
import matplotlib.pyplot as plt

x = np.random.normal(170, 10, 250)   # sample data (assumed for illustration)
plt.hist(x, 100)
plt.show()
Output:
import matplotlib.pyplot as plt

# sample data (assumed for illustration)
x = [5, 7, 8, 7, 2, 17, 2, 9, 4, 11, 12, 9, 6]
y = [99, 86, 87, 88, 111, 86, 103, 87, 94, 78, 77, 85, 86]
plt.scatter(x, y)
plt.show()
Output:
from tkinter import *

# show the current scale value in the label when the button is pressed
def select():
    sel = "Value = " + str(v.get())
    label.config(text=sel)

top = Tk()
top.geometry("200x100")
v = DoubleVar()
scale = Scale(top, variable=v)                               # assumed: a Scale bound to v
scale.pack(anchor=CENTER)
btn = Button(top, text="Get Scale Value", command=select)   # assumed button setup
btn.pack(anchor=CENTER)
label = Label(top)
label.pack()
top.mainloop()
Output:
import pandas
from sklearn.preprocessing import StandardScaler

# standardise the Weight and Volume columns (zero mean, unit variance)
scale = StandardScaler()
df = pandas.read_csv("cars2.csv")
X = df[['Weight', 'Volume']]
scaledX = scale.fit_transform(X)
print(scaledX)
Output:
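When the scaled features are later used for prediction, any new observation has to be transformed with the same fitted scaler; a minimal sketch, where the weight and volume values are made-up examples:

# transform one new car with the scaler fitted above (values are illustrative)
new_car = [[2300, 1.3]]
print(scale.transform(new_car))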
import numpy
import matplotlib.pyplot as plt

numpy.random.seed(2)
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x

# first 80 points for training, last 20 for testing
train_x = x[:80]
train_y = y[:80]
test_x = x[80:]
test_y = y[80:]

# assumed: 4th-degree polynomial fitted to the training data, drawn for x in 0..6
mymodel = numpy.poly1d(numpy.polyfit(train_x, train_y, 4))
myline = numpy.linspace(0, 6, 100)

plt.scatter(train_x, train_y)
plt.plot(myline, mymodel(myline))
plt.show()
Output:
import numpy
import matplotlib.pyplot as plt

numpy.random.seed(2)
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x

train_x = x[:80]
train_y = y[:80]
test_x = x[80:]
test_y = y[80:]

# assumed: 4th-degree polynomial fitted to the training data, drawn for x in 0..6
mymodel = numpy.poly1d(numpy.polyfit(train_x, train_y, 4))
myline = numpy.linspace(0, 6, 100)

plt.scatter(train_x, train_y)
plt.plot(myline, mymodel(myline))
plt.show()
Output:
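The held-out test data is only useful if the fitted model is scored on it; a minimal sketch using sklearn's r2_score, assuming the mymodel polynomial fitted above:

from sklearn.metrics import r2_score

# R2 of the polynomial on the 20 held-out test points (closer to 1 is better)
r2 = r2_score(test_y, mymodel(test_x))
print(r2)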
import pandas as pd
from sklearn.model_selection import train_test_split

# build_tree, getLeafNodes, getInnerNodes, computeAccuracy, prune_tree and
# print_tree are the decision-tree helper functions used in this practical
df = pd.read_csv('data_set/Social_Network_Ads.csv')
header = list(df.columns)
lst = df.values.tolist()
trainDF, testDF = train_test_split(lst, test_size=0.2)   # assumed 80/20 split

# build the initial tree and measure its accuracy on the test data
t = build_tree(trainDF, header)
leaves = getLeafNodes(t)
innerNodes = getInnerNodes(t)
maxAccuracy = computeAccuracy(testDF, t)
print_tree(t)

# prune each inner node (except the root) in turn and remember the node
# whose removal gives the highest test accuracy
nodeIdToPrune = -1
for node in innerNodes:
    if node.id != 0:
        prune_tree(t, [node.id])
        currentAccuracy = computeAccuracy(testDF, t)
        if currentAccuracy > maxAccuracy:
            maxAccuracy = currentAccuracy
            nodeIdToPrune = node.id
        t = build_tree(trainDF, header)   # rebuild before trying the next node
        if maxAccuracy == 1:
            break

# rebuild the tree and apply the best pruning found (if any)
if nodeIdToPrune != -1:
    t = build_tree(trainDF, header)
    prune_tree(t, [nodeIdToPrune])
else:
    t = build_tree(trainDF, header)

print("\n********************************************************************")
print("********************************************************************\n")
print_tree(t)
Output:
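To see what pruning gained, the accuracy of the final tree can be reported next to the best accuracy found during the search; a short sketch using the same helpers and variables as above:

# report the effect of pruning on test accuracy
print("Best accuracy found while pruning:", maxAccuracy)
print("Accuracy of the final tree:", computeAccuracy(testDF, t))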
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix

url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
# assumed column names for the iris data file
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class']
dataset = pd.read_csv(url, names=names)

X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

# scale the features using statistics computed on the training set only
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

# train and evaluate a KNN classifier with k = 5
classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

# mean error on the test set for k = 1..39 (assumed range)
error = []
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train, y_train)
    pred_i = knn.predict(X_test)
    error.append(np.mean(pred_i != y_test))

plt.figure(figsize=(12, 6))
plt.plot(range(1, 40), error, color='red', linestyle='dashed', marker='o',
         markerfacecolor='blue', markersize=10)
plt.xlabel('K Value')
plt.ylabel('Mean Error')
plt.show()
Output:
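The error list plotted above can also be used to pick the k with the lowest mean error and refit the classifier; a minimal sketch:

# k values 1..39 correspond to positions 0..38 in the error list
best_k = error.index(min(error)) + 1
print("Best k:", best_k)

best_knn = KNeighborsClassifier(n_neighbors=best_k)
best_knn.fit(X_train, y_train)
print(classification_report(y_test, best_knn.predict(X_test)))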
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values   # assumed: annual income and spending score columns

# elbow method: within-cluster sum of squares (WCSS) for k = 1..10
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters=i, random_state=42)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# fit the final model (assumed k = 5) and plot the clusters and centroids
kmeans = KMeans(n_clusters=5, random_state=42)
y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_kmeans, label='Customers')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], c='red', label='Centroids')
plt.title('Clusters of customers')
plt.legend()
plt.show()
Output:
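After fitting, the model exposes the final centroids, and the size of each cluster can be read from the predicted labels; a quick inspection sketch:

# centroid coordinates and the number of customers in each cluster
print(kmeans.cluster_centers_)
print(pd.Series(y_kmeans).value_counts())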
import pandas as pd
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering

dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values   # assumed: annual income and spending score columns

# dendrogram (Ward linkage) to choose the number of clusters
dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distances')
plt.show()

# fit hierarchical (agglomerative) clustering with the chosen number of clusters (assumed k = 5)
hc = AgglomerativeClustering(n_clusters=5, linkage='ward')
y_hc = hc.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_hc, label='Customers')
plt.title('Clusters of customers')
plt.legend()
plt.show()
Output: