

PROGRAM:
import numpy as np
from sklearn.ensemble import VotingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Load the California housing dataset
housing = fetch_california_housing()
# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    housing.data, housing.target, test_size=0.2, random_state=42)
# Create individual models
model1 = LinearRegression()
model2 = DecisionTreeRegressor(random_state=42)
# Create a voting ensemble model
ensemble_model = VotingRegressor(estimators=[('lr', model1), ('dt', model2)])
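# (with no weights given, VotingRegressor averages the predictions of its fitted estimators)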
# Fit the ensemble model to the training data
ensemble_model.fit(X_train, y_train)
# Make predictions on the test data
y_pred = ensemble_model.predict(X_test)
# Calculate the mean squared error of the predictions
mse = mean_squared_error(y_test, y_pred)
print(f"Ensemble model MSE: {mse:.2f}")
OUTPUT:

RESULT:

PROGRAM:
# synthetic classification dataset
from numpy import where
from sklearn.datasets import make_classification
from matplotlib import pyplot
# define dataset
X, y = make_classification(n_samples=1000, n_features=2, n_informative=2,
                           n_redundant=0, n_clusters_per_class=1, random_state=4)
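# X has shape (1000, 2); y holds the binary class labels 0 and 1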
# create scatter plot for samples from each class
for class_value in range(2):
    # get row indexes for samples with this class
    row_ix = where(y == class_value)
    # create scatter of these samples
    pyplot.scatter(X[row_ix, 0], X[row_ix, 1])
# show the plot
pyplot.show()
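The same synthetic dataset can also be used to fit a model. The sketch below is not part of the recorded program; it assumes the X and y generated above and uses a DecisionTreeClassifier purely for illustration.
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
# hold out 20% of the samples for testing
Xc_train, Xc_test, yc_train, yc_test = train_test_split(X, y, test_size=0.2,
                                                        random_state=4)
# fit a decision tree on the training portion and score it on the held-out portion
clf = DecisionTreeClassifier(random_state=4)
clf.fit(Xc_train, yc_train)
print(f"Decision tree accuracy: {accuracy_score(yc_test, clf.predict(Xc_test)):.2f}")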
OUTPUT:

RESULT:
