Professional Documents
Culture Documents
9 Project-I
10 Project-II
Practical -1
Theory:
Output:
2. SciPy:
import numpy as np
# The original had this import split across two lines (a syntax error);
# rejoined into one statement. NOTE: scipy.fftpack is the legacy API;
# scipy.fft is the modern replacement.
from scipy.fftpack import fft, ifft

# Forward FFT of a small integer sequence; y is a complex array.
x = np.array([0, 1, 2, 3])
y = fft(x)
print(y)
Output:
3. Scikit-learn:
# Jupyter/IPython magic: render matplotlib figures inline in the notebook.
# (Not valid in a plain .py script.)
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
# NOTE(review): KMeans is imported but never used in this fragment --
# presumably the clustering step appears later in the practical.
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
# Load scikit-learn's bundled handwritten-digits dataset; .data holds one
# flattened image per row, and .shape reports its dimensions.
digits = load_digits()
digits.data.shape
Output:
4. Theano:
import theano
import theano.tensor as T
# Symbolic double-precision matrix input named 'x'.
x = T.dmatrix('x')
# Elementwise logistic sigmoid expression built from the symbolic x.
s = 1 / (1 + T.exp(-x))
# Compile the symbolic graph into a callable Python function.
logistic = theano.function([x], s)
# Evaluate the compiled function on a concrete 2x2 matrix.
logistic([[0, 1], [-1, -2]])
Output:
5. Tensorflow:
Step 1: Define the variables. In this example, the values are:
x = 1, y = 2, and z = 3
Step 2: Add x and y.
Step 3: Now Multiply z with the sum of x and y.
Finally, the result comes out as 9.
6. PyTorch:
# The original fragment never imported torch; without this line every
# statement below raises NameError.
import torch

# Build three 2x3 tensors: uniform-random, all-ones, and all-zeros.
shape = (2, 3)
rand_tensor = torch.rand(shape)
ones_tensor = torch.ones(shape)
zeros_tensor = torch.zeros(shape)
print(f"Random Tensor: \n {rand_tensor} \n")
print(f"Ones Tensor: \n {ones_tensor} \n")
print(f"Zeros Tensor: \n {zeros_tensor}")
Output:
Random Tensor: tensor([[0.0048, 0.9871, 0.2899], [0.8372, 0.5228, 0.4136]])
Ones Tensor: tensor([[1., 1., 1.], [1., 1., 1.]])
Zeros Tensor: tensor([[0., 0., 0.], [0., 0., 0.]])
7. Pandas:
import pandas as pd

# Reference table of the five BRICS countries, one row per country.
countries = ["Brazil", "Russia", "India", "China", "South Africa"]
capitals = ["Brasilia", "Moscow", "New Delhi", "Beijing", "Pretoria"]
areas = [8.516, 17.10, 3.286, 9.597, 1.221]
populations = [200.4, 143.5, 1252, 1357, 52.98]

data = {
    "country": countries,
    "capital": capitals,
    "area": areas,
    "population": populations,
}
data_table = pd.DataFrame(data)
print(data_table)
Output:
8. Matplotlib:
import numpy as np
import matplotlib.pyplot as plt

# Compute the x and y coordinates for points on a sine curve.
x = np.arange(0, 3 * np.pi, 0.1)
# The original fused the next two statements onto one line
# (`y = np.sin(x) plt.title(...)`) -- a syntax error; split them.
y = np.sin(x)
plt.title("sine wave form")
# Plot the points using matplotlib.
plt.plot(x, y)
plt.show()
Output:
9. Seaborn:
Example 1:
import seaborn as sns
# Use the "dark" style for figure aesthetics.
sns.set(style="dark")
# Load seaborn's bundled 'fmri' example dataset into a DataFrame.
fmri = sns.load_dataset("fmri")
# Plot the signal over time, one hue per region and one line style per event.
sns.lineplot(x="timepoint",
y="signal",
hue="region",
style="event",
data=fmri)
Output:
Example 2:
# Importing libraries
# Importing libraries
import numpy as np
import seaborn as sns

# Selecting style as white, dark, whitegrid, darkgrid or ticks.
sns.set(style="white")
# Generate a random univariate dataset (fixed seed for reproducibility).
rs = np.random.RandomState(10)
d = rs.normal(size=100)
# Plot a histogram with a KDE overlay, bin size determined automatically.
# distplot was deprecated in seaborn 0.11 and removed in 0.14; histplot
# is its supported replacement for histogram + KDE.
sns.histplot(d, kde=True, color="m")
Output:
Conclusion:
Practical -2
Uses of PCA:
• It is used to find inter-relation between variables in the data.
• It is used to interpret and visualize data.
• It reduces the number of variables, which makes further analysis simpler.
• It’s often used to visualize genetic distance and relatedness between populations.
Conclusion:
Practical -3
Theory:
#Import Dependencies
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
from sklearn.metrics import accuracy_score
Conclusion:
Practical -4
Theory:
# Learning rate
learning_rate = 0.01
# Number of passes over the training data when updating the parameters.
training_epochs = 100
# Synthetic training set: y = x plus unit Gaussian noise.
x_train = np.linspace(0, 10, 100)
y_train = x_train + np.random.normal(0, 1, 100)
# Plot of the raw data.
plt.scatter(x_train, y_train)
# Trainable parameters (slope and intercept), both starting at 0.
weight = tf.Variable(0.)
bias = tf.Variable(0.)

# Define linear regression expression y.
def linreg(x):
    """Linear model: weight * x + bias."""
    return weight * x + bias

# Train the model with plain gradient descent.
# The original used `tape` and `loss` without ever defining them; the
# GradientTape context and loss expression below restore them.
# NOTE(review): loss reconstructed as mean squared error -- confirm
# against the original practical.
for epoch in range(training_epochs):
    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(linreg(x_train) - y_train))
    # Get gradients
    gradients = tape.gradient(loss, [weight, bias])
    # Adjust weights
    weight.assign_sub(gradients[0] * learning_rate)
    bias.assign_sub(gradients[1] * learning_rate)
    # Print output
    print(f"Epoch count {epoch}: Loss value: {loss.numpy()}")
print(weight.numpy())
print(bias.numpy())
Result/Output:
Conclusion:
Practical -5
Theory:
import numpy as np
import seaborn as sns
sns.set(style='whitegrid')
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
# NOTE(review): check_output is imported but never used in this fragment.
from subprocess import check_output
# Load the Iris CSV from a hard-coded local path -- not portable; adjust per machine.
iris = pd.read_csv('C:/Users/soniy/Iris.csv')
iris.shape
iris.head()
# Keep only the first 100 rows (in the standard Iris ordering these are the
# setosa and versicolor samples), turning this into a binary problem.
iris = iris[:100]
iris.shape
# Encode the two species labels as 0 and 1.
iris.Species = iris.Species.replace(to_replace=['Iris-setosa', 'Iris-versicolor'],
value=[0, 1])
# Scatter the two classes by sepal length/width.
# NOTE(review): the second legend label is misspelled ('Iris-versicolo'),
# and the slice `51:` skips row 50 -- both look like typos; verify.
plt.scatter(iris[:50].SepalLengthCm,iris[:50].SepalWidthCm,label='Iris-setosa')
plt.scatter(iris[51:].SepalLengthCm,iris[51:].SepalWidthCm,label='Iris-versicolo')
plt.xlabel('SepalLength')
plt.ylabel('SepalWidth')
plt.legend(loc='best')
# Feature matrix (drop the Id column and the target) and label vector.
X = iris.drop(labels=['Id', 'Species'], axis=1).values
y = iris.Species.values
# diff set
# NOTE(review): train_index is never defined in this fragment -- the
# train/test split that produces it is missing.
test_index = np.array(list(set(range(len(X))) - set(train_index)))
train_X = X[train_index]
train_y = y[train_index]
test_X = X[test_index]
test_y = y[test_index]
tf.compat.v1.global_variables_initializer()
# Model parameters: a 4x1 weight vector and a scalar bias.
A = tf.Variable(tf.random.normal(shape=[4, 1]))
b = tf.Variable(tf.random.normal(shape=[1, 1]))
# NOTE(review): TF1-style session code; it only works after the
# tensorflow.compat.v1 import / disable_v2_behavior() below, which appears
# too late in this fragment.
init =tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Define placeholders
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# training model
# NOTE(review): iter_num, batch_size, data, target, goal, loss, accuracy,
# train_acc and test_acc are used below but never defined here -- the
# placeholder/graph-construction section of the practical is missing.
for epoch in range(iter_num):
# Generate random batch index
batch_index = np.random.choice(len(train_X), size=batch_size)
batch_train_X = train_X[batch_index]
batch_train_y = np.matrix(train_y[batch_index]).T
sess.run(goal, feed_dict={data: batch_train_X, target: batch_train_y})
temp_loss = sess.run(loss, feed_dict={data: batch_train_X, target:
batch_train_y})
# convert into a matrix, and the shape of the placeholder to correspond
temp_train_acc = sess.run(accuracy, feed_dict={data: train_X, target:
np.matrix(train_y).T})
temp_test_acc = sess.run(accuracy, feed_dict={data: test_X, target: np.matrix(test_y).T})
# Plot the recorded accuracy curves.
plt.plot(train_acc, 'b-', label='train accuracy')
plt.plot(test_acc, 'k-', label='test accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Train and Test Accuracy')
plt.legend(loc='best')
plt.show()
Result/Output:
Conclusion:
Practical -6
Theory:
# Small Keras binary classifier: 2 inputs -> 4 ReLU units -> 1 sigmoid output.
model = tf.keras.models.Sequential()
#Add the layers
model.add(tf.keras.layers.Dense(4,input_dim=2,activation='relu'))
model.add(tf.keras.layers.Dense(1,activation='sigmoid'))
# MSE loss with the Adam optimizer; binary_accuracy tracks thresholded hits.
model.compile(loss='mean_squared_error',optimizer='adam',metrics=['binary_accuracy'])
model.summary()
# NOTE(review): training_data and target_data are not defined in this
# fragment -- presumably a small truth-table dataset; confirm against the
# full practical.
history = model.fit(training_data, target_data, epochs=500, verbose=2)
# Round the sigmoid outputs to hard 0/1 predictions.
print (model.predict(training_data).round())
# Plot the loss and accuracy curves recorded in the training history.
loss_curve = history.history["loss"]
acc_curve = history.history["binary_accuracy"]
plt.plot(loss_curve, label="Train")
plt.legend(loc='upper left')
plt.title("Loss")
plt.show()
plt.plot(acc_curve, label="Train")
plt.legend(loc='upper left')
plt.title("Accuracy")
plt.show()
Simple Neural Network in Python.
import numpy as np
# Sigmoid function
def nonlin(x, deriv=False):
    """Logistic sigmoid; with deriv=True, its derivative.

    When deriv is True, x is assumed to already be a sigmoid OUTPUT,
    so the derivative is simply x * (1 - x).
    """
    # The original body was unindented (extraction damage) and compared
    # `deriv == True`; restored indentation and idiomatic truth test.
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
# Input samples: four 3-bit rows.
rows = [
    [0, 0, 1],
    [0, 1, 1],
    [0, 1, 0],
    [1, 1, 1],
]
X = np.array(rows)
# Target labels as a 4x1 column vector.
y = np.array([[0, 0, 1, 1]]).T
# Seed the RNG so the initial weights are reproducible.
np.random.seed(1)
# Initialize the 3x1 weight vector, uniform in [-1, 1).
w0 = 2 * np.random.random((3, 1)) - 1
Result/Output:
Conclusion:
Practical -7
Theory: Forward propagation (or forward pass) refers to the calculation and storage of
intermediate variables (including outputs) for a neural network in order from the input layer to the
output layer. We now work step-by-step through the mechanics of a neural network with one
hidden layer.
# The original had this import split across two lines (a syntax error);
# rejoined into one statement.
from sklearn.model_selection import train_test_split

# Splitting the data into training and testing data. stratify=labels keeps
# the class ratio equal in both splits; random_state=0 makes it reproducible.
X_train, X_val, Y_train, Y_val = train_test_split(data, labels, stratify=labels, random_state=0)
print(X_train.shape, X_val.shape)
class FeedForwardNetwork:
    """Feed-forward net with scalar weights.

    Six weights and three biases -- presumably a 2-input, 2-hidden-unit,
    1-output network (TODO confirm against the forward pass elsewhere in
    the practical).
    """

    def __init__(self):
        # Fixed seed so every run starts from the same random weights.
        np.random.seed(0)
        self.w1 = np.random.randn()
        self.w2 = np.random.randn()
        self.w3 = np.random.randn()
        self.w4 = np.random.randn()
        self.w5 = np.random.randn()
        self.w6 = np.random.randn()
        # The original fused these three assignments onto one line
        # (`self.b1 = 0 self.b2 = 0 self.b3 = 0`) -- a syntax error.
        self.b1 = 0
        self.b2 = 0
        self.b3 = 0
class FeedForwardNetwork_Vectorised:
    """Vectorised variant: weights and biases held as matrices.

    W1/B1 are the 2x2 / 1x2 hidden-layer parameters, W2/B2 the 2x1 / 1x1
    output-layer parameters.
    """

    def __init__(self):
        # The original body was unindented (extraction damage); restored.
        # Fixed seed so initial weights match across runs.
        np.random.seed(0)
        self.W1 = np.random.randn(2, 2)
        self.W2 = np.random.randn(2, 1)
        # Bias rows start at zero.
        self.B1 = np.zeros((1, 2))
        self.B2 = np.zeros((1, 1))
Result/Output:
Conclusion:
Practical -8
# Load dataset
# NOTE(review): load_iris is not imported in this fragment (needs
# sklearn.datasets), and y is used on the next line before being defined.
data = load_iris()
y[:3]
# Initialize training hyperparameters.
learning_rate = 0.1
iterations = 5000
# NOTE(review): y_train is not defined in this fragment either -- the
# one-hot encoding / split step appears to be missing.
N = y_train.size
# number of input features
input_size = 4
# number of hidden layers neurons
hidden_size = 2
# number of neurons at the output layer
output_size = 3
# Per-iteration metrics are accumulated into this frame.
results = pd.DataFrame(columns=["mse", "accuracy"])
# Initialize weights
np.random.seed(10)
# initializing weight for the hidden layer
W1 = np.random.normal(scale=0.5, size=(input_size, hidden_size))
# initializing weight for the output layer
W2 = np.random.normal(scale=0.5, size=(hidden_size , output_size))
def sigmoid(x):
    """Logistic sigmoid, applied elementwise.

    The original body was unindented (extraction damage); restored.
    """
    return 1 / (1 + np.exp(-x))
def mean_squared_error(y_pred, y_true):
    """Half mean squared error over all elements of y_pred.

    The 1/2 factor makes the gradient of the squared term come out clean.
    The original body was unindented (extraction damage); restored.
    """
    residual = y_pred - y_true
    return (residual ** 2).sum() / (2 * y_pred.size)
def accuracy(y_pred, y_true):
    """Fraction of rows whose argmax class matches (one-hot / score rows).

    The original body was unindented (extraction damage); restored.
    """
    hits = y_pred.argmax(axis=1) == y_true.argmax(axis=1)
    return hits.mean()
# One training step: feedforward propagation.
# on hidden layer
Z1 = np.dot(X_train, W1)
A1 = sigmoid(Z1)
# on output layer
Z2 = np.dot(A1, W2)
A2 = sigmoid(Z2)
# Calculating error on the training predictions.
mse = mean_squared_error(A2, y_train)
acc = accuracy(A2, y_train)
# DataFrame.append was removed in pandas 2.0; concatenate a one-row
# frame instead (same resulting table, ignore_index preserved).
results = pd.concat(
    [results, pd.DataFrame([{"mse": mse, "accuracy": acc}])],
    ignore_index=True,
)
# Backpropagation.
# NOTE(review): E1/dW1 and E2/dW2 are named in reverse of the layers they
# update -- dW1 drives the W2 update and dW2 the W1 update. The math is
# consistent; only the names are confusing.
E1 = A2 - y_train
dW1 = E1 * A2 * (1 - A2)
E2 = np.dot(dW1, W2.T)
dW2 = E2 * A1 * (1 - A1)
# Weight updates: batch gradient descent, averaged over the N samples.
W2_update = np.dot(A1.T, dW1) / N
W1_update = np.dot(X_train.T, dW2) / N
W2 = W2 - learning_rate * W2_update
W1 = W1 - learning_rate * W1_update
results.accuracy.plot(title="Accuracy")
# Feedforward on the held-out test set with the trained weights.
Z1 = np.dot(X_test, W1)
A1 = sigmoid(Z1)
Z2 = np.dot(A1, W2)
A2 = sigmoid(Z2)
acc = accuracy(A2, y_test)
print("Accuracy: {}".format(acc))
# The original read `Import numpy as np` (capital I) -- a syntax error.
import numpy as np

class NeuralNetwork:
    """Network with a 3-wide input, 4-unit hidden layer and 1 output."""

    def __init__(self):
        np.random.seed(10)  # for generating the same results
        self.wij = np.random.rand(3, 4)  # input to hidden layer weights
        self.wjk = np.random.rand(4, 1)  # hidden layer to output weights
Result/Output:
Conclusion:
Project-I
Project-II