
DELHI TECHNOLOGICAL UNIVERSITY

DEPARTMENT OF COMPUTER ENGINEERING

ASSIGNMENT

Information and Network Security


CO429

Submitted by:
MOHAMED AHMED
2K20/CO/270
➢ Implementation of a neural network with at least two layers, with the activation
function defined as the sigmoid function, written in Python using NumPy.
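➢ For reference, the sigmoid used below is sigmoid(x) = 1 / (1 + e^(-x)), and its derivative can be written in terms of its own output as sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)); this is why the sigmoid_derivative helper in the code is applied to values that have already been passed through the sigmoid.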

❖ The Code:

import numpy as np

def sigmoid(x):
    """Sigmoid activation function"""
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    """Derivative of the sigmoid, written in terms of the sigmoid output x"""
    return x * (1 - x)

# Define the network architecture
input_nodes = 2
hidden_nodes = 2
output_nodes = 2

# Initialize weights and biases with random values
input_to_hidden_weights = np.random.rand(input_nodes, hidden_nodes)
hidden_biases = np.random.rand(1, hidden_nodes)
hidden_to_output_weights = np.random.rand(hidden_nodes, output_nodes)
output_biases = np.random.rand(1, output_nodes)

# Define the learning rate
lr = 0.01

def forward_pass(inputs):
    """Compute the forward pass of the network"""
    hidden_layer_input = np.dot(inputs, input_to_hidden_weights) + hidden_biases
    hidden_layer_output = sigmoid(hidden_layer_input)

    output_layer_input = np.dot(hidden_layer_output, hidden_to_output_weights) + output_biases
    final_output = sigmoid(output_layer_input)

    return hidden_layer_output, final_output

def backpropagation(inputs, target, hidden_output, final_output):
    """Backpropagation to adjust weights and biases"""
    # Declare the globals before they are used anywhere in this function
    global input_to_hidden_weights, hidden_biases, hidden_to_output_weights, output_biases

    # Error and delta at the output layer
    output_error = target - final_output
    output_delta = output_error * sigmoid_derivative(final_output)

    # Error and delta propagated back to the hidden layer
    hidden_layer_error = output_delta.dot(hidden_to_output_weights.T)
    hidden_layer_delta = hidden_layer_error * sigmoid_derivative(hidden_output)

    # Update the weights and biases, scaling every update by the learning rate
    input_to_hidden_weights += lr * inputs.T.dot(hidden_layer_delta)
    hidden_biases += lr * np.sum(hidden_layer_delta, axis=0, keepdims=True)

    hidden_to_output_weights += lr * hidden_output.T.dot(output_delta)
    output_biases += lr * np.sum(output_delta, axis=0, keepdims=True)

# Train the neural network
epochs = 10000
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Example inputs
target = np.array([[0, 1], [1, 0], [1, 0], [0, 1]])  # Example targets for demonstration

for epoch in range(epochs):
    for i in range(len(inputs)):
        # Slice with i:i+1 so each sample keeps shape (1, 2) for the matrix products
        x = inputs[i:i + 1]
        t = target[i:i + 1]
        hidden_output, final_output = forward_pass(x)
        backpropagation(x, t, hidden_output, final_output)

    if epoch % 1000 == 0:
        loss = np.mean(np.square(target - forward_pass(inputs)[1]))
        print(f"Epoch {epoch}, Loss: {loss}")

# Test
hidden_output, final_output = forward_pass(np.array([[0, 1]]))
print(final_output)

➢ The given code creates a simple feedforward neural network and trains it on XOR-like
data for demonstration purposes.
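
❖ Testing the trained network (a minimal sketch, assuming the training loop above has already been run in the same session; it reuses the forward_pass, inputs, and target names from the listing):

# Evaluate the trained network on all four example input patterns
for x, t in zip(inputs, target):
    _, prediction = forward_pass(x.reshape(1, -1))
    print(f"input={x}  target={t}  prediction={np.round(prediction, 3)}")

With lr = 0.01 the predictions may still be some distance from the one-hot targets after 10000 epochs; raising the learning rate or the number of epochs is one way to see clearer separation.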
