
MSG-SGKM COLLEGE OF ARTS, SCIENCE AND COMMERCE

Tilak Road, Ghatkopar (E), Mumbai 400 077

DEPARTMENT OF INFORMATION TECHNOLOGY

2020-2021

Certificate
This is to certify that Mr./Miss. _ABHISHEK SURESH PAL_____________________

with Seat No. _____21903__________ has successfully completed the necessary

course of experiments in the subject of Operating Systems during the academic

year 2020-2021, complying with the requirements of the University of Mumbai for the

program B.Sc. (Computer Science) in Sem _3_

______________________ _________________________
Internal Examiner External Examiner

_____________________
Co-Ordinator Stamp
INDEX

Sr no.  Page no.  TOPICS                                                Date of experiment  Date of submission  Remarks
1       3         The Producer - Consumer problem using Shared Memory   05/12/2020          08/12/2020
2       6         The Readers - Writers Problem                         05/12/2020          08/12/2020
3       13        Implement FCFS scheduling algorithm in C program      05/12/2020          08/12/2020
4       16        Implement SJF scheduling algorithm in C program       05/12/2020          08/12/2020
5       20        Implement RR scheduling algorithm in C program        05/12/2020          08/12/2020
6       24        FIFO page replacement algorithm                       05/12/2020          08/12/2020
7       27        LRU page replacement algorithm                        05/12/2020          08/12/2020

Practical 1
Aim: The Producer – Consumer problem using Shared Memory
Theory:
The Producer-Consumer problem is a classic synchronization problem. It is also
known as the Bounded Buffer problem. It involves two tasks, the producer and the
consumer, which share a common fixed-size buffer.
● The producer creates data, puts it into the buffer, and starts over again.
● The consumer consumes the data; in other words, the consumer removes from the
buffer the data that the producer has created.

The problem is to ensure that the producer does not add data to the buffer once it is
full and, at the same time, that the consumer does not remove data from an empty
buffer. The Producer-Consumer problem can be solved by using semaphores (or a
mutex with condition variables) to coordinate access to the buffer.
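As an illustration only, the coordination can be sketched with counting semaphores:
one semaphore counts the empty slots, another counts the filled slots, and a mutex
guards the buffer itself. The names below (empty_slots, full_slots, buffer_lock,
produce_item, consume_item) are assumed for this sketch; the program actually
submitted for this practical, shown under Input, uses a mutex with condition
variables instead.

/* Illustrative sketch only (assumed names): bounded buffer of size N
   guarded by two counting semaphores and a mutex. */
#include <semaphore.h>
#include <pthread.h>
#define N 10

char shared_buffer[N];
int in_index = 0, out_index = 0;
sem_t empty_slots;                 /* counts free slots, initialised to N   */
sem_t full_slots;                  /* counts filled slots, initialised to 0 */
pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;

void init_semaphores(void)
{
    sem_init(&empty_slots, 0, N);  /* all N slots are free at the start */
    sem_init(&full_slots, 0, 0);   /* no items produced yet             */
}

void produce_item(char item)
{
    sem_wait(&empty_slots);               /* block while the buffer is full  */
    pthread_mutex_lock(&buffer_lock);
    shared_buffer[in_index] = item;       /* put the item into the buffer    */
    in_index = (in_index + 1) % N;
    pthread_mutex_unlock(&buffer_lock);
    sem_post(&full_slots);                /* one more item is available      */
}

char consume_item(void)
{
    sem_wait(&full_slots);                /* block while the buffer is empty */
    pthread_mutex_lock(&buffer_lock);
    char item = shared_buffer[out_index]; /* take the item out of the buffer */
    out_index = (out_index + 1) % N;
    pthread_mutex_unlock(&buffer_lock);
    sem_post(&empty_slots);               /* one more slot is free           */
    return item;
}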
Input:
#include<stdio.h>
#include<pthread.h>
#include<stdlib.h>
#define Buffer_Limit 10

int Buffer_Index_Value = 0;
char *Buffer_Queue;

pthread_mutex_t mutex_variable = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t Buffer_Queue_Not_Full = PTHREAD_COND_INITIALIZER;
pthread_cond_t Buffer_Queue_Not_Empty = PTHREAD_COND_INITIALIZER;

void *Consumer(void *arg)
{
    while(1)
    {
        pthread_mutex_lock(&mutex_variable);
        /* wait while the buffer is empty */
        while(Buffer_Index_Value == 0)
            pthread_cond_wait(&Buffer_Queue_Not_Empty, &mutex_variable);
        printf("Consumer:%d\t", Buffer_Index_Value--);
        pthread_mutex_unlock(&mutex_variable);
        pthread_cond_signal(&Buffer_Queue_Not_Full);
    }
    return NULL;
}

void *Producer(void *arg)
{
    while(1)
    {
        pthread_mutex_lock(&mutex_variable);
        /* wait while the buffer is full */
        while(Buffer_Index_Value == Buffer_Limit)
            pthread_cond_wait(&Buffer_Queue_Not_Full, &mutex_variable);
        Buffer_Queue[Buffer_Index_Value++] = '@';
        printf("Producer:%d\t", Buffer_Index_Value);
        pthread_mutex_unlock(&mutex_variable);
        pthread_cond_signal(&Buffer_Queue_Not_Empty);
    }
    return NULL;
}

int main()
{
    pthread_t producer_thread_id, consumer_thread_id;
    Buffer_Queue = (char *) malloc(sizeof(char) * Buffer_Limit);
    pthread_create(&producer_thread_id, NULL, Producer, NULL);
    pthread_create(&consumer_thread_id, NULL, Consumer, NULL);
    pthread_join(producer_thread_id, NULL);
    pthread_join(consumer_thread_id, NULL);
    return 0;
}

Output:
Practical 2
Aim: The Readers - Writers Problem
Theory:
The readers-writers problem relates to an object, such as a file, that is shared
between multiple processes. Some of these processes are readers, i.e., they only want
to read data from the object, and some are writers, i.e., they want to write to the
object.
The goal is to manage synchronization so that the object's data is never corrupted.
For example, if two readers access the object at the same time there is no problem;
however, if two writers, or a reader and a writer, access the object at the same
time, there may be problems.

To solve this, a writer must get exclusive access to the object, i.e., while a
writer is accessing the object, no other reader or writer may access it.
Multiple readers, however, can access the object at the same time. This
can be implemented using semaphores.
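As an illustration only, the classic "first readers-writers" solution can be
sketched with two semaphores and a reader count. The names below (resource_lock,
rcount_lock, read_count, reader_section, writer_section) are assumed for this
sketch; the program submitted for this practical, shown under Input, instead builds
a monitor from a mutex and condition variables.

/* Illustrative sketch only (assumed names): first readers-writers
   solution using two binary semaphores and a shared reader count. */
#include <semaphore.h>

sem_t resource_lock;     /* initialised to 1: exclusive access to the object */
sem_t rcount_lock;       /* initialised to 1: protects read_count            */
int read_count = 0;      /* number of readers currently reading              */

void init_rw(void)
{
    sem_init(&resource_lock, 0, 1);
    sem_init(&rcount_lock, 0, 1);
}

void writer_section(void)
{
    sem_wait(&resource_lock);      /* writer gets exclusive access */
    /* ... write to the shared object ... */
    sem_post(&resource_lock);
}

void reader_section(void)
{
    sem_wait(&rcount_lock);
    read_count++;
    if (read_count == 1)           /* first reader locks out writers */
        sem_wait(&resource_lock);
    sem_post(&rcount_lock);

    /* ... read the shared object ... */

    sem_wait(&rcount_lock);
    read_count--;
    if (read_count == 0)           /* last reader lets writers back in */
        sem_post(&resource_lock);
    sem_post(&rcount_lock);
}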

Input:

#include <iostream>
#include <pthread.h>
#include <unistd.h>
using namespace std;

class monitor {
private:
    // no. of readers
    int rcnt;
    // no. of writers
    int wcnt;
    // no. of readers waiting
    int waitr;
    // no. of writers waiting
    int waitw;
    // condition variable to check whether reader can read
    pthread_cond_t canread;
    // condition variable to check whether writer can write
    pthread_cond_t canwrite;
    // mutex for synchronisation
    pthread_mutex_t condlock;

public:
    monitor()
    {
        rcnt = 0;
        wcnt = 0;
        waitr = 0;
        waitw = 0;
        pthread_cond_init(&canread, NULL);
        pthread_cond_init(&canwrite, NULL);
        pthread_mutex_init(&condlock, NULL);
    }

    // the mutex provides synchronisation so that no other thread
    // can change the shared counters while one thread is using them
    void beginread(int i)
    {
        pthread_mutex_lock(&condlock);

        // if there are active or waiting writers
        if (wcnt == 1 || waitw > 0) {
            // incrementing waiting readers
            waitr++;
            // reader suspended
            pthread_cond_wait(&canread, &condlock);
            waitr--;
        }

        // else reader reads the resource
        rcnt++;
        cout << "reader " << i << " is reading\n";
        pthread_mutex_unlock(&condlock);
        pthread_cond_broadcast(&canread);
    }

    void endread(int i)
    {
        // if there are no readers left then a writer enters the monitor
        pthread_mutex_lock(&condlock);
        if (--rcnt == 0)
            pthread_cond_signal(&canwrite);
        pthread_mutex_unlock(&condlock);
    }

    void beginwrite(int i)
    {
        pthread_mutex_lock(&condlock);

        // a writer can enter when there are no active
        // or waiting readers or another writer
        if (wcnt == 1 || rcnt > 0) {
            ++waitw;
            pthread_cond_wait(&canwrite, &condlock);
            --waitw;
        }
        wcnt = 1;
        cout << "writer " << i << " is writing\n";
        pthread_mutex_unlock(&condlock);
    }

    void endwrite(int i)
    {
        pthread_mutex_lock(&condlock);
        wcnt = 0;

        // if any readers are waiting, they are unblocked first
        if (waitr > 0)
            pthread_cond_signal(&canread);
        else
            pthread_cond_signal(&canwrite);
        pthread_mutex_unlock(&condlock);
    }
};

// global object of monitor class
monitor M;

void* reader(void* id)
{
    int c = 0;
    int i = *(int*)id;

    // each reader attempts to read 5 times
    while (c < 5) {
        usleep(1);
        M.beginread(i);
        M.endread(i);
        c++;
    }
    return NULL;
}

void* writer(void* id)
{
    int c = 0;
    int i = *(int*)id;

    // each writer attempts to write 5 times
    while (c < 5) {
        usleep(1);
        M.beginwrite(i);
        M.endwrite(i);
        c++;
    }
    return NULL;
}

int main()
{
    pthread_t r[5], w[5];
    int id[5];
    for (int i = 0; i < 5; i++) {
        id[i] = i;
        // creating threads which execute the reader function
        pthread_create(&r[i], NULL, &reader, &id[i]);
        // creating threads which execute the writer function
        pthread_create(&w[i], NULL, &writer, &id[i]);
    }
    for (int i = 0; i < 5; i++) {
        pthread_join(r[i], NULL);
    }
    for (int i = 0; i < 5; i++) {
        pthread_join(w[i], NULL);
    }
    return 0;
}
Output:

Turnaround time:
• Turnaround time is the time interval between the submission of a process and
its completion.
• Turnaround time = completion time – submission (arrival) time
Waiting time:
• Waiting time is the difference between turnaround time and burst time.
• Waiting time = turnaround time – burst time.

Completion time:
• Completion time is the time at which a process finishes its execution.
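For example, with assumed values: if a process is submitted at time 2, has a burst
time of 5 and completes at time 10, then its turnaround time = 10 – 2 = 8 and its
waiting time = 8 – 5 = 3.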
Practical 3
Aim: Implement FCFS scheduling algorithm in C program
Theory:
FCFS Scheduling:
The first come, first served algorithm is commonly abbreviated as the FCFS algorithm.
It works on the First In, First Out (FIFO) principle: the incoming requests or jobs
in the system queue are executed in the order in which they arrive.
FCFS is a non-preemptive scheduling algorithm, so once the CPU is allocated to a
particular job, that job runs to completion; the CPU cannot switch to another job in
the queue before the current one finishes.
The FCFS schedule is usually represented on paper using a Gantt chart. However, the
FCFS scheduling algorithm is not very efficient in terms of performance
optimization.
It is not optimized for time-sharing systems. The average waiting time for the first come
first serve scheduling algorithm is highly dependent on the burst time of the jobs.
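For example, assume three jobs arrive in the order P1, P2, P3 with burst times 5, 3
and 8. Under FCFS, P1 waits 0, P2 waits 5 and P3 waits 8, so the average waiting
time is (0 + 5 + 8) / 3 ≈ 4.33, and the turnaround times are 5, 8 and 16
respectively.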

Advantages:
● Simple and easy to implement
● Every process/job gets executed completely
● Lower possibilities of starvation

Disadvantages:
● Poor performance due to high average waiting time
● There is no option for pre-emption of a job
● Higher average turnaround time
● Inefficient for time-sharing systems
Input:
#include<stdio.h>

int main()
{
    float burst_time[30], waiting_time[30], turnaround_time[30];
    float average_waiting_time = 0.0, average_turnaround_time = 0.0;
    int count, j, total_process;

    printf("Enter The Number of Processes To Execute:\t");
    scanf("%d", &total_process);
    printf("\nEnter The Burst Time of Processes:\n\n");
    for(count = 0; count < total_process; count++)
    {
        printf("Process [%d]:", count + 1);
        scanf("%f", &burst_time[count]);
    }

    waiting_time[0] = 0;
    for(count = 1; count < total_process; count++)
    {
        waiting_time[count] = 0;
        for(j = 0; j < count; j++)
        {
            waiting_time[count] = waiting_time[count] + burst_time[j];
        }
    }

    printf("\nProcess\t\tBurst Time\tWaiting Time\tTurnaround Time\n");
    for(count = 0; count < total_process; count++)
    {
        turnaround_time[count] = burst_time[count] + waiting_time[count];
        average_waiting_time = average_waiting_time + waiting_time[count];
        average_turnaround_time = average_turnaround_time + turnaround_time[count];
        printf("\nProcess [%d]\t\t%.2f\t\t%.2f\t\t%.2f", count + 1, burst_time[count], waiting_time[count], turnaround_time[count]);
    }
    printf("\n");
    average_waiting_time = average_waiting_time / count;
    average_turnaround_time = average_turnaround_time / count;
    printf("\nAverage Waiting Time = %f", average_waiting_time);
    printf("\nAverage Turnaround Time = %f", average_turnaround_time);
    printf("\n");
    return 0;
}

Output:
Practical 4
Aim: Implement SJF scheduling algorithm in C program
Theory:
SJF scheduling algorithm
The shortest job first scheduling algorithm is a very popular job scheduling algorithm in
operating systems. This algorithm is designed to overcome the shortcomings of the
FCFS algorithm.
The SJF algorithm is also popularly known by the following names:
● Shortest Job Next (SJN) algorithm
● Shortest Process Next (SPN) algorithm
Its preemptive variant is known as the Shortest Remaining Time First (SRTF) algorithm.

Initially, the job queue contains multiple processes for execution. According to the SJF
algorithm, the processes are compared with each other and the process that has the
shortest burst time (execution time) gets executed first.
The remaining processes are also executed in the order of their burst times. If there are
two or more processes having the same burst time, the processes are executed in the
order of their arrival.
This is a non-preemptive algorithm which means that the CPU cannot leave a process
in execution and start execution of another process.
Once the CPU starts execution of a job, it has to complete it successfully and then it can
move to any other process in the job queue.
Note: This non-preemptive SJF scheduling program in C does not consider the
arrival times of the processes entering the job queue.
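For example, assume three processes with burst times 6, 8 and 2, all available at
time 0. SJF runs them in the order P3, P1, P2, giving waiting times 0, 2 and 8 and
an average waiting time of (0 + 2 + 8) / 3 ≈ 3.33; under FCFS the same jobs would
average (0 + 6 + 14) / 3 ≈ 6.67.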

Advantages
● The response time of the processes is better.
● The throughput is higher, since shorter jobs complete quickly.
● The overall performance of the system is more efficient.
Disadvantages
● The execution time of all the processes in the job queue must be known in
advance to apply the algorithm efficiently to all the jobs.
● The processes with larger execution time will have a higher waiting time, and this
may lead to starvation.

Input:
#include <stdio.h>

int main()
{
    int temp, i, j, limit, sum = 0, position;
    float average_wait_time, average_turnaround_time;
    int burst_time[20], process[20], waiting_time[20], turnaround_time[20];

    printf("\nEnter Total Number of Processes:\t");
    scanf("%d", &limit);
    for(i = 0; i < limit; i++)
    {
        printf("Enter Burst Time For Process[%d]:\t", i + 1);
        scanf("%d", &burst_time[i]);
        process[i] = i + 1;
    }

    /* selection sort: arrange processes in increasing order of burst time */
    for(i = 0; i < limit; i++)
    {
        position = i;
        for(j = i + 1; j < limit; j++)
        {
            if(burst_time[j] < burst_time[position])
            {
                position = j;
            }
        }
        temp = burst_time[i];
        burst_time[i] = burst_time[position];
        burst_time[position] = temp;
        temp = process[i];
        process[i] = process[position];
        process[position] = temp;
    }

    /* waiting time of a process = sum of burst times of all earlier processes */
    waiting_time[0] = 0;
    for(i = 1; i < limit; i++)
    {
        waiting_time[i] = 0;
        for(j = 0; j < i; j++)
        {
            waiting_time[i] = waiting_time[i] + burst_time[j];
        }
        sum = sum + waiting_time[i];
    }
    average_wait_time = (float)sum / limit;
    sum = 0;

    printf("\nProcess ID\t\tBurst Time\t Waiting Time\t Turnaround Time\n");
    for(i = 0; i < limit; i++)
    {
        turnaround_time[i] = burst_time[i] + waiting_time[i];
        sum = sum + turnaround_time[i];
        printf("\nProcess[%d]\t\t%d\t\t %d\t\t %d\n", process[i], burst_time[i], waiting_time[i], turnaround_time[i]);
    }
    average_turnaround_time = (float)sum / limit;
    printf("\nAverage Waiting Time:\t%f\n", average_wait_time);
    printf("\nAverage Turnaround Time:\t%f\n", average_turnaround_time);
    return 0;
}
Output:
Practical 5
Aim: Implement RR scheduling algorithm in C program
Theory:
Round Robin Scheduling Algorithm:
Before beginning with the C program implementation, let us first understand the
conceptual theory of the Round Robin Scheduling Algorithm.
● The Round robin algorithm is a pre-emptive process scheduling algorithm. Here,
every job request in the queue is associated with a fixed execution time called
quantum.
● A pre-emptive process enables the job scheduler to pause a process under
execution and move to the next process in the job queue.
● The job scheduler saves the current state of the job and moves to another job in
the queue as soon as a particular process/job is executed for a given time
quantum.
● The context switch saves the current state of the process. This algorithm is
beneficial in terms of its response time.
● In the round robin algorithm, every process gets an equal time of execution which
is defined by the quantum time.
● Therefore, no process will be able to hold the CPU for a longer time period.
● The round-robin job scheduling algorithm is therefore used in multi-user, time-
sharing or multi-tasking operating systems.
● As a result, it is one of the better scheduling algorithms for evenly distributing
terminal response time among processes.
● Furthermore, the efficiency of this algorithm is totally dependent on the size of
the time quantum and the number of context switches that occur.
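For example, assume two processes P1 (burst 5) and P2 (burst 3), both arriving at
time 0, with a time quantum of 2. The CPU is shared as P1(0–2), P2(2–4), P1(4–6),
P2(6–7), P1(7–8): P2 completes at time 7 and P1 at time 8, so their turnaround times
are 7 and 8 and their waiting times are 4 and 3 respectively.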
Advantages:
● The decision-making overhead is very low.
● Equal priority is given to each job within the queue, unlike other scheduling
algorithms.
● Hence, starvation does not occur so frequently.

Disadvantages:
● The throughput in the round-robin algorithm is highly dependent on the quantum
length.
● If the quantum is too small, process switching occurs frequently, which
decreases efficiency.
● If the quantum is too large, the system may become unresponsive.
The RR scheduling algorithm is quite simple to implement, and the overhead in
decision-making is very low. The algorithm also distributes the terminal response
time evenly.
Input:
#include<stdio.h>
int main()
{
    int i, limit, total = 0, x, counter = 0, time_quantum;
    int wait_time = 0, turnaround_time = 0, arrival_time[10], burst_time[10], temp[10];
    float average_wait_time, average_turnaround_time;

    printf("\nEnter Total Number of Processes:\t");
    scanf("%d", &limit);
    x = limit;
    for(i = 0; i < limit; i++)
    {
        printf("\nEnter Details of Process[%d]\n", i + 1);
        printf("Arrival Time:\t");
        scanf("%d", &arrival_time[i]);
        printf("Burst Time:\t");
        scanf("%d", &burst_time[i]);
        temp[i] = burst_time[i];
    }
    printf("\nEnter Time Quantum:\t");
    scanf("%d", &time_quantum);
    printf("\nProcess ID\t\tBurst Time\t Turnaround Time\t Waiting Time\n");
    for(total = 0, i = 0; x != 0;)
    {
        if(temp[i] <= time_quantum && temp[i] > 0)
        {
            total = total + temp[i];
            temp[i] = 0;
            counter = 1;
        }
        else if(temp[i] > 0)
        {
            temp[i] = temp[i] - time_quantum;
            total = total + time_quantum;
        }
        if(temp[i] == 0 && counter == 1)
        {
            x--;
            printf("\nProcess[%d]\t\t%d\t\t %d\t\t\t %d", i + 1, burst_time[i], total - arrival_time[i], total - arrival_time[i] - burst_time[i]);
            wait_time = wait_time + total - arrival_time[i] - burst_time[i];
            turnaround_time = turnaround_time + total - arrival_time[i];
            counter = 0;
        }
        if(i == limit - 1)
        {
            i = 0;
        }
        else if(arrival_time[i + 1] <= total)
        {
            i++;
        }
        else
        {
            i = 0;
        }
    }
    average_wait_time = wait_time * 1.0 / limit;
    average_turnaround_time = turnaround_time * 1.0 / limit;
    printf("\n\nAverage Waiting Time:\t%f", average_wait_time);
    printf("\nAvg Turnaround Time:\t%f\n", average_turnaround_time);
    return 0;
}

Output:

Practical 6
Aim: FIFO page-replacement algorithm
Theory:
FIFO Page-replacement algorithm
When a page fault occurs, the OS has to remove a page from memory so that the
required page can be brought in. Page replacement algorithms are used in operating
systems that support virtual memory management.
The FIFO page replacement technique is one of the simplest page replacement
algorithms to implement. It is a conservative, low-overhead algorithm that maintains
a queue to keep track of all the pages in memory.
When a page needs to be replaced, the page at the FRONT of the queue (the oldest
page in memory) is replaced. On its own, the FIFO page replacement technique is
rarely used in modern operating systems.
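For example, assume the reference string 1, 3, 0, 3, 5, 6 with 3 frames. Pages 1, 3
and 0 fill the empty frames (3 page faults); the next reference to 3 is a hit; page
5 then replaces 1 (the oldest page) and page 6 replaces 3, giving 5 page faults in
total.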
Input:
#include<stdio.h>

int main()
{
    int reference_string[10], page_faults = 0, m, n, s, pages, frames;

    printf("\nEnter Total Number of Pages:\t");
    scanf("%d", &pages);
    printf("\nEnter values of Reference String:\n");
    for(m = 0; m < pages; m++)
    {
        printf("Value No. [%d]:\t", m + 1);
        scanf("%d", &reference_string[m]);
    }
    printf("\nEnter Total Number of Frames:\t");
    scanf("%d", &frames);

    int temp[frames];
    for(m = 0; m < frames; m++)
    {
        temp[m] = -1;
    }
    for(m = 0; m < pages; m++)
    {
        s = 0;
        for(n = 0; n < frames; n++)
        {
            if(reference_string[m] == temp[n])
            {
                s++;
                page_faults--;
            }
        }
        page_faults++;
        if((page_faults <= frames) && (s == 0))
        {
            temp[m] = reference_string[m];
        }
        else if(s == 0)
        {
            temp[(page_faults - 1) % frames] = reference_string[m];
        }
        printf("\n");
        for(n = 0; n < frames; n++)
        {
            printf("%d\t", temp[n]);
        }
    }
    printf("\nTotal Page Faults:\t%d\n", page_faults);
    return 0;
}
Output:
Practical 7
Aim: LRU Page-Replacement algorithm
Theory:
LRU Page-Replacement algorithm
Page replacement algorithms help an operating system decide which memory pages
should be swapped out and written to disk when a new page of memory needs to be
allocated.
The LRU page replacement method is a marking algorithm. It keeps track of page usage
over a given period of time and replaces the page that has been used least recently.
The LRU algorithm offers good performance but is costly to implement exactly. For
practical implementation the technique is modified, and its successors include the
LRU-K and ARC algorithms.
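For example, assume the reference string 7, 0, 1, 2, 0, 3 with 3 frames. Pages 7, 0
and 1 fill the empty frames (3 page faults); page 2 replaces 7, the least recently
used page (4 faults); the next reference to 0 is a hit; page 3 then replaces 1,
which is now the least recently used (5 faults in total).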
Input:
#include<stdio.h>

int main()
{
    int frames[10], temp[10], pages[10];
    int total_pages, m, n, position, k, l, total_frames;
    int a = 0, b = 0, page_fault = 0;

    printf("\nEnter Total Number of Frames:\t");
    scanf("%d", &total_frames);
    for(m = 0; m < total_frames; m++)
    {
        frames[m] = -1;
    }
    printf("Enter Total Number of Pages:\t");
    scanf("%d", &total_pages);
    printf("Enter Values for Reference String:\n");
    for(m = 0; m < total_pages; m++)
    {
        printf("Value No.[%d]:\t", m + 1);
        scanf("%d", &pages[m]);
    }
    for(n = 0; n < total_pages; n++)
    {
        a = 0, b = 0;
        for(m = 0; m < total_frames; m++)
        {
            if(frames[m] == pages[n])
            {
                a = 1;
                b = 1;
                break;
            }
        }
        if(a == 0)
        {
            for(m = 0; m < total_frames; m++)
            {
                if(frames[m] == -1)
                {
                    frames[m] = pages[n];
                    b = 1;
                    break;
                }
            }
        }
        if(b == 0)
        {
            for(m = 0; m < total_frames; m++)
            {
                temp[m] = 0;
            }
            for(k = n - 1, l = 1; l <= total_frames - 1; l++, k--)
            {
                for(m = 0; m < total_frames; m++)
                {
                    if(frames[m] == pages[k])
                    {
                        temp[m] = 1;
                    }
                }
            }
            for(m = 0; m < total_frames; m++)
            {
                if(temp[m] == 0)
                    position = m;
            }
            frames[position] = pages[n];
            page_fault++;
        }
        printf("\n");
        for(m = 0; m < total_frames; m++)
        {
            printf("%d\t", frames[m]);
        }
    }
    printf("\nTotal Number of Page Faults:\t%d\n", page_fault);
    return 0;
}
Output:
