You are on page 1of 23

EXP NO 2

Program: -

def extract_macros(source_code):
    """Build the macro name table (MNT) from assembly source text.

    Scans for lines beginning with the MACRO pseudo-op; the line that
    follows each MACRO directive holds the macro prototype, whose first
    word is the macro's name.

    Args:
        source_code: the full source program as one string.

    Returns:
        A list of (macro_name, mdt_index) tuples, where mdt_index is the
        0-based line number of the macro prototype line.
    """
    macros = []
    lines = source_code.split('\n')
    for index, line in enumerate(lines):
        # Guard index + 1: a MACRO directive on the very last line would
        # otherwise index past the end of the list.
        if line.strip().startswith("MACRO") and index + 1 < len(lines):
            macro_name = lines[index + 1].strip().split()[0]
            macros.append((macro_name, index + 1))
    return macros

def display_macro_name_table(macros):
print("Macro Name Table:")
print("Index\tMacro Name\tMDT Index")
for i, (macro_name, index) in enumerate(macros, start=1):
print(f"{i}\t{macro_name}\t\t{index}")

# Step 1: Open and read the source code file
with open('prog.txt', 'r') as file:
    source_code = file.read()

# Step 2: Display the content of the source code
print("Source Code:")
print(source_code)

# Step 3: Extract macro name table
macros = extract_macros(source_code)

# Step 4: Display macro name table
display_macro_name_table(macros)

prog.txt

MACRO
INCR1 &ARG1,&ARG2
A 1,&ARG1
L 2,&ARG2
MEND
MACRO
INCR2 &ARG1,&ARG2
M 1,&ARG1
S 2,&ARG2
MEND
MACRO
COMPLEX &X
INCR1 &X,DATA1
INCR2 &X,DATA2
MEND
OUTPUT: -

Source Code:

MACRO

INCR1 &ARG1,&ARG2

A 1,&ARG1

L 2,&ARG2

MEND

MACRO

INCR2 &ARG1,&ARG2

M 1,&ARG1

S 2,&ARG2

MEND

MACRO

COMPLEX &X

INCR1 &X,DATA1

INCR2 &X,DATA2

MEND

Macro Name Table:

Index Macro Name MDT Index

1 INCR1 1

2 INCR2 6

3 COMPLEX 11
EXP NO 3
Program: -

# Define functions to identify tokens


def is_keyword(word):
    """Return True if *word* is one of the recognised C-style keywords."""
    keywords = ['if', 'else', 'while', 'for', 'int', 'float', 'char', 'return']
    return word in keywords
def is_identifier(word):
    """Return True if *word* is a valid identifier and not a keyword."""
    return word.isidentifier() and not is_keyword(word)
def is_integer_literal(word):
    """Return True if *word* parses as an integer literal."""
    try:
        int(word)
        return True
    except ValueError:
        return False
def is_float_literal(word):
    """Return True if *word* parses as a floating-point literal.

    Note: integers also parse as floats, so callers must test
    is_integer_literal() first to keep the categories disjoint.
    """
    try:
        float(word)
        return True
    except ValueError:
        return False
def is_operator(word):
    """Return True if *word* is a recognised arithmetic/relational operator."""
    operators = ['+', '-', '*', '/', '=', '==', '!=', '<', '>', '<=', '>=']
    return word in operators
def is_delimiter(word):
    """Return True if *word* is a recognised punctuation delimiter."""
    delimiters = ['(', ')', '{', '}', '[', ']', ',', ';']
    return word in delimiters
# Define function for lexical analysis
def lexical_analyzer(code):
    """Classify every whitespace-separated word of *code* into a token.

    Words must be pre-separated by spaces (e.g. "x == 10"); anything
    matching no category is labelled 'Unknown' (e.g. "main()").

    Args:
        code: source text, possibly spanning several lines.

    Returns:
        List of (word, category) tuples in source order.
    """
    # Ordered dispatch list — order matters: keyword before identifier,
    # integer before float, so categories stay disjoint.
    classifiers = [
        (is_keyword, 'Keyword'),
        (is_identifier, 'Identifier'),
        (is_integer_literal, 'Integer Literal'),
        (is_float_literal, 'Float Literal'),
        (is_operator, 'Operator'),
        (is_delimiter, 'Delimiter'),
    ]
    tokens = []
    for source_line in code.split('\n'):
        for word in source_line.split():
            for predicate, category in classifiers:
                if predicate(word):
                    tokens.append((word, category))
                    break
            else:
                tokens.append((word, 'Unknown'))
    return tokens
# Main program
if __name__ == "__main__":
    # Sample C-like snippet; tokens are pre-separated by spaces so the
    # whitespace-based analyzer can pick them apart.
    code = """int main() {
int x = 10 ;
float y = 3.14 ;
if ( x == 10 ) {
printf ( " Hello , World ! " ) ;
}
}"""

    print("Input Code:")
    print(code)
    print("\nLexical Analysis:")
    tokens = lexical_analyzer(code)
    for token in tokens:
        print(token[0], "-", token[1])

OUTPUT: -

Input Code:
int main() {
int x = 10 ;
float y = 3.14 ;
if ( x == 10 ) {
printf ( " Hello , World ! " ) ;
}
}
Lexical Analysis:
int - Keyword
main() - Unknown
{ - Delimiter
int - Keyword
x - Identifier
= - Operator
10 - Integer Literal
; - Delimiter
float - Keyword
y - Identifier
= - Operator
3.14 - Float Literal
; - Delimiter
if - Keyword
( - Delimiter
x - Identifier
== - Operator
10 - Integer Literal
) - Delimiter
{ - Delimiter
printf - Identifier
( - Delimiter
" - Unknown
Hello - Identifier
, - Delimiter
World - Identifier
! - Unknown
" - Unknown
) - Delimiter
; - Delimiter
} - Delimiter
} - Delimiter
EXP NO 4
Program: -

def eliminate_left_recursion(grammar):
    """Remove immediate left recursion from *grammar* in place.

    Each left-recursive non-terminal A with productions
    A -> A x | y is rewritten as A -> y A' and A' -> x A' | ε.

    Args:
        grammar: dict mapping a non-terminal to a list of production
            right-hand sides (plain strings). Mutated and returned.

    Returns:
        The same dict, with new A' entries added for rewritten rules.

    NOTE(review): only *immediate* left recursion is handled, and a
    non-terminal whose productions are all left-recursive ends up with
    an empty alternative list — confirm inputs always have a basis case.
    """
    # Snapshot the keys: new A' entries are added while iterating.
    non_terminals = list(grammar.keys())

    for A in non_terminals:
        productions_A = grammar[A]

        # Split productions into left-recursive and non-left-recursive
        left_recursive_productions = [prod for prod in productions_A
                                      if prod.startswith(A)]
        non_left_recursive_productions = [prod for prod in productions_A
                                          if not prod.startswith(A)]

        if not left_recursive_productions:
            continue

        # Create a new non-terminal A' carrying the recursive tails.
        A_prime = A + "'"
        grammar[A_prime] = [prod[len(A):] + A_prime
                            for prod in left_recursive_productions] + ["ε"]

        # The original non-terminal keeps the non-recursive productions,
        # each followed by A'.
        grammar[A] = [prod + A_prime for prod in non_left_recursive_productions]

    return grammar

# Function to parse user input for grammar
def parse_grammar():
    """Interactively read grammar productions of the form "A -> x | y".

    Reading stops when the user types "done". Alternatives on the
    right-hand side are split on "|" and whitespace-stripped.

    Returns:
        Dict mapping each non-terminal to its list of alternatives.
    """
    grammar = {}
    while True:
        production = input("Enter production (or type 'done' to finish): ").strip()
        if production.lower() == 'done':
            break
        non_terminal, rhs = production.split('->')
        non_terminal = non_terminal.strip()
        rhs = [symbol.strip() for symbol in rhs.split('|')]
        grammar[non_terminal] = rhs
    return grammar

# Main program
if __name__ == "__main__":
    print("Enter the grammar productions:")
    user_grammar = parse_grammar()

    # Eliminate left recursion
    eliminated_grammar = eliminate_left_recursion(user_grammar)

    # Print the modified grammar
    print("\nModified Grammar after eliminating left recursion:")
    for non_terminal, productions in eliminated_grammar.items():
        print(f"{non_terminal} -> {' | '.join(productions)}")
OUTPUT: -

Enter the grammar productions:


Enter production (or type 'done' to finish): E -> E + T | T
Enter production (or type 'done' to finish): T -> T * F | F
Enter production (or type 'done' to finish): F -> (E)|id
Enter production (or type 'done' to finish): done

Modified Grammar after eliminating left recursion:


E -> TE'
T -> FT'
F -> (E) | id
E' -> + TE' | ε
T' -> * FT' | ε
EXP NO 5
Program: -

# Shift-reduce parser trace for the grammar E -> 2E2 | 3E3 | 4.
gram = {
    "E": ["2E2", "3E3", "4"]
}
starting_terminal = "E"
inp = "2324232$"   # input buffer, terminated by the end marker $
stack = "$"        # parse stack with bottom marker $
print(f'{"Stack": <15}' + "/" + f'{"Input Buffer": <15}' + "|" + f'Parsing Action')
print(f'{"-":-<50}')
while True:
    action = True
    i = 0
    # Reduce step: replace any substring matching a production body
    # with the non-terminal, restarting the scan after each success.
    while i < len(gram[starting_terminal]):
        if gram[starting_terminal][i] in stack:
            stack = stack.replace(gram[starting_terminal][i], starting_terminal)
            print(f'{stack: <15}' + "|" + f'{inp: <15}' + "|" + f'Reduce S->{gram[starting_terminal][i]}')
            i = -1          # restart scanning the production list
            action = False
        i += 1
    # Shift step: move the next symbol from input to stack ($ is kept).
    if len(inp) > 1:
        stack += inp[0]
        inp = inp[1:]
        print(f'{stack: <15}' + "/" + f'{inp: <15}' + "|" + f'Shift')
        action = False
    if inp == "$" and stack == ("$" + starting_terminal):
        print(f'{stack: <15}' + "/" + f'{inp: <15}' + "/" + f'Accepted')
        break
    if action:
        # Neither shift nor reduce applied: the input is rejected.
        print(f'{stack: <15}' + "|" + f'{inp: <15}' + "|" + f'Rejected')
        break
OUTPUT: -

Stack /Input Buffer |Parsing Action


--------------------------------------------------
$2 /324232$ |Shift
$23 /24232$ |Shift
$232 /4232$ |Shift
$2324 /232$ |Shift
$232E |232$ |Reduce S->4
$232E2 /32$ |Shift
$23E |32$ |Reduce S->2E2
$23E3 /2$ |Shift
$2E |2$ |Reduce S->3E3
$2E2 /$ |Shift
$E |$ |Reduce S->2E2
$E /$ /Accepted
EXP NO 6
Program: -

# Single-character operator alphabet shared by all three converters.
OPERATORS = set(['+', '-', '*', '/', '(', ')'])

# Operator precedence: * and / bind tighter than + and -.
PRI = {'+': 1, '-': 1, '*': 2, '/': 2}

### INFIX ===> POSTFIX ###
def infix_to_postfix(formula):
    """Convert a single-character-token infix expression to postfix.

    Args:
        formula: infix string such as "a+b*9"; every operand is one
            character and parentheses must be balanced.

    Returns:
        The postfix (RPN) string; also printed for the lab transcript.
    """
    stack = []   # operator stack; only pop when the incoming op has lower/equal priority
    output = ''
    for ch in formula:
        if ch not in OPERATORS:
            output += ch                # operands pass straight through
        elif ch == '(':
            stack.append('(')
        elif ch == ')':
            while stack and stack[-1] != '(':
                output += stack.pop()
            stack.pop()                 # discard the matching '('
        else:
            # Pop operators of equal or higher priority before pushing.
            while stack and stack[-1] != '(' and PRI[ch] <= PRI[stack[-1]]:
                output += stack.pop()
            stack.append(ch)
    # Flush the remaining operators.
    while stack:
        output += stack.pop()
    print(f'POSTFIX: {output}')
    return output

### INFIX ===> PREFIX ###
def infix_to_prefix(formula):
    """Convert a single-character-token infix expression to prefix.

    Maintains two stacks: pending operators and partially built prefix
    sub-expressions; reducing pops one operator and two operands.

    Returns:
        The prefix string; also printed for the lab transcript.
    """
    op_stack = []
    exp_stack = []
    for ch in formula:
        if not ch in OPERATORS:
            exp_stack.append(ch)
        elif ch == '(':
            op_stack.append(ch)
        elif ch == ')':
            # Reduce everything back to the matching '('.
            while op_stack[-1] != '(':
                op = op_stack.pop()
                a = exp_stack.pop()
                b = exp_stack.pop()
                exp_stack.append(op + b + a)
            op_stack.pop()  # pop '('
        else:
            # Reduce operators of equal or higher priority first.
            while op_stack and op_stack[-1] != '(' and PRI[ch] <= PRI[op_stack[-1]]:
                op = op_stack.pop()
                a = exp_stack.pop()
                b = exp_stack.pop()
                exp_stack.append(op + b + a)
            op_stack.append(ch)
    # Reduce whatever operators remain.
    while op_stack:
        op = op_stack.pop()
        a = exp_stack.pop()
        b = exp_stack.pop()
        exp_stack.append(op + b + a)
    print(f'PREFIX: {exp_stack[-1]}')
    return exp_stack[-1]

### THREE ADDRESS CODE GENERATION ###
def generate3AC(pos):
    """Emit three-address code (t1, t2, ...) from a postfix expression.

    Args:
        pos: postfix string with single-character operands, e.g. "ab+".
    """
    print("### THREE ADDRESS CODE GENERATION ###")
    exp_stack = []
    t = 1
    for i in pos:
        if i not in OPERATORS:
            exp_stack.append(i)
        else:
            # Combine the two most recent operands into a new temporary.
            print(f't{t} := {exp_stack[-2]} {i} {exp_stack[-1]}')
            exp_stack = exp_stack[:-2]
            exp_stack.append(f't{t}')
            t += 1

# Driver: read one expression and show all three translations.
expres = input("INPUT THE EXPRESSION: ")

pre = infix_to_prefix(expres)
pos = infix_to_postfix(expres)
generate3AC(pos)

OUTPUT: -

INPUT THE EXPRESSION: a+b*9-d+c


PREFIX: +-+a*b9dc
POSTFIX: ab9*+d-c+
### THREE ADDRESS CODE GENERATION ###
t1 := b * 9
t2 := a + t1
t3 := t2 - d
t4 := t3 + c
EXP NO 7
Program: -

import re

# Common-sub-expression elimination over simple arithmetic assignments.
# expr_buffer holds each sub-expression already given a temporary; its
# list position is the temporary's number (t0, t1, ...). Removing an
# invalidated entry deliberately lets later expressions reuse its slot.
expr_buffer = []
optm_code = []

with open(r"sample_code_unopt.txt") as source:
    code = source.readlines()

print("\nInput Expressions: \n")
for line in code:
    print(line)


def _fold_subexpressions(spline, pattern):
    """Replace every sub-expression in *spline* matching *pattern* with
    its temporary tN, emitting "tN = expr" the first time it is seen."""
    for x in re.findall(pattern, spline):
        if x not in expr_buffer:
            expr_buffer.append(x)
            s = "t" + str(expr_buffer.index(x))
            optm_code.append(s + " = " + x + "\n")
        else:
            s = "t" + str(expr_buffer.index(x))
        spline = spline.replace(x, s)
    return spline


for line in code:
    spline = line.replace(" ", "")
    if "=" in spline:
        # A re-assignment invalidates any cached sub-expression whose
        # text contains the assigned variable's (single-letter) name.
        expr = (re.findall(r"(\w)+=\S*", spline))
        for y in expr_buffer:
            for x in y:
                if expr[0] == x:
                    expr_buffer.remove(y)
    # Fold each operator kind in the original fixed order: / * + -
    if "/" in spline:
        spline = _fold_subexpressions(spline, r"[a-zA-Z]+[\d]*/[a-zA-Z]+[\d]*")
    if "*" in spline:
        spline = _fold_subexpressions(spline, r"[a-zA-Z]+[\d]*\*[a-zA-Z]+[\d]*")
    if "+" in spline:
        spline = _fold_subexpressions(spline, r"[a-zA-Z]+[\d]*\+[a-zA-Z]+[\d]*")
    if "-" in spline:
        spline = _fold_subexpressions(spline, r"[a-zA-Z]+[\d]*-[a-zA-Z]+[\d]*")
    optm_code.append(spline)

print("\nOptimized Expressions with computed compiler variable assignments: \n")
for line in optm_code:
    print(line)

OUTPUT: -

Input Expressions:

c = a/b * d

a = c * d

f = a/b * c/d + e

g = f * a/b - c/d

Optimized Expressions with computed compiler variable assignments:

t0 = a/b

t1 = t0*d

c=t1

t1 = c*d

a=t1

t2 = a/b

t3 = c/d

t4 = t2*t3

t5 = t4+e

f=t5

t6 = f*t2

t7 = t6-t3

g=t7
EXP NO 8
Program: -

import re

# Read the intermediate three-address code produced by the front end.
with open("IntermediateCode.txt", 'r') as f:
    lines = f.readlines()

fifo_return_reg = 'R0'   # register most recently recycled by the FIFO spiller
reg = [0] * 13           # availability flags for R0..R12 (0 = free, 1 = in use)
var = {}                 # operand/variable -> register currently holding it
store_seq = []           # variables whose registers must be stored back (ST)
fifo_reg = 0             # next register index considered for FIFO eviction
operator_list = {'+': 'ADD', '-': 'SUB', '*': 'MUL', '/': 'DIV', '=': 'MOV', '==': 'NE',
                 '>': 'G', '>=': 'GE', '<': 'L', '<=': 'LE', 'and': 'AND', 'or': 'OR'}

def fifo():
    """Evict the register at index *fifo_reg* (FIFO policy) and return it.

    Any variable living in that register is dropped from the descriptor;
    if it was pending a store-back, an ST instruction is emitted now.
    """
    global fifo_reg
    global fifo_return_reg
    for k, v in var.copy().items():
        if v == 'R' + str(fifo_reg):
            fifo_return_reg = v
            var.pop(k)
            # NOTE(review): the ST print is assumed to be inside the
            # store_seq guard — confirm against the original formatting.
            if k in store_seq:
                store_seq.remove(k)
                print("ST", k, ',', v, sep='')
    # Advance the FIFO pointer past the register just freed.
    fifo_reg = int(fifo_return_reg[1:]) + 1
    return fifo_return_reg

def getreg():
    """Return a free register name ('R0'..'R12'), spilling via fifo()
    when every register is already in use."""
    for i in range(0, 13):
        if reg[i] == 0:
            reg[i] = 1
            return 'R' + str(i)
    register = fifo()
    return register

# Translate each three-address statement into target instructions.
for line in lines:

    line = line.strip()
    if not line:
        continue

    line = line.split()
    length = len(line)

    if length == 0:
        continue

    # Bare labels (e.g. "|0:") are emitted verbatim.
    if length == 1:
        print(line[0])
        continue

    # Statements assigning to a pure temporary tN are not translated.
    if re.match(r'^t[0-9]+$', line[0]):
        continue
    if length == 3:
        # Simple copy:  lhs = operand
        lhs = line[0]
        operand = line[2]
        if operand not in var:
            var[operand] = getreg()
            # Variables are loaded (LD); numeric literals use MOV #imm.
            if operand.isalpha():
                print("LD", var[operand], ', ', operand, sep="")
            else:
                print("MOV", var[operand], ', #', operand, sep="")
        if lhs in store_seq:
            # lhs is being overwritten: store its old value first.
            old_reg = var[lhs]
            store_seq.remove(lhs)
            print("ST", lhs, ',', old_reg, sep='')
        var[lhs] = var[operand]
        store_seq.append(lhs)

    elif 'goto' in line:

        if 'if' in line:
            # Conditional branch:  if cond goto label
            operand = line[1]
            label = line[3]
            if operand not in var:
                var[operand] = getreg()
                if operand.isalpha():
                    print("LD", var[operand], ', ', operand, sep="")
                else:
                    print("MOV", var[operand], ', #', operand, sep="")
            print("BNEZ", var[operand], ',', label)
        else:
            # Unconditional branch:  goto label
            print("BR", line[1])

    else:
        if len(line) >= 5:
            # Binary operation:  lhs = operand1 oper operand2
            oper = line[3]
            operand1 = line[2]
            operand2 = line[4]
            lhs = line[0]
            if operand1 not in var:
                var[operand1] = getreg()
                if operand1.isalpha():
                    print("LD", var[operand1], ', ', operand1, sep="")
                else:
                    print("MOV", var[operand1], ', #', operand1, sep="")
            if operand2 not in var:
                var[operand2] = getreg()
                if operand2.isalpha():
                    print("LD", var[operand2], ', ', operand2, sep="")
                else:
                    print("MOV", var[operand2], ', #', operand2, sep="")
            operator_print = operator_list.get(oper)
            if operator_print:
                if lhs in store_seq:
                    old_reg = var[lhs]
                    store_seq.remove(lhs)
                    print("ST", lhs, ',', old_reg, sep='')
                var[lhs] = getreg()
                store_seq.append(lhs)
                print(operator_print, var[lhs], ',', var[operand1], ',', var[operand2],
                      sep=' ')

        else:
            # Unary form (e.g. "lhs = not operand").
            operand = line[3]
            lhs = line[0]
            if operand not in var:
                var[operand] = getreg()
                if operand.isalpha():
                    print("LD", var[operand], ', ', operand, sep="")
                else:
                    print("MOV", var[operand], ', #', operand, sep="")
            if lhs in store_seq:
                old_reg = var[lhs]
                store_seq.remove(lhs)
                print("ST", lhs, ',', old_reg, sep='')
            var[lhs] = getreg()
            store_seq.append(lhs)
            print("NOT", var[lhs], ',', var[operand], sep='')

# Flush every variable still pending a store-back.
for i in store_seq:
    print("ST", i, ',', var[i], sep='')

IntermediateCode.txt
a = 10
b = 9
t0= 10 + 9
tl = 19 + 100
c = 119
e = 10
f = 8
t2 = 10 * 8
d = 80
|0:
t3 = 10 >= 9
t4 = not 1
if 0 goto |1
a = 19
15 = 80 * 100
g = 8000
|1:
u = 10
j = 99
OUTPUT: -

MOVR0, #10
MOVR1, #9
NOTR2,R1
MOVR3, #19
MOVR4, #100
ADD R5 , R3 , R4
MOVR6, #119
MOVR7, #8
MOVR8, #80
|0:
MOVR9, #0
BNEZ R9 , |1
STa,R0
MUL R10 , R8 , R4
MOVR11, #8000
|1:
MOVR12, #99
STb,R1
STt0=,R2
STtl,R5
STc,R6
STe,R0
STf,R7
STd,R8
STa,R3
ST15,R10
STg,R11
STu,R0
STj,R12
ASSIGNMENT 3

Q. 1 "What are the different types of system programs, and how do they contribute to the efficient
operation and management of computer systems?"
System programs are essential software components that facilitate the efficient operation and
management of computer systems. They perform various tasks ranging from controlling hardware devices
to managing system resources and providing essential utilities. Here are some different types of system
programs and their contributions to the efficient operation and management of computer systems:
1. Operating System (OS):

The operating system is the core system software that manages computer hardware resources and
provides essential services to user applications.
It includes components such as the kernel, which interacts directly with hardware, and system services
such as process management, memory management, file system management, and input/output (I/O)
management.
The OS ensures efficient allocation and utilization of system resources, facilitates multitasking and
multiprocessing, and provides a platform for running applications.
2. Device Drivers:

Device drivers are software components that enable communication between the operating system and
hardware devices such as printers, network adapters, graphics cards, and storage devices.
They provide an interface for the OS to control and manage hardware devices, including handling device
initialization, data transfer, and error handling.
Device drivers ensure compatibility between hardware devices and the operating system, allowing users
to access and utilize peripheral devices effectively.
3. System Utilities:

System utilities are software tools provided by the operating system or third-party vendors to perform
various system management tasks.
Examples include disk utilities for managing storage devices (e.g., disk formatting, partitioning, and
defragmentation), network utilities for configuring network settings and troubleshooting connectivity
issues, and performance monitoring tools for analyzing system performance metrics (e.g., CPU usage,
memory usage, disk activity).
System utilities help users and administrators manage system resources, diagnose and troubleshoot
problems, and optimize system performance.
4. File Management Programs:

File management programs facilitate the creation, organization, modification, and deletion of files and
directories on storage devices.
They provide features such as file browsing, file copying and moving, file searching, file compression,
and file permission management.
File management programs ensure efficient storage and retrieval of data, maintain file system integrity,
and enforce security policies related to file access and permissions.
5. Security Software:

Security software includes antivirus programs, firewalls, intrusion detection/prevention systems


(IDS/IPS), and encryption utilities designed to protect computer systems from security threats and
unauthorized access.
Antivirus programs detect and remove malware (e.g., viruses, worms, Trojans), while firewalls monitor
and control network traffic to prevent unauthorized access and protect against network-based attacks.
Encryption utilities secure sensitive data by encrypting files, folders, and communication channels,
ensuring confidentiality and integrity of information.
In summary, system programs encompass a diverse range of software components that play critical roles
in the efficient operation and management of computer systems. From controlling hardware devices to
managing system resources, facilitating user interactions, ensuring data security, and providing essential
utilities, system programs are fundamental to the functionality and reliability of modern computing
environments. Understanding the different types of system programs and their contributions is essential
for effective system administration, troubleshooting, and optimization of computer systems.

Q.2 "What are some key data structures used in the design of an assembler, and how do they
contribute to the efficient translation of assembly language code into machine code?"

In the design of an assembler, several key data structures play essential roles in efficiently translating
assembly language code into machine code. Here are some key data structures commonly used in
assembler design and their contributions to the translation process:
1. Symbol Table:
A symbol table is a data structure used to store information about symbols (labels, variables, and
constants) defined in the assembly code, along with their associated memory addresses or values.
Symbol tables help in resolving symbolic references and associating memory addresses with symbols
during the assembly process.
They facilitate efficient translation by providing quick access to symbol information and supporting
symbol resolution and memory allocation.
2. Parsing Trees:

Parsing trees (or syntax trees) are hierarchical data structures used to represent the syntactic structure of
assembly language instructions.
During the parsing phase of the assembly process, parsing trees are constructed by parsing and analyzing
the assembly code.
Parsing trees facilitate semantic analysis and code generation by organizing assembly language
instructions into a structured format, making it easier to analyze and manipulate them during translation.
3. Opcode Table:

An opcode table is a data structure that maps mnemonic instructions to their corresponding binary
machine code representations.
Opcode tables contain entries for each assembly language instruction, along with their opcode values and
associated instruction formats.
They enable the assembler to look up opcode information quickly and efficiently translate mnemonic
instructions into their binary equivalents during code generation.
4. Instruction Queue:

An instruction queue is a data structure used to buffer assembly language instructions during the
translation process.
As assembly code is parsed and analyzed, instructions are placed in the instruction queue for further
processing.
Instruction queues help in organizing instructions for sequential translation, ensuring proper ordering and
handling of dependencies between instructions.
5. Symbol Resolution Table:
A symbol resolution table is a data structure used to track unresolved symbols and their references in the
assembly code.
During the assembly process, unresolved symbols are recorded in the symbol resolution table until they
are resolved by associating them with memory addresses or values from the symbol table.
Symbol resolution tables help in detecting and handling unresolved symbols, ensuring that all symbols
are resolved before generating machine code.
These key data structures contribute to the efficient translation of assembly language code into machine
code by organizing and managing symbol information, representing the syntactic structure of instructions,
mapping mnemonics to opcode values, buffering instructions for translation, and resolving symbolic
references. By leveraging these data structures, assemblers can perform translation tasks accurately,
quickly, and reliably, resulting in optimized machine code generation and enhanced overall performance.
Q.3 Write an assembly language program for finding the factorial of a number using a macro

Explanation:

● The macro CALC_FACTORIAL takes two arguments: the address where the result will be
stored (%1), and the number whose factorial needs to be calculated (%2).
● Within the macro, it initializes ecx with the value of the second argument (%2) and initializes
%1 with 1.
● It then enters a loop where it multiplies %1 by the current value of ecx (the counter) and
decrements ecx until it becomes zero.
● The main program first initializes the number whose factorial needs to be calculated (number),
and a variable to store the result (result).
● It then calls the CALC_FACTORIAL macro, passing the address of result and the value stored
in number.
Finally, it displays the result and exits the program.
This program calculates the factorial of the number 5 (hardcoded) and displays the result. You can modify
the number variable to calculate the factorial of any other number.

Q.4 What are the various loaders and linkers schemes used in software development, and how do they
contribute to the efficient development of user applications?
Various loaders and linkers schemes are employed in software development to facilitate the efficient
execution of user applications. These schemes include static linking, dynamic linking, and various
relocation techniques. Here's an overview of each and their contributions to the efficient development of
user applications:
1. Static Linking:

● Definition: Static linking involves combining all necessary library code and application code into
a single executable file before the program is run. This means that all library functions are
resolved and linked at compile time.
● Performance: Static linking can improve the performance of the application by reducing the
overhead associated with dynamic loading and linking during runtime.
● Portability: Static linking ensures that all necessary dependencies are included within the
executable, making the application more portable as it can be run without relying on external
libraries.
● Simplicity: Static linking simplifies deployment and distribution, as it eliminates the need to
manage and distribute separate library files along with the executable.
● Security: Static linking can enhance security by reducing the risk of dependency-related
vulnerabilities, as the application is self-contained and does not rely on external resources.
2. Dynamic Linking:

● Definition: Dynamic linking involves linking libraries at runtime, where the necessary library
functions are loaded into memory only when they are needed by the application.
● Memory Efficiency: Dynamic linking reduces memory overhead by allowing multiple
applications to share the same copy of a library loaded into memory.
● Flexibility: Dynamic linking provides flexibility in updating and managing libraries, as changes
to shared libraries can be applied without recompiling or relinking the application.
● Resource Utilization: Dynamic linking optimizes resource utilization by loading libraries into
memory on demand, reducing startup time and memory footprint.
● Version Management: Dynamic linking facilitates version management, as different versions of
a library can coexist on the system, and applications can dynamically link to the appropriate
version at runtime.
3. Relocation Techniques:

Definition: Relocation techniques are used by loaders to adjust the memory addresses of program and
library code during the loading process, ensuring that the code can execute correctly regardless of its
location in memory.
● Memory Management: Relocation techniques enable efficient memory allocation and address
resolution, allowing programs to utilize available memory resources optimally.
● Address Space Layout Randomization (ASLR): ASLR is a relocation technique that enhances
security by randomizing the memory addresses of executable code and libraries, making it
difficult for attackers to predict the location of critical system components.
● Position-Independent Code (PIC): PIC is a compilation technique used to generate code that
can be relocated to any memory address, enhancing compatibility and flexibility in dynamic
linking environments.
Overall, loaders and linkers schemes such as static linking, dynamic linking, and relocation techniques
contribute to the efficient development of user applications by optimizing performance, memory
utilization, portability, security, and flexibility. Understanding these schemes is essential for software
developers to make informed decisions regarding application design, deployment, and maintenance.
Q.5 Construct a CLR parsing table for following Grammar

S-->AA
A-->aA|b

Steps for constructing CLR parsing table :


1. Writing augmented grammar
2. LR(1) collection of items to be found
3. Defining 2 functions: goto[list of non-terminals] and action[list of terminals] in the CLR parsing
table
Solution :
STEP 1 – Find augmented grammar
The augmented grammar of the given grammar is:-
S'-->.S ,$ [0th production]
S-->.AA ,$ [1st production]
A-->.aA ,a|b [2nd production]

A-->.b ,a|b [3rd production]


Let’s apply the rule of lookahead to the above productions
● The initial look ahead is always $
● Now, the 1st production came into existence because of ‘ . ‘ Before ‘S’ in 0th production.There is
nothing after ‘S’, so the lookahead of 0th production will be the lookahead of 1st production. ie: S–
>.AA ,$
● Now, the 2nd production came into existence because of ‘ . ‘ Before ‘A’ in the 1st production.After
‘A’, there’s ‘A’. So, FIRST(A) is a,b
Therefore,the look ahead for the 2nd production becomes a|b.
● Now, the 3rd production is a part of the 2nd production.So, the look ahead will be the same.
STEP 2 – Find LR(1) collection of items
Below is the figure showing the LR(1) collection of items. We will understand everything one by one.
The terminals of this grammar are {a,b}
The non-terminals of this grammar are {S,A}
RULE-
1. If any non-terminal has ‘ . ‘ preceding it, we have to write all its production and add ‘ . ‘ preceding
each of its production.
2. from each state to the next state, the ‘ . ‘ shifts to one place to the right.
3. All the rules of lookahead apply here.
● In the figure, I0 consists of augmented grammar.
● Io goes to I1 when ‘ . ‘ of 0th production is shifted towards the right of S(S’->S.). This state is the
accept state . S is seen by the compiler. Since I1 is a part of the 0th production, the lookahead is the
same ie $
● Io goes to I2 when ‘ . ‘ of 1st production is shifted towards right (S->A.A) . A is seen by the
compiler. Since I2 is a part of the 1st production, the lookahead is the same i.e. $.
● I0 goes to I3 when ‘ . ‘ of the 2nd production is shifted towards right (A->a.A) . a is seen by the
compiler. Since I3 is a part of the 2nd production, the lookahead is the same ie a|b.
● I0 goes to I4 when ‘ . ‘ of the 3rd production is shifted towards right (A->b.) . b is seen by the
compiler. Since I4 is a part of the 3rd production, the lookahead is the same i.e. a | b.
● I2 goes to I5 when ‘ . ‘ of 1st production is shifted towards right (S->AA.) . A is seen by the
compiler. Since I5 is a part of the 1st production, the lookahead is the same i.e. $.
● I2 goes to I6 when ‘ . ‘ of 2nd production is shifted towards the right (A->a.A) . A is seen by the
compiler. Since I6 is a part of the 2nd production, the lookahead is the same i.e. $.
● I2 goes to I7 when ‘ . ‘ of 3rd production is shifted towards right (A->b.) . A is seen by the compiler.
Since I6 is a part of the 3rd production, the lookahead is the same i.e. $.
● I3 goes to I3 when ‘ . ‘ of the 2nd production is shifted towards right (A->a.A) . a is seen by the
compiler. Since I3 is a part of the 2nd production, the lookahead is the same i.e. a|b.
● I3 goes to I8 when ‘ . ‘ of 2nd production is shifted towards the right (A->aA.) . A is seen by the
compiler. Since I8 is a part of the 2nd production, the lookahead is the same i.e. a|b.
● I6 goes to I9 when ‘ . ‘ of 2nd production is shifted towards the right (A->aA.) . A is seen by the
compiler. Since I9 is a part of the 2nd production, the lookahead is the same i.e. $.
● I6 goes to I6 when ‘ . ‘ of the 2nd production is shifted towards right (A->a.A) . a is seen by the
compiler. Since I6 is a part of the 2nd production, the lookahead is the same i.e. $.
● I6 goes to I7 when ‘ . ‘ of the 3rd production is shifted towards right (A->b.) . b is seen by the
compiler. Since I6 is a part of the 3rd production, the lookahead is the same ie $.
STEP 3- defining 2 functions: goto[list of non-terminals] and action[list of terminals] in the parsing
table. Below is the CLR parsing table

● $ is the end-of-input marker (a terminal); the accept action is placed in the $ column of the accepting state.


● 0,1,2,3,4,5,6,7,8,9 denotes I0,I1,I2,I3,I4,I5,I6,I7,I8,I9
● I0 gives A in I2, so 2 is added to the A column and 0 row.
● I0 gives S in I1, so 1 is added to the S column and 0 row.
● similarly 5 is written in A column and 2nd row, 8 is written in A column and 3rd row, 9 is written in
A column and 6th row.
● I0 gives a in I3, so S3(shift 3) is added to a column and 0 row.
● I0 gives b in I4, so S4(shift 4) is added to the b column and 0 row.
● Similarly, S6(shift 6) is added on ‘a’ column and 2,6 row ,S7(shift 7) is added on b column and 2,6
row,S3(shift 3) is added on ‘a’ column and 3 row ,S4(shift 4) is added on b column and 3 row.
● I4 is reduced as ‘ . ‘ is at the end. I4 is the 3rd production of grammar. So write r3(reduce 3) in
lookahead columns. The lookahead of I4 are a and b, so write R3 in a and b column.
● I5 is reduced as ‘ . ‘ is at the end. I5 is the 1st production of grammar. So write r1(reduce 1) in
lookahead columns. The lookahead of I5 is $ so write R1 in $ column.
● Similarly, write R2 in a,b column and 8th row, write R2 in $ column and 9th row.

Q.6 What is a DAG? Explain with example. Directed Acyclic Graph :


The Directed Acyclic Graph (DAG) is used to represent the structure of basic blocks, to visualize the flow
of values between basic blocks, and to provide optimization techniques in the basic block. To apply an
optimization technique to a basic block, a DAG is a three-address code that is generated as the result of an
intermediate code generation.
● Directed acyclic graphs are a type of data structure and they are used to apply transformations to
basic blocks.
● The Directed Acyclic Graph (DAG) facilitates the transformation of basic blocks.
● DAG is an efficient method for identifying common sub-expressions.
● It demonstrates how the statement’s computed value is used in subsequent statements.
Examples of directed acyclic graph :

Algorithm for construction of Directed Acyclic Graph :


There are three possible scenarios for building a DAG on three address codes:
Case 1 – x = y op z Case 2 – x = op y Case 3 – x = y
Directed Acyclic Graph for the above cases can be built as follows :
Step 1 –
● If the y operand is not defined, then create a node (y).
● If the z operand is not defined, create a node for case(1) as node(z).
Step 2 –
● Create node(OP) for case(1), with node(z) as its right child and node(y) as its left child.
● For the case (2), see if there is a node operator (OP) with one child node (y).
● Node n will be node(y) in case (3).
Step 3 –
Remove x from the list of identifiers attached to other nodes, then add x to the list of attached identifiers for node n.
Example :
T0 = a + b —Expression 1 T1 = T0 + c —-Expression 2 d = T0 + T1 —–Expression 3
Expression 1 : T0 = a + b
Expression 2: T1 = T0 + c

Expression 3 : d = T0 + T1

Final Directed acyclic graph

Example : T1 = a + b T2 = T1 + c T3 = T1 x T2

You might also like