You are on page 1of 34

Oracle Database Administration

Memory Management
SharedPool LibraryCache SharedSqlArea PL/SQLProc. Locksandother DataDict.Cache ControlStruc. SqlArea SGA(SharedMemory) DatabaseBufferCache RedoLogBuffer Keep Cycle Default PGA(NonSharedMemory) SessionInfo LargePool JavaPool

StackSpace

SortArea

Persistent Area  Run Time Area  Session Memory  SQL work area  SQL> show sga

PrivateSQLArea Freedonlywhencursorisclosed Forinsert,update,deletestatementsfreedafterthestatementhasbeenexecuted.For queriesfreedonlyafterallrowsarefetchedorthequeryiscancelled. InMTSthisisstoredinSGA. Memoryintensiveoperations.

The sga_max_size parameter defines the max size of the system global area. *_area_size parameters cannot exceed this value. db_cache_size db_keep_cache_size, db_recycle_cache_size log_buffer redo log buffer shared_pool_size large_pool_size parallel_automatic_tuning=TRUE (init.ora) affects export and import performance java_pool_size for java stored procedures
alter system set db_cache_advice=[ON|OFF]; select * from v$db_cache_advice; -- memory advisories select * from dba_views where view_name like '%ADVICE%' v$java_pool_advice; v$shared_pool_advice; v$sga_target_advice; v$pga_target_advice; dba_hist_java_pool_advice dba_hist_shared_pool_advice dba_hist_sga_target_advice dba_hist_pga_target_advice select name,CEIL(value/1024/1024) MB from v$parameter where name in ('db_cache_size','shared_pool_size','large_pool_size', 'java_pool_size','log_buffer');

1|Page

alter system set sga_max_size=1024M;
alter system set db_cache_size=512M;
alter system set shared_pool_size=256M;
alter system set large_pool_size=64M;
alter system set java_pool_size=64M;

-- directly affects pga usage alter system set open_cursors = 50; -- automatically manage private global area for user sessions alter system set work_area_size_policy = AUTO; alter system set pga_aggregate_target = 1500M;

After setting work_area_size_policy to AUTO, all *_area_size parameters don't take effect:
sort_area_size, hash_area_size, bitmap_merge_area_size, create_bitmap_area_size

relatedviews: v$sga v$sgastat v$pgastat

Connectassysdba
# sqlplus /nolog SQL> connect / as sysdba # orapwd file=filename password=password entries=user_count SQL> alter system set remote_login_passwordfile=[EXCLUSIVE|SHARED|NONE]; desc v$PWFILE_USERS; grant sysdba to <user_name>; grant sysoper to <user_name>; -- database operations with sysdba SQL> alter database <db_name> mount | open | open readonly | readwrite | open exclusive; alter system enable restricted session; alter system kill session '<sid>,<serial#>' [IMMEDIATE]; -- sid:SID, serial#:SERIAL# -- select SID, SERIAL# from v$session; shutdown abort | immediate | transactional | normal; show parameter db_cache_size; col <col_name> format a30; show parameter shared_pool_size; alter system set shared_pool_size=256M scope= spfile | memory | both; SQL> alter system enable restricted session; startup restrict; select logins from v$instance; grant restricted session to <user_name>;

Creatingdatabasefromsqlplus
2|Page

1withfilesystemstorage
SQL> create database <db_name> logfile group1(/$home/..) size 100M, group2(/$home/../redo21.log) size 100M maxlogfiles 5 maxlogmembers 5 maxloghistory 1 maxdatafiles 100 maxinstances 1 datafile /$home/oradata/u01system01.dbf size 325M undo tablespace undotbs datafile /$home../undotbs01.dbf size 200M autoextend on next 512K maxsize unlimited default temporary tablespace temp character set US7ASCII national character set AL6UTF16 set time_zone = America/Newyork Archivelog | NoArchivelog

2withoraclemanagedfilesstorage
-- user should set the following parameters db_create_file_dest --db_create_online_log_dest_n db_create_online_dest_1 db_create_online_dest_2 SQL> @cddba01.sql Create database dba01; Alter procedure <procedure name> compile; Alter package <package name> compile; Alter package <package name> compile body; --creates the data dictionary views @?/rdbms/admin/sql.bsq; @?/rdbms/admin/catalog.sql; @?/rdbms/admin/catproc.sql;

Controlthefollowingviewsafterdatabasecreation v$logfile v$controlfile v$datafile v$tempfile Controltheoperatingsystemvariablesformemoryallocationandoptimization SHMMAX SHMMNI SHMSEG

controlfilemanagement
-- moving control files to a new location -- moving with spfile SQL> alter system set control_files=/$home/oradata/u01/control01.ctl, /$home/oradata/u02/control02.ctl scope=spfile; shutdown immediate; !cp $home/oradata/u01/control01.ctl $home/oradata/u02/control02.ctl Startup; -- moving with pfile SQL> shutdown immediate; !cp $home/oradata/u01/control01.ctl $home/oradata/u02/control02.ctl --Open the initSID.ora file and update the control_files variable Startup;

3|Page

-- backup controlfile SQL> alter database backup controlfile to <backup_controlfile_name>; alter database backup controlfile to trace; alter database backup controlfile to trace as '<os_path>';

relatedviews: v$controlfile

Redologfile
SQL> Alter system set log_checkpoints_to_alert = TRUE; --each checkpoint will be recorded in the alertSID.log file alter system switch logfile; alter system checkpoint; alter system set fast_start_mttr_target=600; (sec?) alter database add logfile group 3 (/data1/u01/redo3_1.log,/data2/u01/redo3_2.log) size 100M; alter database add logfile member ... to group 3; alter database drop logfile group 3; alter database drop logfile member ...; shutdown immediate; alter database clear logfile ...; alter database clear logfile group <group#>; --recreate the damaged redo files alter database clear unarchived logfile group <group#> --force to recreate unarchived redo files !cp ; alter database rename file .. to ..;

relatedviews: v$log v$loghist v$logfile v$log_history

log miner
--@?/rdbms/admin/dbmslogmnrd.sql; -- related views SELECT * FROM V$LOGMNR_DICTIONARY; SELECT * FROM V$LOGMNR_PARAMETERS; --contents of the logs SELECT * FROM V$LOGMNR_CONTENTS; --which redo logs are being analyzed in the current logminer session SELECT * FROM V$LOGMNR_LOGS; -- minimal supplemental logging ALTER DATABASE ADD SUPPLEMENTAL LOG DATA; ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE INDEX) COLUMNS; ALTER DATABASE DROP SUPPLEMENTAL LOG DATA; ALTER TABLE scott.emp ADD SUPPLEMENTAL LOG GROUP

4|Page

emp_parttime (empno, ename, deptno) ALWAYS; ALTER TABLE scott.emp ADD SUPPLEMENTAL LOG GROUP emp_parttime (empno, ename, deptno); ALTER TABLE scott.emp DROP SUPPLEMENTAL LOG GROUP emp_parttime; -- to create all logminer objects in to the specified tablespace EXECUTE DBMS_LOGMNR_D.SET_TABLESPACE('logmnrts$'); EXECUTE DBMS_LOGMNR_D.BUILD ('dictionary.ora', '/oracle/database/', OPTIONS => DBMS_LOGMNR_D.STORE_IN_FLAT_FILE); EXECUTE DBMS_LOGMNR_D.BUILD (OPTIONS=>DBMS_LOGMNR_D.STORE_IN_REDO_LOGS); EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => 'log1orc1.ora', OPTIONS => DBMS_LOGMNR.NEW); EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => 'log2orc1.ora', OPTIONS => DBMS_LOGMNR.ADDFILE); EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => 'log3orc1.ora', OPTIONS => DBMS_LOGMNR.REMOVEFILE); EXECUTE DBMS_LOGMNR.START_LOGMNR( OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS); EXECUTE DBMS_LOGMNR.START_LOGMNR ( OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG); -- For example, to see all the DDLs executed by user SYS, you could issue the following query SELECT USERNAME, SQL_REDO FROM V$LOGMNR_CONTENTS WHERE USERNAME = 'SYS' AND OEPRATION = 'DDL'; -- filtering redo_log actions EXECUTE DBMS_LOGMNR.START_LOGMNR( OPTIONS => DBMS_LOGMNR.COMMITTED_DATA_ONLY); EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.SKIP_CORRUPTION); EXECUTE DBMS_LOGMNR.START_LOGMNR(DICTFILENAME => '/oracle/dictionary.ora', STARTTIME => TO_DATE('01-Jan-1998 08:30:00', 'DD-MON-YYYY HH:MI:SS'), ENDTIME => TO_DATE('01-Jan-1998 08:45:00', 'DD-MON-YYYY HH:MI:SS')); EXECUTE DBMS_LOGMNR.START_LOGMNR(DICTFILENAME => '/oracle/dictionary.ora', STARTSCN => 100, ENDSCN => 150); EXECUTE DBMS_LOGMNR.END_LOGMNR; -- a session example ALTER DATABASE ADD SUPPLEMENTAL LOG DATA; EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => 'log1orc1.ora', OPTIONS => DBMS_LOGMNR.NEW); EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => 'log2orc1.ora', OPTIONS => DBMS_LOGMNR.ADDFILE); EXECUTE DBMS_LOGMNR.START_LOGMNR( DICTFILENAME 
=>'/oracle/database/dictionary.ora'); EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG + DBMS_LOGMNR.COMMITTED_DATA_ONLY); EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.COMMITTED_DATA_ONLY); EXECUTE DBMS_LOGMNR.START_LOGMNR(DICTFILENAME => 'orcldict.ora', STARTTIME => TO_DATE('01-Jan-1998 08:30:00', 'DD-MON-YYYY HH:MI:SS'), ENDTIME => TO_DATE('01-Jan-1998 08:45:00', 'DD-MON-YYYY HH:MI:SS'));

tablespaces and datafiles


--- maximum number of datafiles can a database own. --

5|Page

select * from v$parameter where upper(name) like 'DB_FILES'; -- ************************************* -- creating a locally managed tablespace -- ************************************* create tablespace ts_data datafile 'C:\ts_data_01.dbf' size 50m autoextend on next 5m maxsize 200m | unlimited extent management local segment space management auto logging online permanent|temporary; /* extent management dictionary default storage ( initial 1m next 1m pctincrease 0 uniform size 512k); */ -- dropping a tablespace drop tablespace ts_data including contents and datafiles; -- altering tablespace alter tablespace ts_data [read only|read write]; alter tablespace ts_data [offline|online] [immediate|normal]; alter database datafile '<datafile_name>' resize 200m; alter database datafile '<datafile_name>' online | offline; --archivelog alter database datafile '<datafile_name>' offline for drop; --noarchivelog -- altering datafiles for autoextend properties ALTER DATABASE DATAFILE '/home/oracle/oradata/pq1.dbf' AUTOEXTEND OFF; ALTER DATABASE DATAFILE '/home/oracle/oradata/pq10.dbf' AUTOEXTEND ON NEXT 100M MAXSIZE 20480M; alter tablespace ts_data add datafile 'C:\ts_data_02.dbf' size 200m; alter tablespace ts_data rename file 'C:\ts_data_02.dbf' to 'D:\ts_data_02.dbf'; alter tablespace ts_data drop datafile '<datafilename>'; alter tablespace ts_data coalesce; -- ***************************** -- creating temporary tablespace -- ***************************** create temporary tablespace ts_temp tempfile 'C:\ts_temp_01.dbf' size 10m extent management local; -- altering for default temporary tablespace alter database default temporary tablespace ts_temp; -- checking systems default temporary tablespace select * from database_properties; alter tablespace ts_temp add tempfile 'C:\ts_temp_02.dbf' size 100m; alter tablespace ts_temp drop tempfile '<tempfilename>'; alter database tempfile | datafile '<filename>' drop including datafiles;

6|Page

/* inittrans & maxtrans: initial and the maximum transaction that are created in an index or a data block. inittrans: default is 1 for data segment. Default is 2 for index segment. maxtrans: default 255. pctfree: default %10. pctused: default %40. */ select file_name, autoextensible from dba_data_files; -- renaming files alter tablespace user_data offline; --!copy the files as you need --dbms_file_transfer can also be used here /* dbms_file_transfer.copy_file dbms_file_transfer.put_file dbms_file_transfer.get_file */ alter database rename file 'C:\ts_data_02.dbf' to 'D:\ts_data_02.dbf'; alter tablespace ts_data rename file 'C:\ts_data_02.dbf' to 'D:\ts_data_02.dbf'; alter tablespace user_data online;

dba_extents dba_segments dba_data_files dba_free_space v$tablespace dba_tablespaces v$temp_extent_map dba_users v$sort_usage

--join with v$session

undo management
optimal undo_retention parameter = actual undo size / (db_block_size * undo_block_per_sec)

-- ********************** -- manual undo management -- ********************** create rollback segment r01 tablespace rbs; create public rollback segment r01 tablespace rbs; alter rollback segment r01 offline; drop rollback segment r01; -- ************************* -- automatic undo management -- *************************
alter system set undo_management = auto;
alter system set undo_tablespace = <ts_name>;
alter system set undo_suppress_errors = true|false;
alter system set undo_guarantee = true|false;
alter system set undo_retention = 900; --seconds

select name, value from v$parameter where name like 'undo_management'

7|Page

select name, value from v$parameter where name like 'undo_tablespace' select name, value from v$parameter where name like 'undo_retention' -- creating undo tablespace create undo tablespace ts_undo datafile 'C:\ts_undo_01.dbf' size 20m; alter tablespace ts_undo add datafile 'C:\ts_undo_02.dbf' size 40m; alter system set undo_tablespace = ts_undo; drop tablespace ts_undo; alter tablespace ts_undo retention guarantee; select retention, tablespace_name from dba_tablespaces; select begin_time, end_time, tuned_undoretention from v$undostat; -- calculating undo space select ceil(((UR * (UPS * DBS)) + (DBS * 24))/1024/1024) as MB from (select value as UR from v$parameter where name='undo_retention'), (select (sum(undoblks)/sum(((end_time - begin_time) * 84600))) as UPS from v$undostat), (select value as DBS from v$parameter where name = 'db_block_size'); set transaction read only; set transaction isolation level serializable; -- possible negative impact on performance -- Actual Undo Size SELECT SUM(a.bytes) "UNDO_SIZE" FROM v$datafile a, v$tablespace b, dba_tablespaces c WHERE c.contents = 'UNDO' AND c.status = 'ONLINE' AND b.name = c.tablespace_name AND a.ts# = b.ts#; -- Undo Blocks per Second SELECT MAX(undoblks/((end_time-begin_time)*3600*24)) "UNDO_BLOCK_PER_SEC" FROM v$undostat; -- DB Block Size SELECT TO_NUMBER(value) "DB_BLOCK_SIZE [KByte]" FROM v$parameter WHERE name = 'db_block_size'; -- optimal undo retention SELECT d.undo_size/(1024*1024) "ACTUAL UNDO SIZE [MByte]", SUBSTR(e.value,1,25) "UNDO RETENTION [Sec]", ROUND((d.undo_size / (to_number(f.value) * g.undo_block_per_sec))) "OPTIMAL UNDO RETENTION [Sec]" FROM ( SELECT SUM(a.bytes) undo_size FROM v$datafile a, v$tablespace b, dba_tablespaces c WHERE c.contents = 'UNDO' AND c.status = 'ONLINE' AND b.name = c.tablespace_name AND a.ts# = b.ts# ) d, v$parameter e, v$parameter f, ( SELECT MAX(undoblks/((end_time-begin_time)*3600*24)) undo_block_per_sec FROM v$undostat ) g WHERE e.name = 'undo_retention' AND f.name 
= 'db_block_size';

8|Page

-- optimal undo size SELECT d.undo_size/(1024*1024) "ACTUAL UNDO SIZE [MByte]", SUBSTR(e.value,1,25) "UNDO RETENTION [Sec]", (TO_NUMBER(e.value) * TO_NUMBER(f.value) * g.undo_block_per_sec) / (1024*1024) "NEEDED UNDO SIZE [MByte]" FROM ( SELECT SUM(a.bytes) undo_size FROM v$datafile a, v$tablespace b, dba_tablespaces c WHERE c.contents = 'UNDO' AND c.status = 'ONLINE' AND b.name = c.tablespace_name AND a.ts# = b.ts# ) d, v$parameter e, v$parameter f, ( SELECT MAX(undoblks/((end_time-begin_time)*3600*24)) undo_block_per_sec FROM v$undostat ) g WHERE e.name = 'undo_retention' AND f.name = 'db_block_size'; -- undo consumer sessions SELECT TO_CHAR (s.SID) || ',' || TO_CHAR (s.serial#) sid_serial, NVL (s.username, 'None') orauser, s.program, r.NAME undoseg, t.used_ublk * TO_NUMBER (x.VALUE) / 1024 || 'K' "Undo", t1.tablespace_name FROM SYS.v_$rollname r, SYS.v_$session s, SYS.v_$transaction t, SYS.v_$parameter x, dba_rollback_segs t1 WHERE s.taddr = t.addr AND r.usn = t.xidusn(+) AND x.NAME = 'db_block_size' AND t1.segment_id = r.usn AND t1.tablespace_name = 'UNDOTBS1'; -- undo advisory views select begin_time, end_time, undoblks, maxquerylen, ssolderrcnt, nospaceerrcnt from v$undostat; select begin_time, end_time, tuned_undoretention from v$undostat;

relatedviews: dba_rollback_segs dba_undo_extents v$rollname v$rollstat v$undostat dba_hist_undostat v$session v$transaction 9|Page

OracleManagedFiles
initparameters
DB_CREATE_FILE_DEST -- datafiles tempfiles DB_CREATE_ONLINE_LOG_DEST_n -- 1 <= n <= 5 -- redologs and controlfiles DB_RECOVERY_FILE_DEST -- archivedlogs, rman backups, flashback logs DB_RECOVERY_FILE_DEST_SIZE = 20G DB_UNIQUE_NAME = <db_name> <destination_location>/<db_unique_name>/<datafile>/o1_mf_%t_%u_.dbf

examples
CREATE DATABASE sample; -- system and sysaux datafiles will be 100MB and autoextend on -- redo log files will be two members 100MB -- undo file will be 10M with autoextend on if undo_management=AUTO CREATE DATABASE sample DATAFILE SIZE 400M SYSAUX DATAFILE SIZE 200M DEFAULT TEMPORARY TABLESPACE dflt_ts TEMPFILE SIZE 10M UNDO TABLESPACE undo_ts DATAFILE SIZE 10M; CREATE DATABASE sample DEFAULT TEMPORARY TABLESPACE temp; /* * datafiles will have the specified properties * name of the datafiles will be oracle managed */ ALTER SYSTEM SET DB_CREATE_FILE_DEST = '/u01/oradata'; CREATE TABLESPACE tbs_1; CREATE TABLESPACE tbs_2 DATAFILE SIZE 400M; CREATE TABLESPACE tbs_3 DATAFILE AUTOEXTEND ON MAXSIZE 800M; CREATE TABLESPACE tbs_4 DATAFILE SIZE 200M SIZE 200M; CREATE UNDO TABLESPACE undotbs_1; ALTER TABLESPACE tbs_1 ADD DATAFILE AUTOEXTEND ON MAXSIZE 800M; CREATE TEMPORARY TABLESPACE temptbs_1; ALTER TABLESPACE TBS_1 ADD TEMPFILE; ALTER TABLESPACE tbs_1 ADD DATAFILE; DROP TABLESPACE tbs_1;

-- Creating Controlfiles -- NORESETLOGS EXAMPLE CREATE CONTROLFILE DATABASE sample LOGFILE GROUP 1 ('/u01/oradata/SAMPLE/onlinelog/o1_mf_1_o220rtt9_.log', '/u02/oradata/SAMPLE/onlinelog/o1_mf_1_v2o0b2i3_.log') SIZE 100M,

10 | P a g e

GROUP 2 ('/u01/oradata/SAMPLE/onlinelog/o1_mf_2_p22056iw_.log', '/u02/oradata/SAMPLE/onlinelog/o1_mf_2_p02rcyg3_.log') SIZE 100M NORESETLOGS DATAFILE '/u01/oradata/SAMPLE/datafile/o1_mf_system_xu34ybm2_.dbf' SIZE 100M, '/u01/oradata/SAMPLE/datafile/o1_mf_sysaux_aawbmz51_.dbf' SIZE 100M, '/u01/oradata/SAMPLE/datafile/o1_mf_sys_undotbs_apqbmz51_.dbf' SIZE 100M MAXLOGFILES 5 MAXLOGHISTORY 100 MAXDATAFILES 10 MAXINSTANCES 2 ARCHIVELOG; -- RESETLOGS EXAMPLE -- Some combination of DB_CREATE_FILE_DEST, DB_RECOVERY_FILE_DEST, and -- DB_CREATE_ONLINE_LOG_DEST_n or must be set. CREATE CONTROLFILE DATABASE sample RESETLOGS DATAFILE '/u01/oradata/SAMPLE/datafile/o1_mf_system_aawbmz51_.dbf', '/u01/oradata/SAMPLE/datafile/o1_mf_sysaux_axybmz51_.dbf', '/u01/oradata/SAMPLE/datafile/o1_mf_sys_undotbs_azzbmz51_.dbf' SIZE 100M MAXLOGFILES 5 MAXLOGHISTORY 100 MAXDATAFILES 10 MAXINSTANCES 2 ARCHIVELOG; -- Adding Redo Logs DB_CREATE_ONLINE_LOG_DEST_1 = '/u01/oradata' DB_CREATE_ONLINE_LOG_DEST_2 = '/u02/oradata' ALTER DATABASE ADD LOGFILE; -- Creating Archived Logs Using Oracle-Managed Files DB_RECOVERY_FILE_DEST_SIZE = 20G DB_RECOVERY_FILE_DEST = '/u01/oradata' LOG_ARCHIVE_DEST_1 = 'LOCATION=USE_DB_RECOVERY_FILE_DEST'

ASM
initparametersofASMinstance
INSTANCE_TYPE -- must be ASM ASM_POWER_LIMIT -- [1-10] ASM_DISKSTRING ASM_DISKGROUPS Css should be up and running for synchronizing db instance and asm instance -- crsctl check cssd SELECT NAME,TYPE,TOTAL_MB,FREE_MB FROM V$ASM_DISKGROUP; -- insufficient number of failure groups or disks running out of disk space select redundancy_lowered from v$asm_file;

11 | P a g e

-- free_mb required_mirror_free_mb = 2 * usable_file_mb select required_mirror_free_mb, usable_file_mb from v$asm_diskgroup;

creatingdiskgroup
-- disk names are available in v$asm_disk view -- set ORACLE_SID and ORACLE_HOME for asm instance
sqlplus /nolog SQL> connect / as sysdba startup nomount; create diskgroup <disk_group_name> normal|high|external redundancy [force|noforce] [failgroup <failgroupname>] DISK <disk name> [NAME <diskname>], <disk name> [failgroup <failgroupname> DISK <disk name>, <disk name>] create diskgroup dg1 disks '/dev/sdc', '/dev/sdd', '/dev/sde', '/dev/sdf'; create diskgroup dgroupa normal redundancy failgroup controller2 disk '/dev/rdsk/c2*' failgroup controller3 disk '/dev/rdsk/c3*'; alter diskgroup <disk_group_name> add disk disk_name NAME diska1, disk_name NAME diska2, disk_name[5678], disk_name* [FORCE] [REBALANCE POWER 5 WAIT|NOWAIT]

alter diskgroup <disk_group_name> drop disk <disk_name> alter diskgroup <disk_group_name> resize disks [in failgroup <failgroupname> [size 100M]] alter diskgroup <disk_group_name> rebalance [WAIT|NOWAIT] [POWER n]; select * from v$asm_operation; alter diskgroup <disk_group_name>|ALL MOUNT | DISMOUNT; alter diskgroup <disk_group_name> check [REPAIR|NOREPAIR] all; drop diskgroup <disk_group_name> INCLUDING | EXCLUDING contents; alter diskgroup <disk_group_name> add directory +<disk_group_name>/<dir_name>/..; alter diskgroup <disk_group_name> rename directory <dir1> to <dir2>; alter diskgroup <disk_group_name> drop directory <dir_name>; alter diskgroup <disk_group_name> add alias .. FOR ..; select * from v$asm_alias; alter diskgroup <disk_group_name> drop file ..; select software_version, compatible_version from v$asm_client;

fullyqualifiedasmfilename
+group/dbname/file_type/file_type_tag.file.incarnation +dgroup2/sample/controlfile/Current.256.541956473

numericasmfilename
+group.file.incarnation +dgroup2.257.541956473

incompleteASMfilenames 12 | P a g e

asmcreatestheuniquefilename
alter system set db_create_file_dest=+diskgroup1;

ifyouwantasmtobedefaultdestinationsetthefollowinginitparameters
DB_CREATE_FILE_DEST DB_CREATE_ONLINE_LOG_DEST_n DB_RECOVERY_FILE_DEST CONTROL_FILES LOG_ARCHIVE_DEST_n LOG_ARCHIVE_DEST STANDBY_ARCHIVE_DEST LARGE_POOL_SIZE minimum 1mb UsinganASMFilenameinaSQLStatement:Example CREATE TABLESPACE tspace2 DATAFILE '+dgroup2' SIZE 200M AUTOEXTEND ON; CreatingaDatabaseinASM DB_CREATE_FILE_DEST = '+dgroup1' DB_RECOVERY_FILE_DEST = '+dgroup2' DB_RECOVERY_FILE_DEST_SIZE = 10G CREATE DATABASE sample;

CreatingaTablespaceinASM
DB_CREATE_FILE_DEST = '+dgroup2' CREATE TABLESPACE tspace2;

thefileisnotanOraclemanagedfile
CREATE UNDO TABLESPACE myundo DATAFILE '+dgroup3(my_undo_template)/myfiles/my_undo_ts' SIZE 200M; ALTER DISKGROUP dgroup3 DROP FILE '+dgroup3/myfiles/my_undo_ts';

AddingNewRedoLogFiles
DB_CREATE_ONLINE_LOG_DEST_1 = '+dgroup1' DB_CREATE_ONLINE_LOG_DEST_2 = '+dgroup2' ALTER DATABASE ADD LOGFILE;

CreatingaControlFileinASM noresetlogsexample,scriptisfromalterdatabasebackupcontrolfiletotrace
CREATE CONTROLFILE REUSE DATABASE "SAMPLE" NORESETLOGS ARCHIVELOG MAXLOGFILES 16 MAXLOGMEMBERS 2 MAXDATAFILES 30 MAXINSTANCES 1 MAXLOGHISTORY 226 LOGFILE GROUP 1 ( '+DGROUP1/db/onlinelog/group_1.258.541956457', '+DGROUP2/db/onlinelog/group_1.256.541956473' ) SIZE 100M, GROUP 2 ( '+DGROUP1/db/onlinelog/group_2.257.541956477', '+DGROUP2/db/onlinelog/group_2.258.541956487' ) SIZE 100M DATAFILE '+DGROUP1/db/datafile/system.260.541956497',

13 | P a g e

'+DGROUP1/db/datafile/sysaux.259.541956511' CHARACTER SET US7ASCII ;

resetlogsexample
CREATE CONTROLFILE REUSE DATABASE "SAMPLE" RESETLOGS ARCHIVELOG MAXLOGFILES 16 MAXLOGMEMBERS 2 MAXDATAFILES 30 MAXINSTANCES 1 MAXLOGHISTORY 226 LOGFILE GROUP 1 ( '+DGROUP1', '+DGROUP2' ) SIZE 100M, GROUP 2 ( '+DGROUP1', '+DGROUP2' ) SIZE 100M DATAFILE '+DGROUP1/db/datafile/system.260.541956497', '+DGROUP1/db/datafile/sysaux.259.541956511' CHARACTER SET US7ASCII ; SQL> create tablespace new_tbs datafile '+dg1' size 100m; SQL> alter tablespace system add datafile '+system_dg' size 1000m; SQL> alter database add logfile group 4 '+dg_log1','+dg_log2' size 100m; SQL> alter system set log_archive_dest_1='location=+dg_arc1'; SQL> alter system set log_archive_dest_2='location=+dg_arc2'; SQL> alter system set db_recovery_file_dest='+dg_flash';

systemviews
V$ASM_DISKGROUP V$ASM_DISK V$ASM_DISKGROUP_STAT V$ASM_DISK_STAT V$ASM_FILE V$ASM_TEMPLATE V$ASM_ALIAS V$ASM_OPERATION V$ASM_CLIENT

TablePartititions:
Rangepartition: Theexamplebelowcreatesatableoffourpartitions,oneforeachquarter'ssales.The columnssale_year,sale_month,andsale_dayarethepartitioningcolumns,whiletheir valuesconstituteaspecificrow'spartitioningkey.TheVALUESLESSTHANclausedetermines thepartitionbound:rowswithpartitioningkeyvaluesthatcomparelessthantheordered listofvaluesspecifiedbytheclausearestoredinthepartition.Eachpartitionisgivena name(sales_q1,sales_q2,...),andeachpartitioniscontainedinaseparatetablespace(tsa, tsb,...). 14 | P a g e

CREATE TABLE sales
  ( invoice_no NUMBER,
    sale_year  INT NOT NULL,
    sale_month INT NOT NULL,
    sale_day   INT NOT NULL )
PARTITION BY RANGE (sale_year, sale_month, sale_day)
  ( PARTITION sales_q1 VALUES LESS THAN (1999, 04, 01) TABLESPACE tsa,
    PARTITION sales_q2 VALUES LESS THAN (1999, 07, 01) TABLESPACE tsb,
    PARTITION sales_q3 VALUES LESS THAN (1999, 10, 01) TABLESPACE tsc,
    PARTITION sales_q4 VALUES LESS THAN (2000, 01, 01) TABLESPACE tsd );

ALTER TABLE sales ADD PARTITION jan96 VALUES LESS THAN ( '01-FEB-1999' ) TABLESPACE tsx; ALTER TABLE sales DROP PARTITION dec98; ALTER INDEX sales_area_ix REBUILD; (if global index exists) ALTER TABLE <table_name> move partition <partition_name> tablespace <ts_name> COMPRESS; ALTER TABLE <table_name> move partition <partition_name> tablespace <ts_name> UPDATE INDEXES; ALTER TABLE <table_name> add partition <part_name> values less than (TO_DATE('01.03.2007', 'DD.MM.YYYY')) UPDATE INDEXES; ALTER TABLE <table_name> EXCHANGE PARTITION <partition_name> WITH TABLE <table_name> WITH VALIDATION UPDATE INDEXES; ALTER TABLE <table_name> SPLIT PARTITION <partition_name> AT <range_definition> INTO (PARTITION <first_partition>, PARTITION <second_partition>) UPDATE GLOBAL INDEXES; DELETE FROM sales WHERE TRANSID < 10000; ALTER TABLE sales DROP PARTITION dec98; ALTER TABLE sales DROP PARTITION dec98 UPDATE GLOBAL INDEXES; ALTER TABLE four_seasons MERGE PARTITIONS quarter_one, quarter_two INTO PARTITION quarter_two; CREATE INDEX i_four_seasons_l ON four_seasons ( one,two ) LOCAL ( PARTITION i_quarter_one TABLESPACE i_quarter_one, PARTITION i_quarter_two TABLESPACE i_quarter_two, PARTITION i_quarter_three TABLESPACE i_quarter_three, PARTITION i_quarter_four TABLESPACE i_quarter_four ); ALTER TABLE four_seasons MODIFY PARTITION quarter_two REBUILD UNUSABLE LOCAL INDEXES; ALTER TABLE four_seasons TRUNCATE PARTITION quartes_one [UPDATE INDEXES];

SELECT * FROM schema.table PARTITION(part_name); alter table pq.smsgateway add partition PTI_200911 values less than (to_date('01.12.2009','DD.MM.YYYY')); alter table pq.smsgateway add partition PTI_200912 values less than

15 | P a g e

(to_date('01.01.2010','DD.MM.YYYY')); alter table pq.smsgateway add partition PTI_MAX values less than (MAXVALUE);

Droppingindexpartitions:
ALTER INDEX npr DROP PARTITION P1; ALTER INDEX npr REBUILD PARTITION P2;

HashPartition: Thefollowingexamplecreatesahashpartitionedtable.Thepartitioningcolumnisid,four partitionsarecreatedandassignedsystemgeneratednames,andtheyareplacedinfour namedtablespaces(gear1,gear2,...).


CREATE TABLE scubagear (id NUMBER, name VARCHAR2 (60)) PARTITION BY HASH (id) PARTITIONS 4 STORE IN (gear1, gear2, gear3, gear4); ALTER TABLE scubagear ADD PARTITION p_named TABLESPACE gear5;

ListPartition: Thefollowingexamplecreatesalistpartitionedtable.Itcreatestableq1_sales_by_region whichispartitionedbyregionsconsistingofgroupsofstates.


CREATE TABLE q1_sales_by_region (deptno number, deptname varchar2(20), quarterly_sales number(10, 2), state varchar2(2)) PARTITION BY LIST (state) (PARTITION q1_northwest VALUES ('OR', 'WA'), PARTITION q1_southwest VALUES ('AZ', 'UT', 'NM'), PARTITION q1_northeast VALUES ('NY', 'VM', 'NJ'), PARTITION q1_southeast VALUES ('FL', 'GA'), PARTITION q1_northcentral VALUES ('SD', 'WI'), PARTITION q1_southcentral VALUES ('OK', 'TX')); ALTER TABLE q1_sales_by_region ADD PARTITION q1_nonmainland VALUES ('HI', 'PR') STORAGE (INITIAL 20K NEXT 20K) TABLESPACE tbs_3 NOLOGGING; ALTER TABLE q1_sales_by_region MERGE PARTITIONS q1_northcentral, q1_southcentral INTO PARTITION q1_central PCTFREE 50 STORAGE(MAXEXTENTS 20);

RangeHashPartition: Thefollowingstatementcreatesarangehashpartitionedtable.Inthisexample,threerange partitionsarecreated,eachcontainingeightsubpartitions.Becausethesubpartitionsarenot named,systemgeneratednamesareassigned,buttheSTOREINclausedistributesthem acrossthe4specifiedtablespaces(ts1,...,ts4).


CREATE TABLE scubagear (equipno NUMBER, equipname VARCHAR(32), price NUMBER)

16 | P a g e

PARTITION BY RANGE (equipno) SUBPARTITION BY HASH(equipname) SUBPARTITIONS 8 STORE IN (ts1, ts2, ts3, ts4) (PARTITION p1 VALUES LESS THAN (1000), PARTITION p2 VALUES LESS THAN (2000), PARTITION p3 VALUES LESS THAN (MAXVALUE));

RangeListPartition: Thefollowingexampleillustrateshowrangelistpartitioningmightbeused.Theexample trackssalesdataofproductsbyquartersandwithineachquarter,groupsitbyspecified states.


CREATE TABLE quarterly_regional_sales (deptno number, item_no varchar2(20), txn_date date, txn_amount number, state varchar2(2)) TABLESPACE ts4 PARTITION BY RANGE (txn_date) SUBPARTITION BY LIST (state) (PARTITION q1_1999 VALUES LESS THAN (TO_DATE('1-APR-1999','DD-MONYYYY')) (SUBPARTITION q1_1999_northwest VALUES ('OR', 'WA'), SUBPARTITION q1_1999_southwest VALUES ('AZ', 'UT', 'NM'), SUBPARTITION q1_1999_northeast VALUES ('NY', 'VM', 'NJ'), SUBPARTITION q1_1999_southeast VALUES ('FL', 'GA'), SUBPARTITION q1_1999_northcentral VALUES ('SD', 'WI'), SUBPARTITION q1_1999_southcentral VALUES ('OK', 'TX') ), PARTITION q2_1999 VALUES LESS THAN ( TO_DATE('1-JUL-1999','DD-MONYYYY')) (SUBPARTITION q2_1999_northwest VALUES ('OR', 'WA'), SUBPARTITION q2_1999_southwest VALUES ('AZ', 'UT', 'NM'), SUBPARTITION q2_1999_northeast VALUES ('NY', 'VM', 'NJ'), SUBPARTITION q2_1999_southeast VALUES ('FL', 'GA'), SUBPARTITION q2_1999_northcentral VALUES ('SD', 'WI'), SUBPARTITION q2_1999_southcentral VALUES ('OK', 'TX') ), PARTITION q3_1999 VALUES LESS THAN (TO_DATE('1-OCT-1999','DD-MONYYYY')) (SUBPARTITION q3_1999_northwest VALUES ('OR', 'WA'), SUBPARTITION q3_1999_southwest VALUES ('AZ', 'UT', 'NM'), SUBPARTITION q3_1999_northeast VALUES ('NY', 'VM', 'NJ'), SUBPARTITION q3_1999_southeast VALUES ('FL', 'GA'), SUBPARTITION q3_1999_northcentral VALUES ('SD', 'WI'), SUBPARTITION q3_1999_southcentral VALUES ('OK', 'TX') ), PARTITION q4_1999 VALUES LESS THAN ( TO_DATE('1-JAN-2000','DD-MONYYYY')) (SUBPARTITION q4_1999_northwest VALUES ('OR', 'WA'), SUBPARTITION q4_1999_southwest VALUES ('AZ', 'UT', 'NM'), SUBPARTITION q4_1999_northeast VALUES ('NY', 'VM', 'NJ'), SUBPARTITION q4_1999_southeast VALUES ('FL', 'GA'), SUBPARTITION q4_1999_northcentral VALUES ('SD', 'WI'), SUBPARTITION q4_1999_southcentral VALUES ('OK', 'TX') ) ); ALTER TABLE quarterly_regional_sales MODIFY SUBPARTITION q1_1999_southeast ADD VALUES ('KS');

17 | P a g e

ALTER TABLE quarterly_regional_sales MODIFY SUBPARTITION q1_1999_southeast DROP VALUES ('KS');

ParallelExecution
ParallelHints
Select /*+ PARALLEL(<tb_name>, 8|DEFAULT) */ /*+ NOPARALLEL(<tb_name>) */ /*+ PARALLEL_INDEX(<tb_name>, <ind_name>,3,2) */ /*+ NOPARALLEL_INDEX */

123456-

alter table <tb_name> parallel (degree 8) select degree from user_tables where table_name = 'EMP'; select /*+ PARALLEL(<tb_name>,<degree>) */ from <tb_name> select count(*) from emp; select /*+ PARALLEL(emp,4) */ COUNT(*) from emp; alter table emp noparallel;

PARALLEL_AUTOMATIC_TUNINGdeprecated10g PARALLEL_ADAPTIVE_MULTI_USERif<numberofsessionsisgettinghigher>then<degreeof parallelismshouldbelower>) ParallelDML|DDL:


alter session [enable|disable] parallel [dml|ddl]; insert /*+ PARALLEL(emp_big,4,1) */ into <table_name> insert /*+ PARALLEL(emp_big,4,1) */ into emp_big select * from emp; commit;

ParallelDDL:
create table <tb_name> parallel (degree 8) alter table <tb_name> parallel (degree 8) create index <ind_name> on <tb_name> (<col_name>).... parallel (degree 8) create index emp_ix on emp(emp_id) tablespace ts_index parallel (degree 4);

ParallelLoading:
sqlldr direct=true parallel=true

ParallelRecovery:
recover tablespace <tbs_name> parallel (degree [8|DEFAULT])

ParallelReplication:
DBMS_DEFER_SYS.SCHEDULE_PUSH ( DESTINATION => par1.world, INTERVAL => sysdate+1/24, NEXT_DATE => sysdate+1/24,

18 | P a g e

PARALLELISM => 6 ); Alter session force parallel DDL parallel 5;

degreeofparallelismnumberofparallelexecutionservers APARALLELclauseinastatement ThePARALLELclausethatwasusedwhentheobjectwascreatedoraltered Aparallelhintinsertedintothestatement AdefaultdeterminedbyOracle Parallelexecutioncanbetunedforyouautomaticallybysettingtheinitializationparameter;


-- init.ora (deprecated in 10g):
-- PARALLEL_AUTOMATIC_TUNING = TRUE

-- session-level control of parallel execution
ALTER SESSION ENABLE | DISABLE PARALLEL DDL | DML;
ALTER SESSION FORCE PARALLEL DDL | DML PARALLEL 5;

Relatedinit.oraparameters parallel_threads_per_cpu cpu_count parallel_max_servers parallel_min_servers parallel_adaptive_multi_user parallel_min_percent

Tables
/* Extended ROWID needs 10 bytes of storage on disk and is displayed as 18 chars:
     data object number   (table, index)          32 bits
     relative file number (unique for each file)  10 bits
     block number         (position of the block) 22 bits
     row number           (position of the row)   16 bits */
-- ROWID is the pseudocolumn name (the source's "row_id" is not valid)
select rowid, department_id from hr.departments;

create table hr.employees (
    employee_id number(6),
    first_name  varchar2(20),
    last_name   varchar2(25),
    hire_date   date default sysdate)
  storage (initial 200k next 200k pctincrease 0
           minextents 1 maxextents 5)
  tablespace data;   -- TABLESPACE is a segment clause; it does not belong inside STORAGE()

-- global temporary table: rows removed on commit
CREATE GLOBAL TEMPORARY TABLE gtt_zip2 (
    zip_code   VARCHAR2(5),
    by_user    VARCHAR2(30),
    entry_date DATE)
ON COMMIT DELETE ROWS;

-- global temporary table: rows kept until end of session
CREATE GLOBAL TEMPORARY TABLE gtt_zip3 (
    zip_code   VARCHAR2(5),
    by_user    VARCHAR2(30),
    entry_date DATE)
ON COMMIT PRESERVE ROWS;

-- CTAS form: the subquery follows AS, it is not a parenthesized column list
CREATE GLOBAL TEMPORARY TABLE <temp_table_name>
ON COMMIT PRESERVE ROWS
AS SELECT * FROM hr.employees;

/* Computing pctfree and pctused values:
     pctfree = (average row size - initial row size) * 100 / average row size
     pctused = 100 - pctfree - (average row size * 100 / available data space)
   NOTE(review): the source labelled both formulas "pct used"; the first is the
   usual pctfree estimate -- confirm against the course material. */
alter table hr.employees
  pctfree 30
  pctused 50
  storage (next 500k minextents 2 maxextents 100);

alter table hr.employees
  allocate extent (size 500k datafile '/disk3/data1.dbf');

alter table hr.employees move tablespace data1;

truncate table hr.employees;

drop table hr.employees cascade constraints;

alter table hr.employees drop column comments cascade constraints checkpoint 1000;

alter table <table_name> add <col_name> char(1);
alter table <table_name> rename column <old_col_name> to <new_col_name>;
alter table <table_name> drop column <col_name>;
alter table <table_name> set unused column <col_name>;
alter table <table_name> set unused column <col_name> cascade constraints;
alter table <table_name> drop unused columns checkpoint 1000;
alter table <table_name> modify (<column_name> number);
alter table <table_name> modify (<column_name> not null);
alter table <table_name> modify (<column_name> number null);

-- renaming a table
alter table <old_table_name> rename to <new_table_name>;

-- moving / reorganizing a table
alter table <table_name> move;
alter table <table_name> move parallel 4 compress;
alter table <table_name> move nocompress;
alter table <table_name> move tablespace <ts_name>;
alter table <table_name> move tablespace <ts_name> parallel 4;
alter table <table_name> move nologging;

-- renaming a trigger / a constraint
alter trigger <trigger_name> rename to <new_trigger_name>
alter table <table_name> rename constraint <constraint_name> TO <new_constraint_name>;

--related views: dba_tables

20 | P a g e

dba_objects dba_segments dba_extents dba_tab_columns dba_tab_partitions dba_ind_partitions

indexes
/* Index types
     logical : unique | nonunique | concatenated | function-based
     physical: partitioned | b-tree | bitmap */

create index hr.employees_last_name_idx
  on hr.employees (last_name)
  pctfree 30
  storage (initial 200k next 200k pctincrease 0 maxextents 50)
  tablespace indx;

create bitmap index orders_region_id_idx
  on orders (region_id)
  pctfree 30
  storage (initial 200k next 200k pctincrease 0 maxextents 50)
  tablespace indx;

alter index orders_region_id_idx allocate extent (size 200k datafile '/disk1/...');
alter index orders_region_id_idx deallocate unused;

-- move index to a different tablespace
alter index orders_region_id_idx rebuild tablespace indx02;
alter index orders_region_id_idx rebuild online;
alter index orders_region_id_idx coalesce;
alter index orders_region_id_idx rebuild compress;
alter index orders_region_id_idx rebuild compress online;
alter index orders_region_id_idx rebuild parallel 2 compress online;

-- populates index_stats
analyze index orders_region_id_idx validate structure;

drop index hr.employees_last_name_idx;

-- index usage monitoring; query v$object_usage for the result
alter index hr.dept_id_idx monitoring usage;
alter index hr.dept_id_idx nomonitoring usage;

/* Constraints: not null | unique | primary key | foreign key | check
   Constraint states:
     disable novalidate | disable validate | enable novalidate | enable validate */

create table hr.employees (
    id        number(7)    constraint employee_id_pk primary key deferrable using index,
    last_name varchar2(25) constraint employee_last_name_nn not null);

alter table hr.employees enable validate constraint emp_dept_fk;

-- related views:
--   dba_constraints, dba_objects, dba_indexes, dba_ind_partitions, dba_ind_columns

Constraints

-- unique constraint (backed by a unique index)
alter table t_ep_test add constraint con_object_id_number unique (object_id)

-- adding a primary key constraint
alter table t_ep_test2 add constraint con_object_id_primary_key primary key (object_id)

-- adding foreign key constraints; the two statements below are alternatives
-- (the source text interleaved them -- reconstructed here):
alter table t_ep_test_child add constraint con_object_id_foreign_key2
  foreign key (object_id) references t_ep_test2 (object_id) on delete set null

alter table t_ep_test_child add constraint con_object_id_foreign_key2
  foreign key (object_id) references t_ep_test2 (object_id) on delete cascade

-- dropping foreign key and primary key constraints alter table t_ep_test_child drop constraint con_object_id_foreign_key2 alter table t_ep_test2 drop constraint con_object_id_primary_key -- adding not null constraint alter table t_ep_test2 modify (owner constraint cons_not_null_owner not null) -- removing not null constraint alter table t_ep_test2 drop constraint cons_not_null_owner -- check constraints CREATE TABLE divisions (div_no NUMBER CONSTRAINT check_divno CHECK (div_no BETWEEN 10 AND 99) DISABLE, div_name VARCHAR2(9) CONSTRAINT check_divname CHECK (div_name = UPPER(div_name)) DISABLE, office VARCHAR2(10) CONSTRAINT check_office CHECK (office IN ('DALLAS','BOSTON', 'PARIS','TOKYO')) DISABLE); CREATE TABLE dept_20 (employee_id NUMBER(4) PRIMARY KEY, last_name VARCHAR2(10), job_id VARCHAR2(9), manager_id NUMBER(4), salary NUMBER(7,2), commission_pct NUMBER(7,2), department_id NUMBER(2), CONSTRAINT check_sal CHECK (salary * commission_pct <= 5000));

22 | P a g e

-- adding a check constraint
alter table t_ep_test2 add constraint cons_check_status
  check (STATUS IN ('VALID','INVALID')) enable validate;

-- altering / dropping constraints
alter table t_ep_test2 modify constraint cons_check_status disable novalidate
alter table t_ep_test2 drop constraint cons_check_status;

alter table t_ep_test2 disable constraint cons_check_status;
alter table t_ep_test2 enable validate constraint cons_check_status;

-- composite foreign key
ALTER TABLE table_name ADD CONSTRAINT constraint_name
  FOREIGN KEY (col1, col2) REFERENCES table_2 (cola,colb);

-- primary key whose supporting index is placed in a dedicated tablespace
alter table tbl add constraint pk_tbl
  primary key (col_1, col_2) using index tablespace ts_idx

-- unique constraint
alter table table_name add constraint constraint_name unique (column_name)

-- disabling and dropping (templates)
alter table table-name disable constraint-specification;
alter table table-name disable constraint constraint-name;
alter table table_name drop constraint constraint_name;

Triggers

-- ### TRIGGER TEMPLATE: log every insert of parameter 34 with the session user
CREATE OR REPLACE TRIGGER SYSADM.PRM_LOG
AFTER INSERT ON SYSADM.PARAMETER_VALUE
REFERENCING NEW AS New OLD AS Old
FOR EACH ROW
WHEN (NEW.PARAMETER_ID = 34)
DECLARE
   tmpVar          NUMBER;
   CurrentUserName VARCHAR2(16);
BEGIN
   -- resolve the database user name of the firing session via its audit id
   SELECT USERNAME
     INTO CurrentUserName
     FROM V$SESSION
    WHERE AUDSID = (SELECT USERENV('SESSIONID') FROM DUAL);

   INSERT INTO PARAMETER_VALUE_ENTRY_LOG
   VALUES (:NEW.PRM_VALUE_ID, sysdate, CurrentUserName);
EXCEPTION
   WHEN OTHERS THEN
      -- Consider logging the error and then re-raise
      RAISE;
END PRM_LOG;

DMLstatements(DELETE,INSERT,UPDATE) DDLstatements(CREATE,ALTER,DROP) Databaseoperations(SERVERERROR,LOGON,LOGOFF,STARTUP,SHUTDOWN) BEFORETRUNCATE,BEFORERENAME,AFTERLOGON,BEFORELOGOFF,BEFOREDDL,

-- ### TRIGGER TEMPLATE: print old/new salary and the delta on each row change.
-- (Rejoined: the source split this single trigger across a page break.)
-- NOTE(review): on INSERT, :old.sal is NULL, so sal_diff is NULL -- confirm
-- whether the course intends NVL() handling here.
CREATE OR REPLACE TRIGGER Print_salary_changes
BEFORE DELETE OR INSERT OR UPDATE ON Emp_tab
FOR EACH ROW
WHEN (new.Empno > 0)
DECLARE
   sal_diff number;
BEGIN
   sal_diff := :new.sal - :old.sal;
   dbms_output.put('Old salary: ' || :old.sal);
   dbms_output.put(' New salary: ' || :new.sal);
   dbms_output.put_line(' Difference ' || sal_diff);
END;
/

-- ### LOGON TRIGGER CREATE OR REPLACE PROCEDURE foo (c VARCHAR2) AS BEGIN INSERT INTO Audit_table (user_at) VALUES(c); END; CREATE OR REPLACE TRIGGER logontrig AFTER LOGON ON DATABASE -- Just call an existing procedure. The ORA_LOGIN_USER is a function -- that returns information about the event that fired the trigger. CALL foo (ora_login_user) -- ### INSERTING, UPDATING and DELETING IF INSERTING THEN ... END IF; IF UPDATING THEN ... END IF; CREATE OR REPLACE TRIGGER ... ... UPDATE OF Sal, Comm ON Emp_tab ... BEGIN ... IF UPDATING ('SAL') THEN ... END IF; END; -- ### TRIGGERS ON OBJECT TABLES CREATE OR REPLACE TYPE t AS OBJECT (n NUMBER, m NUMBER) / CREATE TABLE tbl OF t / BEGIN FOR j IN 1..5 LOOP INSERT INTO tbl VALUES (t(j, 0)); END LOOP; END; / CREATE TABLE tbl_history ( d DATE, old_obj t, new_obj t) / CREATE OR REPLACE TRIGGER Tbl_Trg AFTER UPDATE ON tbl FOR EACH ROW BEGIN INSERT INTO tbl_history (d, old_obj, new_obj) VALUES (SYSDATE, :OLD.OBJECT_VALUE, :NEW.OBJECT_VALUE); END Tbl_Trg; / UPDATE tbl SET tbl.n = tbl.n+1 / BEGIN FOR j IN (SELECT d, old_obj, new_obj FROM tbl_history) LOOP Dbms_Output.Put_Line ( j.d|| ' -- old: '||j.old_obj.n||' '||j.old_obj.m|| ' -- new: '||j.new_obj.n||' '||j.new_obj.m); END LOOP; END; /

-- ### DDL TRIGGERS

24 | P a g e

CREATE OR REPLACE TRIGGER my_trigger AFTER CREATE ON DATABASE BEGIN null; END; ALTER TRIGGER <TRIGGER_NAME> COMPILE; ALTER TRIGGER <TRIGGER_NAME> ENABLE|DISABLE; ALTER TABLE <TABLE_NAME> ENABLE|DISABLE ALL TRIGGERS; /**** BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER BEFORE / AFTER AFTER SUSPEND ****/ CREATE OR REPLACE TRIGGER <trigger_name> <BEFORE | AFTER> <triggering_action> ON <SCHEMA | DATABASE> DECLARE -- variable declarations BEGIN -- trigger code EXCEPTION -- exception handler END <trigger_name>; / ALTER ANALYZE ASSOCIATE STATISTICS AUDIT COMMENT CREATE DDL DISASSOCIATE STATISTICS DROP GRANT NOAUDIT RENAME REVOKE TRUNCATE

Related views: USER_TRIGGERS ALL_TRIGGERS DBA_TRIGGERS

Recyclebin
-- NOTE(review): in 10g changing RECYCLEBIN may require SCOPE=SPFILE and a
-- restart; 11g allows DEFERRED -- confirm for the target release.
ALTER SYSTEM SET recyclebin = OFF|ON;

SELECT * from DBA_RECYCLEBIN;
SELECT * FROM USER_RECYCLEBIN;
SELECT object_name, original_name FROM dba_recyclebin WHERE owner = 'HR';

SQL> show recyclebin

-- system-generated BIN$ names contain mixed case and special characters,
-- so they must always be double-quoted (the source omitted the quotes on PURGE)
SELECT * FROM "BIN$yrMKlZaVMhfgNAgAIMenRA==$0";
PURGE TABLE "BIN$jsleilx392mk2=293$0";
PURGE TABLE int_admin_emp;
PURGE TABLESPACE example;

25 | P a g e

PURGE TABLESPACE example USER oe; PURGE RECYCLEBIN; PURGE DBA_RECYCLEBIN; FLASHBACK TABLE int_admin_emp TO BEFORE DROP [RENAME TO int2_admin_emp];
SELECT object_name, original_name, createtime FROM recyclebin; OBJECT_NAME -----------------------------BIN$yrMKlZaLMhfgNAgAIMenRA==$0 BIN$yrMKlZaVMhfgNAgAIMenRA==$0 BIN$yrMKlZaQMhfgNAgAIMenRA==$0 ORIGINAL_NAME --------------INT2_ADMIN_EMP INT2_ADMIN_EMP INT2_ADMIN_EMP CREATETIME ------------------2006-02-05:21:05:52 2006-02-05:21:25:13 2006-02-05:22:05:53

FLASHBACK TABLE BIN$yrMKlZaVMhfgNAgAIMenRA==$0 TO BEFORE DROP;

SharedServer
--------------------
-- Init parameters
--------------------
-- at least DISPATCHERS and SHARED_SERVERS must be set to enable oracle shared server

-- examples of DISPATCHERS parameter settings
-- (fixed from source: "(PROT-tcps)" -> "(PROT=tcps)")
ALTER SYSTEM SET DISPATCHERS = '(PROTOCOL=TCP)(DISPATCHERS=2)';
ALTER SYSTEM SET DISPATCHERS = '(PROT=tcp)(DISP=5)', '(PROT=tcps)(DISP=3)';
ALTER SYSTEM SET DISPATCHERS = "(ADDRESS=(PROTOCOL=TCP)(HOST=144.25.16.201))(DISPATCHERS=2)";
ALTER SYSTEM SET DISPATCHERS = "(ADDRESS=(PROTOCOL=TCP)(PORT=5000))";

-- altering dispatchers (address by INDEX, or by matching attributes)
ALTER SYSTEM SET DISPATCHERS = '(INDEX=0)(DISP=3)', '(INDEX=1)(DISP=1)';
-- or
ALTER SYSTEM SET DISPATCHERS = '(PROT=tcp)(DISP=3)', '(PROT=tcps)(DISP=1)';

ALTER SYSTEM SET MAX_DISPATCHERS = 5;
ALTER SYSTEM SET SHARED_SERVERS = 5;
ALTER SYSTEM SET MAX_SHARED_SERVERS = 50;
ALTER SYSTEM SET SHARED_SERVER_SESSIONS = 500;   -- bounded by SESSIONS, PROCESSES

ALTER SYSTEM SET CIRCUITS = 500;
ALTER SYSTEM SET MTS_CIRCUITS = 500;   -- pre-9i spelling of CIRCUITS

----------------
-- System views
----------------
-- V$DISPATCHER, V$DISPATCHER_RATE, V$DISPATCHER_CONFIG (10g),
-- V$QUEUE, V$SHARED_SERVER, V$CIRCUIT, V$SHARED_SERVER_MONITOR,
-- V$SGA, V$SGASTAT, V$SHARED_POOL_RESERVED

--Identifying Contention Using the Dispatcher-Specific Views

26 | P a g e

select * from v$dispatcher select * from v$dispatcher_rate --Identifying Contention for Shared Servers SELECT DECODE(TOTALQ, 0, 'No Requests', WAIT/TOTALQ || ' HUNDREDTHS OF SECONDS') "AVERAGE WAIT TIME PER REQUESTS" FROM V$QUEUE WHERE TYPE = 'COMMON'; SELECT COUNT(*) "Shared Server Processes" FROM V$SHARED_SERVER WHERE STATUS != 'QUIT'; -- shutting down dispatchers -- Each dispatcher is uniquely identified by a name of the form Dnnn. -- To shut down dispatcher D002, issue the following statement: ALTER SYSTEM SHUTDOWN IMMEDIATE 'D002'; -- To terminate dispatchers once all shared server clients disconnect, enter this statement: ALTER SYSTEM SET DISPATCHERS = ''; ---------------------------------Using Shared Server on Clients -------------------------------sales= (DESCRIPTION= (ADDRESS=(PROTOCOL=tcp)(HOST=sales-server)(PORT=1521)) (CONNECT_DATA= (SERVICE_NAME=sales.us.acme.com) (SERVER=shared))) --------------------------------------Overriding Shared Server on Clients ------------------------------------sales= (DESCRIPTION= (ADDRESS=(PROTOCOL=tcp)(HOST=sales-server)(PORT=1521)) (CONNECT_DATA= (SERVICE_NAME=sales.us.acme.com) (SERVER=dedicated))) /* bppserp00> ps -ef | grep oracle | wc -l 153 bppserp00> sqlplus "/ as sysdba" SQL*Plus: Release 9.2.0.7.0 - Production on Fri Sep 19 16:34:20 2008 Copyright (c) 1982, 2002, Oracle Corporation. All rights reserved.

Connected to: Oracle9i Enterprise Edition Release 9.2.0.7.0 - Production With the Partitioning and Oracle Data Mining options JServer Release 9.2.0.7.0 - Production SQL> select count(*) from v$session; COUNT(*) ---------565

27 | P a g e

*/

Userandpasswordmanagement
Toenablepasswordmanagement,runutlpwdmg.sqlassysuser. Relatedparameters: Failed_login_attempts Password_lock_time Password_life_time Password_Grace_time Password_reuse_time Password_reuse_max Password_verify_functionthisfunctionmustbecreatedinsysschemaandmusthavethe followingspecification function_name(userid_parameterinvarchar2(30), password_parameterinvarchar2(30), old_password_parameterinvarchar2(30) )returnboolean;
-- password-related profile example
-- (fixed typos: failed_login_attemps -> failed_login_attempts,
--  unlimitied -> unlimited, resouce_limit -> resource_limit)
SQL> create profile grace_s limit
        failed_login_attempts 3
        password_lock_time    unlimited
        ...

alter profile ...
drop profile <profile_name> [cascade];

-- resource limits in profiles are only enforced when:
alter system set resource_limit = true;

/* resource limit settings:
   cpu_per_session, sessions_per_user, connect_time, idle_time,
   logical_reads_per_session, private_sga (for shared server only) */

-- CREATE USER takes TEMPORARY TABLESPACE (the DEFAULT keyword only applies to
-- the default tablespace clause); also fixed "tablesapce" typo
create user <user_name> identified by <password>
   default tablespace data
   temporary tablespace temp
   quota 15m on data
   quota 10m on users
   password expire;

relatedviews: dba_users dba_profiles dba_ts_quotas


-- NOTE(review): both parameters below are static -- they are normally set in
-- init.ora/spfile and require an instance restart; confirm for the release.
SQL> alter system set os_authent_prefix = OPS$;   -- OPS$ is the default prefix
     alter system set remote_os_authent = true|false;

create user ops$user identified by <password>;

-- reconstructed from the garbled "Alter|drop <user name> [cascade];"
alter user <user_name> ...;
drop user <user_name> [cascade];

grant create session to emi [with admin option];
revoke create table from emi;
grant update on emi.customers to jeff with grant option;
grant execute on dbms_output to jeff;

Relatedviews: Dba_sys_privs Session_privs Dba_tab_privs Dba_col_privs


SQL> create role ol_clerk;
create role hr_clerk identified by bonus;

-- fixed typo: "ol_cleck" -> ol_clerk (the role created above)
grant ol_clerk to scott;
grant hr_clerk to hr_manager [with admin option];

show parameter max_enabled_roles;

alter user scott default role all [except hr_clerk];

drop role <role_name>;

SQL> audit <table_name>;
audit create any trigger;
audit select on emi.orders;

-- sid and serial# must be supplied as one quoted string
alter system kill session '<sid>,<serial#>';

select status, username from v$session;

SQL> SQL>

--On Unix: SQL> select spid from v$process where not exists (select 1 from v$session where paddr=addr); !kill <spid>; --On Windows: SQL> select spid, osuser, s.program from v$process p, v$session s where p.addr=s.paddr; !orakill sid thread

-- create user with ts quota template CREATE USER <user_name> IDENTIFIED BY <password> DEFAULT TABLESPACE data_ts QUOTA 100M ON test_ts QUOTA 512K ON data_ts TEMPORARY TABLESPACE temp_ts PROFILE <profile_name>; GRANT connect TO <user_name>;

Relatedviews: All_def_audit_opts Dba_stmt_audit_opts 29 | P a g e

Dba_priv_audit_opts Dba_obj_audit_opts Dba_audit_trail Dba_audit_exists Dba_audit_object Dba_audit_session Dba_audit_statement

BACKUPandRECOVERY
Mean_time_between_failures Mean_time_to_recover
Problem Statementfailure logicerror insufficientprivileges Processfailure usersessionabnormallyterminated Usererrors droptable truncatetable delete...commit Instancefailure

shouldbebiggerinvalue shouldbesmallerinvalue
Solution Changetheuserquota(alteruser) Addfilespacetothetablespace Pmon Recoverfromabackup

Restarttheinstance

fast_start_mttr_target:
log_checkpoint_interval:
Related views: v$datafile, v$controlfile, v$logfile, dba_data_files, v$backup,
v$datafile_header, v$sgastat, v$log, v$fast_start_servers,
v$fast_start_transactions, v$instance_recovery

expectedmttrspecifiedinsecondsamountofthetimethathas passedsincetheincrementalcheckpointatthepositionwhere thelastwritetoredologfileoccured. numberofredologfileblocksthatcanexistbetweenan incrementalcheckpointandthelastblockwrittentotheredo log.

30 | P a g e

recovery_estimated_ios: actual_redo_blks: taret_redo_blks: log_file_size_redo_blks: estimated_mttr: ckpt_block_writes:

numberofdirtybuffersinthebuffercache currentactualnumberofredoblocksrequiredforrecovery currentnumberofredoblocksthatmustbeprocessedforrecovery currentnumberofredoblocksrequiredtoguaranteethatalogswitch doesnotoccurbeforecheckpoint. currentestimatedmeantimetorecover(mttr).Basedonthenumberof dirtybuffersandredologblocks. numberofblockswrittenbycheckpointwrites.

log_chkpt_timeout_redo_blks:

rollingforwardphase setrecovery_parallelism<integer> useparallelclauseintherecoverdatabasestatement rollingbackphase setfast_start_parallel_rollback[false|low|high]lowisdefault

usermanagedbackupandrecovery
offlinebackupconsistantwholedatabasesetup(scriptswillbesupplied) onlinebackupthedatabaseshouldbeinarchivelogmode(scriptswillbesupplied) usermanagedrecovery - timebased - cancelbased - changebased recoverysteps 1 damagedfilesarerestoredfrombackup 2 changesfromarchivedredologsoronlineredologsareappliedifnecessary 3 thedatabasemaynowcontaincommitedanduncommitedchanges 4 theundoblockareusedtorollbackanyuncommitedchanges 5 thedatabaseisnowinrecoverdstate recoveryinnoarchivelogmode 1 restorealldatafilesevenoneofthemneedsrecovery 2 shutdowntheinstance 3 performcancelbasedrecovery 4 opendatabasewithresetlogs
SQL> shutdown immediate
!cp ... ...
-- CONTROLFILE is one word in this clause (the source had "control file")
recover database until cancel using backup controlfile
cancel
alter database open resetlogs

recoveryinarchivelogmode 1 querythev$recover_file,v$archived_log,v$recovery_log 31 | P a g e

2 3 4 5
SQL>

recoverdatabase recoverdatafile... recovertablespaceusers recoverdatafile...

mounteddatabase mounteddatabase opendatabase opendatabase

shutdown abort Startup mount Recover database --recover datafile ... Alter database open !cp ... ... alter database rename file ... to ... startup mount alter database datafile ... offline alter database open restore datafile alter database rename file ... to ... alter database recover recover datafile ... --recover tablespace <tablespace name> alter database datafile ... online; alter alter alter alter tablespace user_data offline immediate; database create datafile ... [as] ...; database recover; tablespace table_data online;

create controlfile; recover database using backup controlfile;

1 shutdownandbackupthedatabase 2 restorealldatafiles,dontrestorecontrolfile,redologs,passwordfileandparameter file 3 mountdatabase 4 recoverdatafiles 5 opendatabasewithresetlogs 6 performacloseddatabasebackup


SQL> recover database until cancel;
-- the UNTIL TIME value must be a quoted 'YYYY-MM-DD:HH24:MI:SS' string
recover [automatic] database until time '2001-03-04:14:22:03';

Timebaseddatabaserecovery 1 shutdownandbackupthedatabase 2 restorealldatafiles mayneedtorecoverarchivelogs 3 mountdatabase 4 recoverdatabaseuntiltime 5 openwithresetlogs 6 backupthedatabase cancelbaseddatabaserecovery 1 redologsarenotmultiplexed oneoftheredologsismissing themissingredologisnotarchived 2 shutdowndatabase 3 restorealldatafilesfrombackup 4 mountthedatabase 5 recoverdatabaseuntilcancel 6 opendatabasewithresetlogs 32 | P a g e

7 backupdatabase lossofcurrentredologfiles - attempttoopendatabase - findthecurrentlogsequencenumber - recoverdatabaseuntilcancel - dropandrecreatelogfilesifnecessary - opendatabasewithresetlogs - performwholedatabasebackup


SQL>

select * from v$log

alter database clear unarchived logfile group 2;

importandexport
exportutility - tablemode - usermode - tablespacemode - databasemode $ exphr/hrtables=employees,departments,...rows=yfile=expdat.dmp expsystem/managerowner=hrdirect=yfile=expdat.dmp exp\username/passwordassysdba\transport_tablespace=ytablespaces=ts_emp log=emp.log exportparameters - buffer - compress(y) - file(expdat.dmp) - full(n) - help - indexes(y) - log(none) - owner(none) - rows(y) - tables(none) - tablespaces(none) - userid(none) user/password - parfile(none) importutility - table - user - tablespace - fulldatabase $ imphr/hrtables=employees,departmentsrows=yfile=expdat.dmp impsystem/managerfromuser=hrfile=expdat.dmp imp\user/passwordassysdba\transport_tablespace=ytablespaces=ts_employees importparameters - buffer - datafiles(none)

33 | P a g e

destroy(n) file(expdat.dmp) fromuser(none) full(n) help ignore(n) indexes(y) log(none) parfile(none) rows(y) tables(none) tablespaces(none) touser(none) userid(none)

importsequence - createnewtables - importdata - buildindex - importtrigger - integtarityconstaintsenable - builtbitmap,functional,domainindexes

34 | P a g e