November 30, 2012

SCHEMA REFRESH in ORACLE

Steps for SCHEMA REFRESH:-
------------------------
1. Capture the SOURCE database SCHEMA OBJECTS COUNT which are to be refreshed.
2. Take EXPORT of required SCHEMA(s) at SOURCE Database.
3. Copy the EXPORT DUMPFILE(s) to DESTINATION Database Server.(SCP)
4. Take EXPORT of required SCHEMA(s) at DESTINATION Database.(Recommended)
5. DROP the SCHEMA(s) objects at DESTINATION Database except DATABASE LINK (DB LINK).
6. IMPORT the Copied SOURCE EXPORT DUMPFILE(s) to the DESTINATION Database.
7. THE TARGET SCHEMA OBJECT COUNT SHOULD BE SAME AS THE SOURCE SCHEMA OBJECT COUNT CAPTURED.
8. Gather Schema statistics at DESTINATION Database after schema refresh.(Recommended)

SOURCE
------
Hostname       :
Database Name  :
Schema(s) Name :

DESTINATION
------
Hostname       :
Database Name  :
Schema(s) Name :


@ SOURCE
--------
Database Details:
----------------
http://select-star-from.blogspot.in/2013/08/oracle-database-details.html
Table Space Report
------------------
http://select-star-from.blogspot.in/2013/08/tablespace-utilization-script-in-oracle.html
ASM Space Report
----------------
http://select-star-from.blogspot.in/2013/03/asm-tablespace-utilization-scripts.html

Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');

select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;

select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by object_type; 

FILE_NAME
---------
/FILE/LOCATION/

Check for the space availability for taking backup
--------------------------------------------------
df -h or df -gt
df -h /FILE/LOCATION/

cd /source/backup/location/

mkdir EXPORT_REF_NO
chmod 755 EXPORT_REF_NO
cd EXPORT_REF_NO

pwd
/source/backup/location/EXPORT_REF_NO

EXPDP
-----

col DIRECTORY_PATH for a80;
select * from dba_directories;

create or replace directory <EXPDPDIR> as '/source/backup/location/EXPORT_REF_NO';
grant read,write ON DIRECTORY <EXPDPDIR> to <SYSTEM>;
alter user <username> quota unlimited on <tablespace_name>; ---> If required


col DIRECTORY_PATH for a80;
select * from dba_directories;

PAR file
--------
vi SOURCE_SCHEMA_EXPDP.par

userid="/ as sysdba"
directory=DATAPUMP_DIR
dumpfile=source_schemaname_expdp_%U.dmp
logfile=source_schemaname_expdp.log
schemas=schemaname1,schemaname2
exclude=statistics

if required,
PARALLEL=6
JOB_NAME=SOURCE_SCHEMA_EXPDP
COMPRESSION=ALL
STATUS=60
FLASHBACK_TIME="TO_TIMESTAMP('24-APR-2013 16:00:00','DD-MON-YYYY HH24:MI:SS')"
--OR--
FLASHBACK_TIME="TO_TIMESTAMP('24-04-2013 16:00:00','DD-MM-YYYY HH24:MI:SS')"

:wq

nohup expdp parfile=SOURCE_SCHEMA_EXPDP.par &

Check the status of EXPORT
--------------------------
http://select-star-from.blogspot.in/2013/06/longops-export.html


## COMPRESSION - Reduce size of dumpfile contents, where valid keyword values are: ALL, DATA_ONLY, METADATA_ONLY (default) and NONE. (ALL and DATA_ONLY require 11g and the Advanced Compression option.)

## You can export backup of database which was like certain months ago. This you can do using the FLASHBACK_TIME or FLASHBACK_SCN option.

## FLASHBACK_TIME - Time used to get the SCN closest to the specified time.The export operation is performed with data that is consistent as of this SCN.

EXP
---
If the size of the schema is less better to go for EXP/IMP
nohup exp <user>/<password> file=source_schemaname_exp.dmp log=source_schemaname_exp.log owner=schemaname statistics=NONE resumable=y resumable_name=continue resumable_timeout=18000 &

-----------------------------------------------------------------------------------------------------

SCP SOURCE DUMP FILES TO DESTINATION 
----------------------------------------------------------
@ Source
--------
scp source_schemaname_expdp*.dmp oracle@target_hostname:/target/backup/location/EXPORT_REF_NO/

(OR)

@ Target
--------
cd /target/backup/location/EXPORT_REF_NO/
scp oracle@source_hostname:/source/backup/location/EXPORT_REF_NO/source_schemaname_expdp*.dmp .

pwd
/target/backup/location/EXPORT_REF_NO/
ls -lrth source_schemaname_expdp*.dmp --------> scp files are here

-----------------------------------------------------------------------------------------------------

@ Target
--------
Database Details:
----------------
http://select-star-from.blogspot.in/2013/08/oracle-database-details.html
Table Space Report
------------------
http://select-star-from.blogspot.in/2013/08/tablespace-utilization-script-in-oracle.html
ASM Space Report
----------------
http://select-star-from.blogspot.in/2013/03/asm-tablespace-utilization-scripts.html

Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');

select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;

select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by object_type; 


Check for the space availability for taking backup
--------------------------------------------------
df -h or df -gt
df -h /target/backup/location/

cd /target/backup/location/

mkdir EXPORT_REF_NO
chmod 755 EXPORT_REF_NO
cd EXPORT_REF_NO

pwd
/target/backup/location/EXPORT_REF_NO

EXPDP
-----

col DIRECTORY_PATH for a80;
select * from dba_directories;

PAR file
--------

vi TARGET_SCHEMA_EXPDP.par &  -----> same as above PAR file at SOURCE DATABASE
:wq
nohup expdp parfile=TARGET_SCHEMA_EXPDP.par &

Check the status of EXPORT
--------------------------
http://select-star-from.blogspot.in/2013/06/longops-export.html

EXP
---
If the size of the schema is less better to go for EXP/IMP
nohup exp userid=\'/ as sysdba \' file=target_schemaname_exp.dmp log=target_schemaname_exp.log owner=schemaname statistics=NONE resumable=y resumable_name=continue resumable_timeout=18000 &

------------------------------------------------------------------------------------------------

Drop Schema(s) Objects
http://select-star-from.blogspot.in/2013/05/drop-objects.html

Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');

select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;

select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by object_type; 

exit

NOTE:-
If required,To remove all dropped objects from the recyclebin (current user)

$sqlplus "/as sysdba"

SQL> PURGE DBA_RECYCLEBIN;

DBA Recyclebin purged.

-----------------------------------------------------------------------------------------------------

pwd
/target/backup/location/EXPORT_REF_NO/

ls -lrth source_schemaname_expdp*.dmp

Restore schema backup
---------------------

IMPDP
-----
 vi import_source_schemaname_expdp.par

 userid="/ as sysdba" 
directory=EXPDPDIR
dumpfile=source_schemaname_expdp_%U.dmp

logfile=import_source_schemaname_expdp.log
schemas=schemaname1,schemaname2
PARALLEL=6
JOB_NAME=IMPORT_SCHEMA_EXPDP

If required,
remap_schema=<source_schema1>:<target_schema1>,<source_schema2>:<target_schema2>
remap_tablespace=<source_tablespace>:<target_tablespace>
exclude=constraint,ref_constraint,index

nohup impdp parfile=import_source_schemaname_expdp.par &


Check the status of IMPORT
--------------------------
http://select-star-from.blogspot.in/2013/06/longops-import.html

Validation of Import of Schemas
-------------------------------

set pages 50000 lines 32767
col owner format a15
col object_type format a20
col object_name format a30
select owner, object_name, object_type,status,created,last_ddl_time from dba_objects where owner in ('&owner') order by object_type, object_name
/

IMP
---
nohup imp userid=\'/ as sysdba \' file=source_schemaname_exp.dmp log=source_schemaname_exp_import.log fromuser=schemaname touser=schemaname ignore=y statistics=NONE commit=y resumable=y resumable_name=continue resumable_timeout=18000 &

-----------------------------------------------------------------------------------------------------

Schema Status
-------------
select username,account_status,default_tablespace from dba_users where username=upper('&username');

select owner,sum(bytes)/1024/1024 "SIZE in MB" from dba_segments where owner=upper('&owner') group by owner;

select owner,object_type,count(*) from dba_objects where owner=upper('&owner') and object_name not like 'BIN$%' group by object_type,owner order by object_type; 

NOTE:
THE TARGET SCHEMA OBJECT COUNT SHOULD BE SAME AS THE SOURCE SCHEMA OBJECT COUNT CAPTURED

-----------------------------------------------------------------------------------------------------

Gather schema stats after schema refresh
----------------------------------------
set timing on
exec dbms_stats.gather_schema_stats(ownname=>'&schema_name', CASCADE=>TRUE,ESTIMATE_PERCENT=>30,degree =>4);

-----------------------------------------------------------------------------------------------------
NOTE: ONCE TASK COMPLETED AT PRODUCTION SERVER IMMEDIATELY DO EXIT.


TABLE REFRESH steps in ORACLE

Steps for TABLE REFRESH:-
-----------------------
1. Capture the SOURCE database TABLE ROW COUNT which are to be refreshed.
2. Take EXPORT of required TABLE(s) at SOURCE Database.
3. Copy the EXPORT DUMPFILE(s) to DESTINATION Database Server.(SCP)
4. Take EXPORT of required TABLE(s) at DESTINATION Database.(Recommended)
5. TRUNCATE the required TABLE(s) to be restored at DESTINATION Database.(Recommended)

6. IMPORT the Copied SOURCE EXPORT DUMPFILE(s) to the DESTINATION Database.
7. THE TARGET TABLE ROW COUNT SHOULD BE SAME AS THE SOURCE TABLE ROW COUNT CAPTURED.

SOURCE
------
Hostname       :
Database Name  :
Table(s) Name :

DESTINATION
------
Hostname       :
Database Name  :
Table(s) Name :


@ SOURCE
--------


Database Details:
----------------
http://select-star-from.blogspot.in/2013/08/oracle-database-details.html
Table Space Report
------------------
http://select-star-from.blogspot.in/2013/08/tablespace-utilization-script-in-oracle.html
ASM Space Report
----------------
http://select-star-from.blogspot.in/2013/03/asm-tablespace-utilization-scripts.html

Check for the space availability for taking backup
--------------------------------------------------
df -h or df -gt
df -h /source/backup/location/

cd /source/backup/location/

mkdir EXPORT_REF_NO
chmod 755 EXPORT_REF_NO
cd EXPORT_REF_NO

pwd
/source/backup/location/EXPORT_REF_NO
 
Schema Status
-------------
select username from dba_users where username=upper('&username');

Table Status
------------
select count(*) from SCHEMANAME.TABLENAME;

EXPDP
-----
col DIRECTORY_PATH for a80;
select * from dba_directories;

create or replace directory DATAPUMP_DIR as '/source/backup/location/';
grant read,write ON DIRECTORY DATAPUMP_DIR to <system>;
alter user <username> quota unlimited on <tablespace_name>; ---> If required

col DIRECTORY_PATH for a80;
select * from dba_directories;

PAR file
--------
vi SOURCE_TABLENAME_EXPDP.par

userid="/ as sysdba"
directory=DATAPUMP_DIR
dumpfile=SOURCE_TABLENAME_EXPDP_%U.dmp
logfile=SOURCE_TABLENAME_EXPDP.log
tables=SOURCE_SCHEMANAME.TABLENAME1,SOURCE_SCHEMANAME.TABLENAME2
JOB_NAME=SOURCE_TABLE_EXPORT
parallel=6
COMPRESSION=ALL 
STATUS=60
EXCLUDE=CONSTRAINT,INDEX,TRIGGER

filesize=4G

:wq

nohup expdp  parfile=SOURCE_TABLENAME_EXPDP.par &

Check the status of EXPORT
--------------------------
http://select-star-from.blogspot.in/2013/06/longops-export.html


EXP
---
If the size is less better to go for EXP/IMP
nohup exp userid=\'/ as sysdba \' file=SOURCE_TABLENAME_EXP.dmp log=SOURCE_TABLENAME_EXP.log tables=OWNER.TABLENAME &
-----------------------------------------------------------------------------------------------------
SCP SOURCE DUMP FILES TO DESTINATION 
----------------------------------------------------------
@ Source
--------
scp source_tablename_expdp*.dmp oracle@target_hostname:/target/backup/location/EXPORT_REF_NO/

(OR)

@ Target
--------
cd /target/backup/location/EXPORT_REF_NO/
scp oracle@source_hostname:/source/backup/location/EXPORT_REF_NO/source_tablename_expdp*.dmp .

pwd
/target/backup/location/EXPORT_REF_NO/
ls -lrth source_tablename_expdp*.dmp

-----------------------------------------------------------------------------------------------------

@ Target
--------

Database Details:
----------------
http://select-star-from.blogspot.in/2013/08/oracle-database-details.html
Table Space Report
------------------
http://select-star-from.blogspot.in/2013/08/tablespace-utilization-script-in-oracle.html
ASM Space Report
----------------
http://select-star-from.blogspot.in/2013/03/asm-tablespace-utilization-scripts.html

Check for the space availability for taking backup
--------------------------------------------------
df -h
df -h /target/backup/location/

cd /target/backup/location/

mkdir EXPORT_REF_NO
chmod 777 EXPORT_REF_NO
cd EXPORT_REF_NO

pwd
/target/backup/location/EXPORT_REF_NO

Schema Status
-------------
select username from dba_users where username=upper('&username');

Table Status
------------
select count(*) from SCHEMANAME.TABLENAME;

EXPDP
-----
col DIRECTORY_PATH for a80;
select * from dba_directories;

create or replace directory DATAPUMP_DIR as '/target/backup/location/EXPORT_REF_NO/';
grant read,write ON DIRECTORY DATAPUMP_DIR to system;

cd /target/backup/location/EXPORT_REF_NO/
ls -lrth SOURCE_TABLENAME_EXPDP_*.dmp --------> scp files are here (%U in the par file expands to 01, 02, ... ; use a shell wildcard to list them)

-----------------------------------------------------------------------------------------------------------
@ Target
--------

TAKE BACKUP (EXPORT) OF TABLES BEING RESTORED AT DESTINATION DATABASE  

PAR file
--------
vi SOURCE_TABLENAME_EXPDP.par -----> same as above PAR file at SOURCE DATABASE
:wq

nohup expdp  parfile=SOURCE_TABLENAME_EXPDP.par &

Check the status of EXPORT
--------------------------
http://select-star-from.blogspot.in/2013/06/longops-export.html

EXP
---
If the size is less better to go for EXP/IMP
nohup exp userid=\'/ as sysdba \' file=TARGET_TABLENAME_EXP.dmp log=TARGET_TABLENAME_EXP.log tables=OWNER.TABLENAME &


------------------------------------------------------------------------------------------------------------

@ Target
--------

PAR file
--------
vi IMPORT_SOURCE_TABLENAME_EXP.par

userid="/ as sysdba"
directory=DATAPUMP_DIR
dumpfile=SOURCE_TABLENAME_EXPDP_%U.dmp  logfile=IMPORT_SOURCE_TABLENAME_EXP.log
tables=SOURCE_SCHEMANAME.TABLENAME1, SOURCE_SCHEMANAME.TABLENAME2
--OR-- (TABLES and SCHEMAS modes are mutually exclusive; specify only one of them)
SCHEMAS=SOURCE_SCHEMANAME1,SOURCE_SCHEMANAME2
JOB_NAME=SOURCE_TABLE_IMPORT
PARALLEL=6

IF REQUIRED:
-----------
TABLE_EXISTS_ACTION=TRUNCATE
REMAP_SCHEMA=SOURCE_SCHEMANAME1:DESTINATION_SCHEMANAME1,
SOURCE_SCHEMANAME2:DESTINATION_SCHEMANAME2
REMAP_TABLESPACE=SOURCE_SCHEMA_TABLESPACE1: DESTINATION_SCHEMA_TABLESPACE1,
SOURCE_SCHEMA_TABLESPACE2: DESTINATION_SCHEMA_TABLESPACE2
EXCLUDE=CONSTRAINT,REF_CONSTRAINT,INDEX
EXCLUDE=TABLE:in('<TABLE_NAME1>','<TABLE_NAME2>')
:wq

nohup impdp parfile = IMPORT_SOURCE_TABLENAME_EXP.par &


Check the status of IMPORT
--------------------------
http://select-star-from.blogspot.in/2013/06/longops-import.html


Validation of Import of tables
------------------------------

set pages 50000 lines 32767
col owner format a15
col object_type format a20
col object_name format a30
select owner, object_name, object_type,status,created,last_ddl_time from dba_objects where object_name in ('&object_name') and owner='&owner' order by object_type, object_name desc
/

NOTE:
THE TARGET TABLE ROW COUNT SHOULD BE SAME AS THE SOURCE TABLE ROW COUNT CAPTURED

-----------------------------------------------------------------------------------------------------

November 24, 2012

Database BLACKOUT

OEM BLACKOUT
------------

 
OEM ->
Targets ->
enter the target dbhost name ->
select blackout ->
enter blackout name ->
select the reason ->
select add(if required to add more) ->
select all ->
next ->
select immediatetime from & select end time ->
next

 To know the particular database is configured with OEM or not:
-------------------------------------------------------------

echo $AGENT_HOME
/u01/app/oracle/product/agent10g/

cd $AGENT_HOME/bin
./emctl config agent listtargets


To check the status of all the blackouts on a host:
--------------------------------------------------

./emctl status blackout


To set a blackout for all targets/databases on a host:
-----------------------------------------------------

emctl start blackout <Blackoutname> [-nodeLevel] [-d <Duration>]

"-nodeLevel" tells the agent to stop monitoring all targets on the server.
"-d Duration" allows you to set a duration in the format of [days] hh:mm.

cd $AGENT_HOME/bin
./emctl start blackout alltargets_onserver -nodeLevel -----> Blackout entire host indefinitely
<Perform Maintenance Tasks>

To stop blackout "alltargets_onserver" immediately:
--------------------------------------------------

cd $AGENT_HOME/bin
./emctl stop blackout alltargets_onserver


To set a blackout for a database on a host:
------------------------------------------

emctl start blackout <Blackoutname> [<Target_name>:<Target_Type>]…. [-d <Duration>]

"-nodeLevel" tells the agent to stop monitoring all targets on the server.
"-d Duration" allows you to set a duration in the format of [days] hh:mm.

cd $AGENT_HOME/bin
./emctl start blackout Blackoutname database1 -d 6:00
<Perform Maintenance Tasks>

To stop blackout "Blackoutname" immediately:
-------------------------------------------

./emctl stop blackout Blackoutname

To get the help menu for emctl
------------------------------

emctl blackout


Examples:
--------

To start an immediate indefinite blackout called "Blackoutname" for all targets on the host:
./emctl start blackout Blackoutname -nodeLevel

To start an immediate blackout called "Blackoutname" for all targets on the host  for 6 hours:
./emctl start blackout Blackoutname -nodeLevel -d 06:00

To start an immediate blackout called "Blackoutname" for database "database1" for 30 minutes:
./emctl start blackout Blackoutname database1 -d 30

To start an immediate blackout called "Blackoutname" for database "database2" for 6 hours:
./emctl start blackout Blackoutname database2 -d 6:00

To start an immediate blackout called "Blackoutname" for databases "database1","database2" and listener "listener1" which will last for 5 days 3 hours and 30 minutes.
./emctl start blackout Blackoutname database1 database2 listener1:oracle_listener -d 5 03:30

OEM:Daily / Weekly Backups not happening

Issue : 
Daily incremental/ weekly full backups scheduled through OEM is not happening as scheduled.

Troubleshooting:

1) First check whether emagent is running or not on that database node?
    a) ps -efa|grep agent

        - confirms the agent is running.

    b) cd /home/oracle/OEM_GRID/agent10g/bin/
       ./emctl status agent

       - The output says agent is up and running fine properly.

2) Checked whether agent upload is happening or not?
    ./emctl upload agent

    - ERROR. 
    May be the same thing is not allowing the backups to happen as scheduled.

Fix :
    cd /home/oracle/OEM_GRID/agent10g/bin/
    ./emctl stop agent
    ./emctl clearstate agent
    ./emctl start agent
    ./emctl upload agent

After this, scheduled OEM jobs will run as expected.

November 19, 2012

FIND


To find a file/directory
------------------------
find /home -name oraInventory -print
find /home|grep oraInventory

To delete files older than 60 days
----------------------------------
find . -name "*.gz" -depth -mtime +60 -exec rm {} \;

SOURCE : Internet

Archivelog deletion using RMAN


rman target /

list archivelog all backed up 1 times to DEVICE TYPE DISK completed before 'sysdate-3';

delete noprompt archivelog all backed up 1 times to DEVICE TYPE DISK completed before 'sysdate-3';

If you know that the logs are unavailable because you deleted them by using an operating system utility, then run the following command at the RMAN prompt to update RMAN metadata:
CROSSCHECK ARCHIVELOG ALL;

It is always better to use RMAN to delete logs than to use an operating system utility. The easiest method to remove unwanted logs is to specify the DELETE INPUT option when backing up archived logs.

For example, enter:
BACKUP DEVICE TYPE sbt ARCHIVELOG ALL DELETE ALL INPUT;

TAR

tar -cvf newname.tar directory_name (to tar)
tar -xvf filename.tar (to untar)

Hanganalyze

oradebug setmypid
oradebug unlimit;
oradebug hanganalyze 3 or 12
oradebug tracefile_name;
oradebug close_trace
oradebug setmypid
oradebug unlimit;
oradebug dump systemstate 266
oradebug tracefile_name;
oradebug close_trace

USER last login details

SELECT TO_CHAR(TIMESTAMP#,'MM/DD/YY HH:MI:SS') TIMESTAMP,
USERID, AA.NAME ACTION FROM SYS.AUD$ AT, SYS.AUDIT_ACTIONS AA
WHERE AT.ACTION# = AA.ACTION
and AA.name='LOGON'
and userid in
('&User_id')
ORDER BY TIMESTAMP# DESC;
select OS_USERNAME,action_name,USERNAME,to_char(timestamp, 'DD MON YYYY hh24:mi') logon_time,
to_char(logoff_time,'DD MON YYYY hh24:mi') logoff
from dba_audit_session where username ='&user'
AND (timestamp > (sysdate - 61))
order by logon_time,username,timestamp,logoff_time;

STREAMS - Capture/Propagation/Apply



----chk_capture.sql
set feedback off
set heading on
set linesize 120
column capture_name format a25
column status format a30
column status_change_time format a15
column capture_time format a15
column last_enqueued_scn format 99999999999999999999
column start_scn format 99999999999999999999
column first_scn format 99999999999999999999
column captured_scn format 99999999999999999999
select c.capture_name
, c.status || ' - ' || DECODE(v.state,'DICTIONARY INITIALIZATION','DICT. INITIALIZE',v.state) || ' n' || inst_id STATUS
, to_char(c.status_change_time,'DD-MON-YY HH24:MI') STATUS_CHANGE_TIME
, c.captured_scn, to_char(v.capture_time,'DD-MON-YY HH24:MI') CAPTURE_TIME
from dba_capture c, gv$streams_capture v
where c.capture_name = v.capture_name (+)
/
set feedback on

----chk_propagation.sql
column propagation_status format a100
select s.DESTINATION || ' State:' || DECODE(s.SCHEDULE_DISABLED,'Y','DISABLED','N','ENABLED ', 'UNKNOWN ') || ' Msgs:' || p.total_msgs || ' Errs:' || FAILURES PROPAGATION_STATUS
FROM dba_queue_schedules s, gv$propagation_sender p
where p.dblink = s.destination
and s.message_delivery_mode='PERSISTENT';

----chk_apply
COLUMN APPLY_NAME HEADING 'Apply Process|Name' FORMAT A25
COLUMN APPLY_CAPTURED HEADING 'Apply Type' FORMAT A22
COLUMN PROC_NAME HEADING 'Process|Name' FORMAT A7
COLUMN STATE HEADING 'State' FORMAT A17
COLUMN TOTAL_MESSAGES_DEQUEUED HEADING 'Total Events|Dequeued' FORMAT 99999999999
COLUMN msg_number FORMAT 9999999999999999
COLUMN dq_time format a15
COLUMN Status format a10
set linesize 120
set feedback off;
set heading on
set pagesize 80
 
SELECT r.APPLY_NAME,
ap.status || ' n' || s.inst_id STATUS,
SUBSTR(s.PROGRAM,INSTR(S.PROGRAM,'(')+1,4) PROC_NAME,
r.STATE|| ' ApplySrv: ' || DECODE(srv.state,'EXECUTE TRANSACTION','EXEC TRANSXTION',srv.state) STATE,
r.TOTAL_MESSAGES_DEQUEUED,
r.DEQUEUED_MESSAGE_NUMBER MSG_NUMBER,
to_char(r.DEQUEUE_TIME,'DD-MON-YY HH24:MI') DQ_TIME
FROM gV$STREAMS_APPLY_READER r, gV$SESSION s, DBA_APPLY ap, gV$STREAMS_APPLY_SERVER srv
WHERE r.SID = s.SID
and r.inst_id=s.inst_id and r.inst_id=srv.inst_id
AND r.SERIAL# = s.SERIAL#
AND r.APPLY_NAME = ap.APPLY_NAME
AND r.APPLY_NAME = srv.APPLY_NAME
UNION
SELECT apply_name,status, ' ' process_name, '(disabled)' state, NULL total_message_dequeued, NULL msg_number, NULL DQ_TIME
FROM DBA_APPLY
WHERE status != 'ENABLED'
/
set feedback on;

----chk_queues.sql
column QUEUE_NAME format a30
select QUEUE_NAME || ' n' || inst_id QUEUE_NAME,NUM_MSGS,SPILL_MSGS from gv$buffered_queues;

JOBS


Scheduler jobs

set lines 300 pages 100
col JOB_ACTION for a50
select job_name, owner,job_action,state,NEXT_RUN_DATE from dba_scheduler_jobs;

enable a broken job
(use this to enable job in a schema, if you want to enable job of different schema, use dbms_ijob)

exec dbms_job.broken(JOB=>43, NEXT_DATE=>'SYSDATE + 1', broken=>FALSE);
exec dbms_job.broken(JOB=>43, NEXT_DATE=>TO_DATE('09-JUN-11 12:30 AM','DD-MON-YY HH:MI AM'), broken=>FALSE);

Breaking a job

dbms_ijob.broken(<job_number>, Boolean);

change interval of a job

exec dbms_job.next_date(83,TO_DATE('14-JUL-11 12:30 AM','DD-MON-YY HH:Mi PM'));
exec dbms_job.interval(83,'SYSDATE + 1');

disable a job

exec dbms_scheduler.disable('AUTO_SPACE_ADVISOR_JOB');

drop job

exec dbms_scheduler.drop_job('ADV_SEGMENTADV_7420699');

November 18, 2012

AWR


Understanding AWR quick

Quick Instructions For Obtaining The Automatic Workload Repository (AWR) Report [ID 1086120.1]
FAQ: How to Use AWR reports to Diagnose Database Performance Issues [ID 1359094.1]
How to Read PGA Memory Advisory Section in AWR and Statspack Reports [ID 786554.1]
How to Interpret the OS stats section of an AWR report [ID 762526.1]


AWR report
@?/rdbms/admin/awrrpt.sql
@?/rdbms/admin/awrrpti.sql   ------- RAC


AWR snapshot interval
select * from dba_hist_wr_control;
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
AWR snapshots available
set line 150
col startup_time for a30
col END_INTERVAL_TIME for a30
col BEGIN_INTERVAL_TIME for a30
SELECT snap_id, startup_time,BEGIN_INTERVAL_TIME,END_INTERVAL_TIME
FROM dba_hist_snapshot
ORDER BY 1,2;
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
DROP_SNAPSHOT_RANGE

Drop a range of snapshots: dbms_workload_repository.drop_snapshot_range(
low_snap_id IN NUMBER,
high_snap_id IN NUMBER,
dbid IN NUMBER DEFAULT NULL);
exec dbms_workload_repository.drop_snapshot_range(1105, 1199);

Historical text



select sql_id,sql_text from dba_hist_sqltext where sql_id='&sql_id';

GRANT SELECT ON SCHEMA OBJECTS

GRANT SELECT ON SCHEMA OBJECTS TO USER THROUGH ROLE
set heading off;

select 'grant select on '||owner||'.'||object_name||' to ROLE_NAME;' from dba_objects where owner='SCHEMA_NAME';

grant ROLE_NAME to USER_NAME;

Find a role
select * from dba_roles where role like '&role'
/
Show what roles are granted to a user
select grantee,granted_role,admin_option from dba_role_privs
where grantee like ('&username')
/
Show what table privileges are granted to a role
select owner || '.' || table_name "TABLE",column_name,privilege,grantable
from role_tab_privs where role like '&role'
/
Show what system privileges are granted to a role
select privilege,admin_option from role_sys_privs where role like '&role'
/

November 08, 2012

GRANT READ WRITE ON SCHEMA OBJECTS

select 'grant read,write on '||owner||'.'||object_name||' to '||'YOURREADWRITE_USER'||';' from all_objects where owner='&YOUROBJECT_OWNER';