[Notes] SQL scripts from OTN

Posted by husthxd on 2004-10-11
Some SQL scripts collected from OTN.

------------------------------------------------------------------------------------1
-- 1.Here's a function to calculate the number of seconds elapsed in a given period of time.


create or replace function how_many_seconds (
p_startdate date,
p_enddate date,
p_starttime varchar2,
p_endtime varchar2
)
return number
is
--
mv_sysdate varchar(8) := to_char(sysdate, 'rrrrmmdd');
mn_return_value number;
mn_seconds_per_day number;
mn_total_days number;
mn_seconds_in_start_day number;
mn_seconds_in_end_day number;
--
begin
--
mn_seconds_per_day := to_char(to_date(mv_sysdate || p_endtime,
'rrrrmmddhh24mi'), 'sssss') -
to_char(to_date(mv_sysdate || p_starttime, 'rrrrmmddhh24mi'),
'sssss');
mn_total_days := (trunc(p_enddate) - trunc(p_startdate)) - 1;
mn_seconds_in_start_day := to_char(to_date(mv_sysdate ||
p_endtime, 'rrrrmmddhh24mi'), 'sssss') -
to_char(p_startdate, 'sssss');
mn_seconds_in_end_day := to_char(p_enddate, 'sssss') -
to_char(to_date(mv_sysdate || p_starttime, 'rrrrmmddhh24mi'),
'sssss');
--
if mn_total_days < 0 then
mn_total_days := 0;
mn_seconds_in_start_day := 0;
end if;
--
if mn_seconds_in_start_day < 0 then
mn_seconds_in_start_day := 0;
end if;
--
if mn_seconds_in_end_day < 0 then
mn_seconds_in_end_day := 0;
end if;
--
mn_return_value := mn_seconds_in_start_day + (mn_total_days *
mn_seconds_per_day) + mn_seconds_in_end_day;
--
return mn_return_value;
--
end;
/

select how_many_seconds(to_date('200404281900',
'rrrrmmddhh24mi'),
to_date('200404291000', 'rrrrmmddhh24mi'), '0900', '1500')
"Seconds"
from dual
/
Seconds
----------
3600
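
As a further check (a hedged example, assuming the function above is compiled): a span from 2004-04-26 19:00 to 2004-04-29 10:00 with a 09:00-15:00 working window covers two full 6-hour days (April 27 and 28) plus one hour on the final morning, so the call below should return 46800.

select how_many_seconds(to_date('200404261900', 'rrrrmmddhh24mi'),
to_date('200404291000', 'rrrrmmddhh24mi'), '0900', '1500') "Seconds"
from dual
/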


------------------------------------------------------------------------------------2
-- 2.To generate comma or pipe delimited output for tools such as Excel, Access, and so on while spooling from SQL*Plus,
-- use settings from SQL*Plus such as:

SET COLSEP ,


or

SET COLSEP |


Example:

set pages 0 feed off
set colsep ,
select
object_name,
object_type,
status,
created
from all_objects
where owner='PUBLIC'
and rownum < 10 /* 9 rows for Sample Output */
;


Output:

ALL_ALL_TABLES ,SYNONYM ,VALID ,01-APR-2004
ALL_ARGUMENTS ,SYNONYM ,VALID ,01-APR-2004
ALL_ASSOCIATIONS ,SYNONYM ,VALID ,01-APR-2004
ALL_CATALOG ,SYNONYM ,VALID ,01-APR-2004
ALL_CLUSTERS ,SYNONYM ,VALID ,01-APR-2004
ALL_CLUSTER_HASH_EXPRESSIONS ,SYNONYM ,VALID ,01-APR-2004
ALL_COLL_TYPES ,SYNONYM ,VALID ,01-APR-2004
ALL_COL_COMMENTS ,SYNONYM ,VALID ,01-APR-2004
ALL_COL_PRIVS ,SYNONYM ,VALID ,01-APR-2004


For pipe delimited output:

set colsep |

select
object_name,
object_type,
status,
created
from all_objects
where owner='PUBLIC'
and rownum < 10 /* 9 rows for Sample Output */
;


Output:

ALL_ALL_TABLES |SYNONYM |VALID |01-APR-2004
ALL_ARGUMENTS |SYNONYM |VALID |01-APR-2004
ALL_ASSOCIATIONS |SYNONYM |VALID |01-APR-2004
ALL_CATALOG |SYNONYM |VALID |01-APR-2004
ALL_CLUSTERS |SYNONYM |VALID |01-APR-2004
ALL_CLUSTER_HASH_EXPRESSIONS |SYNONYM |VALID |01-APR-2004
ALL_COLL_TYPES |SYNONYM |VALID |01-APR-2004
ALL_COL_COMMENTS |SYNONYM |VALID |01-APR-2004
ALL_COL_PRIVS |SYNONYM |VALID |01-APR-2004


For TAB delimited output, you can use the following:

col TAB# new_value TAB NOPRINT
select chr(9) TAB# from dual;
set colsep "&TAB"


select
object_name,
status,
created
from all_objects
where owner='PUBLIC'
and rownum < 10 /* 9 rows for Sample Output */
;


Output:

ALL_ALL_TABLES VALID 01-APR-2004
ALL_ARGUMENTS VALID 01-APR-2004
ALL_ASSOCIATIONS VALID 01-APR-2004
ALL_CATALOG VALID 01-APR-2004
ALL_CLUSTERS VALID 01-APR-2004
ALL_CLUSTER_HASH_EXPRESSIONS VALID 01-APR-2004
ALL_COLL_TYPES VALID 01-APR-2004
ALL_COL_COMMENTS VALID 01-APR-2004
ALL_COL_PRIVS VALID 01-APR-2004
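
To actually produce a file that Excel or Access can open, wrap the query in a spool; a minimal sketch (objects.csv is an arbitrary file name):

set pages 0 feed off trimspool on
set colsep ,
spool objects.csv
select object_name, object_type, status, created
from all_objects
where owner = 'PUBLIC'
and rownum < 10;
spool off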


------------------------------------------------------------------------------------3
/*
The objective of this function is to transpose rows to columns.

This RowToCol function is built with invoker-rights AUTHID CURRENT_USER. The function works on tables/views of the user who invokes the function, not on the owner of this function.

RowToCol takes two parameters:

1. SQL Statement: You can pass any valid SQL statement to this function.
2. Delimiter: You can pass any character as a delimiter. The default value is ','.


Examples on how to use RowToCol Function:

Example 1: Where the rows to be converted to a column string come from a different table.

     SELECT a.deptno, a.dname, a.loc,
     rowtocol('SELECT DISTINCT job FROM emp WHERE deptno = ' || a.deptno) as jobs
     FROM dept a;


Example 2: Where the value in the WHERE clause is a character string, put it in string format.

Notice that the main query and the query passed as the argument use the same source table (EMP), so use the DISTINCT clause in the main query.
*/

     SELECT DISTINCT a.job
     ,rowtocol('SELECT ename FROM emp WHERE job = ' || '''' || a.job || '''' || ' ORDER BY ename')
     AS Employees
     FROM emp a;

-- Code Listing:

CREATE OR REPLACE
FUNCTION rowtocol( p_slct IN VARCHAR2,
                   p_dlmtr IN VARCHAR2 DEFAULT ',' ) RETURN VARCHAR2
AUTHID CURRENT_USER AS
/*
1) The column should be of a character type.
2) If it is a non-character type, the column has to be converted into a character type.
3) If the returned rows should be in a specified order, put that ORDER BY clause in the SELECT statement argument.
4) If the SQL statement happens to return duplicate values and you do not want that, put DISTINCT in the SELECT statement argument.
*/
TYPE c_refcur IS REF CURSOR;
lc_str    VARCHAR2(4000);
lc_colval VARCHAR2(4000);
c_dummy   c_refcur;
BEGIN
   OPEN c_dummy FOR p_slct;
   LOOP
      FETCH c_dummy INTO lc_colval;
      EXIT WHEN c_dummy%NOTFOUND;
      lc_str := lc_str || p_dlmtr || lc_colval;
   END LOOP;
   CLOSE c_dummy;
   RETURN SUBSTR(lc_str, 2);
/*
EXCEPTION
   WHEN OTHERS THEN
      lc_str := SQLERRM;
      IF c_dummy%ISOPEN THEN
         CLOSE c_dummy;
      END IF;
      RETURN lc_str;
*/
END;
/
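
The second parameter overrides the default comma. A hedged example (assuming the standard demo table emp, as in the examples above) that joins the department-10 employee names with " / " instead:

     SELECT rowtocol('SELECT ename FROM emp WHERE deptno = 10 ORDER BY ename', ' / ') AS employees
     FROM dual;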

------------------------------------------------------------------------------------4
-- The following lists the top 10 segments by V$SEGSTAT statistic name (since the instance was started). I usually employ this method to help determine which segments are responsible for most of the Logical I/O or Buffer Busy Waits on my system. Once I have this information, then I will search for SQL statements to tune in either my statspack report or by querying V$SQL.

     set feed off pagesize 42 linesize 132 veri off echo off pause off
     col value form 999,999,999,999
     col owner form a12
     col object_name head 'Object Name' form a30
     col object_type head 'Object Type' form a18
     col statistic_name head 'Statistic Name' form a25
     set head off feed off
     select lpad(rownum,36)||'. '||statname
     from (select distinct(statistic_name) statname
           from v$segstat
           order by statistic_name)
     /
     prompt
     ACCEPT statnum prompt 'Which statistic do you wish to retrieve: '
     col statname noprint new_value statname
     select statname
     from (select distinct statistic_name statname
           from v$segstat
           where statistic_name = CASE WHEN &statnum = 1 THEN 'ITL waits'
                                       WHEN &statnum = 2 THEN 'buffer busy waits'
                                       WHEN &statnum = 3 THEN 'db block changes'
                                       WHEN &statnum = 4 THEN 'global cache cr blocks served'
                                       WHEN &statnum = 5 THEN 'global cache current blocks served'
                                       WHEN &statnum = 6 THEN 'logical reads'
                                       WHEN &statnum = 7 THEN 'physical reads'
                                       WHEN &statnum = 8 THEN 'physical reads direct'
                                       WHEN &statnum = 9 THEN 'physical writes'
                                       WHEN &statnum = 10 THEN 'physical writes direct'
                                       WHEN &statnum = 11 THEN 'row lock waits'
                                  END )
     /
     set head on
     ttitle "-- Top 10 Segments by &statname" skip 2
     select owner, object_name, object_type, value, statistic_name
     from (select DO.owner,
                  DO.object_name,
                  DO.object_type,
                  SS.value,
                  SS.statistic_name,
                  row_number () over (order by value desc) RN
           from dba_objects DO, v$segstat SS
           where DO.object_id = SS.obj#
           and statistic_name = '&statname')
     where RN <= 10
     /
     prompt
     ttitle off
     set feed on
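
Once the top segments are known, a hedged follow-up query (YOUR_SEGMENT_NAME is only a placeholder for an object name reported above) to find candidate SQL statements in V$SQL:

     select sql_text, executions, buffer_gets, disk_reads
     from v$sql
     where upper(sql_text) like '%YOUR_SEGMENT_NAME%'
     order by buffer_gets desc;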


------------------------------------------------------------------------------------5
/*
Generate SQL*Loader Control File for Any Table

This tip comes from Navroz Kapadia, DBA, Economics & Business Cluster in Toronto, Ontario, Canada.

This script will generate a SQL*Loader control file for any specified table, which can then be customized
as per individual needs. Ensure it is run by a user with SELECT_CATALOG_ROLE.
Parameter 1 is the owner of the table. Parameter 2 is the tablename.
*/


set verify off
set heading off
set embedded on
set recsep off
set pages 500
set lines 80
set feedback off
define &&1 = ''
spool c:\your_path_name\&&2..ctl
select
'load data
infile ''' || 'C:\your_path_name\' || upper('&2') || '.CSV''' || '
into table ' || upper('&1') || '.' || upper('&2') || '
replace
fields terminated by '',''' || '
trailing nullcols'
from all_tables where owner = upper('&1') and table_name = upper('&2')
/
select decode(a.column_id, '1', '(') || a.column_name ||
(case when a.column_id < max(b.column_id) then ',' else ')' END)
from all_tab_columns a, all_tab_columns b
where a.owner = upper('&1') and a.table_name = upper('&2') and
b.owner = upper('&1') and b.table_name = upper('&2')
group by a.column_name, a.column_id
order by a.column_id
/
spool off
set verify on
set heading on
set embedded off
set recsep wrapped
set pages 64
set lines 80
set feedback 6
undefine 1
undefine 2
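
For reference, a hedged illustration of the generated control file for the standard SCOTT.EMP demo table (the path is whatever you substituted above); the file can then be edited before use:

load data
infile 'C:\your_path_name\EMP.CSV'
into table SCOTT.EMP
replace
fields terminated by ','
trailing nullcols
(EMPNO,
ENAME,
JOB,
MGR,
HIREDATE,
SAL,
COMM,
DEPTNO)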

------------------------------------------------------------------------------------6
/*
Find Out Where Your PL/SQL Session Is

This tip comes from Sudhindra Chillarige, DBA, Global Computer Enterprises in Reston, Virginia.

Suppose you run a relatively long PL/SQL procedure that processes several rows, and you want to track its progress. The following procedure lets you see the total number of rows and the current row being processed, using client_info in the V$SESSION view.

Log in as MYUSER and create the following sample procedure:
*/

CREATE OR REPLACE PROCEDURE PROC_MYPROGRESS
AS
-- declare variables here
L_CNT   NUMBER DEFAULT 0;
L_TOTAL NUMBER DEFAULT 100000;
begin
-- get the total number of rows that I am processing
select count(*)
into L_TOTAL
from all_objects;
for x in (SELECT object_name, object_type from all_objects) loop
begin
L_CNT := L_CNT + 1;
DBMS_APPLICATION_INFO.SET_CLIENT_INFO ( 'PROCESSING row ' || L_CNT || ' out of ' || L_TOTAL );
end;
end loop;
DBMS_APPLICATION_INFO.SET_CLIENT_INFO ( 'Done');
end PROC_MYPROGRESS;
/


Execute the above procedure in one SQL session and, from another session, query the V$SESSION view with the following SQL.


select username, client_info
from v$session
where username = 'MYUSER';


-- This can be easily modified to specific procedures/jobs to track that particular session.
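
Since SET_CLIENT_INFO is called for every row in the loop above, a hedged variation for very large row counts is to refresh the message only periodically, for example every 1,000 rows:

for x in (SELECT object_name, object_type from all_objects) loop
   L_CNT := L_CNT + 1;
   if mod(L_CNT, 1000) = 0 then
      DBMS_APPLICATION_INFO.SET_CLIENT_INFO ( 'PROCESSING row ' || L_CNT || ' out of ' || L_TOTAL );
   end if;
end loop;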

------------------------------------------------------------------------------------7
/*
Optimize Your UNDO Parameters

This tip comes from Marco Gilbert, DBA, MRQ in Ste-Foy, Quebec, Canada.

When you are working with UNDO (instead of ROLLBACK) there are two important things to consider: 1.) the size of the UNDO tablespace, and 2.) the UNDO_RETENTION parameter.

There are two ways to proceed :

1) You can choose to allocate a specific size for the UNDO tablespace and then set the UNDO_RETENTION
parameter to an optimal value according to the UNDO size and the database activity.
If your disk space is limited and you do not want to allocate more space than necessary to the UNDO tablespace,
this is the way to proceed. The following query will help you to optimize the UNDO_RETENTION parameter :

*/


select d.undo_size/(1024*1024) "ACTUAL UNDO SIZE (MEGS)",
       substr(e.value,1,25) "UNDO RETENTION (Secs)",
       round((d.undo_size / (to_number(f.value) * g.undo_block_per_sec))) "OPTIMAL UNDO RETENTION (Secs)"
from (select sum(a.bytes) undo_size
      from v$datafile a,
           v$tablespace b,
           dba_tablespaces c
      where c.contents = 'UNDO'
      and c.status = 'ONLINE'
      and b.name = c.tablespace_name
      and a.ts# = b.ts#) d,
     v$parameter e,
     v$parameter f,
     (select max(undoblks/((end_time-begin_time)*3600*24)) undo_block_per_sec
      from v$undostat) g
where e.name = 'undo_retention'
and f.name = 'db_block_size'
;

/*
2) If you are not limited by disk space,
then it would be better to choose the UNDO_RETENTION time that is best for you (for FLASHBACK, etc.).
Allocate the appropriate size to the UNDO tablespace according to the database activity:
*/

select d.undo_size/(1024*1024) "ACTUAL UNDO SIZE (MEGS)",
       substr(e.value,1,25) "UNDO RETENTION (Secs)",
       (to_number(e.value) * to_number(f.value) * g.undo_block_per_sec) / (1024*1024) "NEEDED UNDO SIZE (MEGS)"
from (select sum(a.bytes) undo_size
      from v$datafile a,
           v$tablespace b,
           dba_tablespaces c
      where c.contents = 'UNDO'
      and c.status = 'ONLINE'
      and b.name = c.tablespace_name
      and a.ts# = b.ts#) d,
     v$parameter e,
     v$parameter f,
     (select max(undoblks/((end_time-begin_time)*3600*24)) undo_block_per_sec
      from v$undostat) g
where e.name = 'undo_retention'
and f.name = 'db_block_size'
;

/*
The previous query may return a "NEEDED UNDO SIZE" that is less than the "ACTUAL UNDO SIZE". If this is the case, you may be wasting space. You can choose to resize your UNDO tablespace to a lesser value or increase your UNDO_RETENTION parameter to use the additional space.

WARNING: Because these queries use the V$UNDOSTAT statistics, run the queries only after the database has been running with UNDO for a significant and representative time.
*/
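
If you follow the first approach, applying the suggested value is a single ALTER SYSTEM; a hedged example (10800 is only a placeholder, use the OPTIMAL UNDO RETENTION value the first query returns, and SCOPE=BOTH assumes you run with an spfile):

alter system set undo_retention = 10800 scope=both;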

------------------------------------------------------------------------------------8
/*
Deleting Duplicate SYS Constraints

This tip comes from Devanshi Parikh, Sr. DBA, Ernst and Young LLP in Lyndhurst, New Jersey.

System generated (redundant) check constraints can really drain the performance of a database. But sometimes, such as when using third party tools with Oracle, it becomes impossible to avoid them. I have developed a script, actually a UNIX Korn shell script, that retrieves information from the Oracle dictionary tables and then formats the output to generate SQL statements that can be executed in the database. The script also puts you in more control, because you decide which and how many tables you are interested in.

To run the script, first create a text file with one column, containing one or more table names in UPPERCASE. The file can include any number of table names. The script accepts this text file's name as its input argument. Each table listed in the input file is read and passed to Oracle Database to retrieve the relevant information.

Note that I have hard-coded the schema name and password in the script, which you can edit.

For each table, the script generates three files in the working directory, named tablename_SID.tmp, tablename_SID.txt, and tablename_SID.sql. The .tmp and .txt files are intermediate files, while the .sql files contain the actual drop constraint statements. The script leaves the originally created check constraints alone; it generates statements only for the constraints that are genuine duplicates.

The script assumes that each column originally did not have more than 2 distinct SEARCH_CONDITION values. (This is true in most cases, because the columns end up having a NOT NULL and a LENGTH condition in the SEARCH_CONDITION.) In this kind of scenario, if data migrations occur between databases with the CONSTRAINTS=N clause missing, you end up with lots of duplicate check constraints.

Run the script, search.sh, from the UNIX prompt:
*/

#!/usr/bin/ksh
#
# search.sh
# Argument 1: name of the file containing table names.
#
# First Written : 02/12/04 Devanshi Parikh
#
INFILE=$1
infile=`cat ${INFILE}`
for TAB_NAME in ${infile}
do
rm ${TAB_NAME}_${ORACLE_SID}.txt
rm ${TAB_NAME}_${ORACLE_SID}.tmp
rm ${TAB_NAME}_${ORACLE_SID}.sql
sqlplus -s sysadm/config <<!
spool ${TAB_NAME}_${ORACLE_SID}.tmp
set head off
select '${TAB_NAME}' from dual;
whenever sqlerror exit 1
column search_condition format a35
column constraint_name format a20
column column_name format a22
select a.constraint_name,b.column_name,search_condition
from user_constraints a,user_cons_columns b
where a.table_name=b.table_name and
a.constraint_type='C' and a.constraint_name=b.constraint_name
and a.table_name = '${TAB_NAME}'
order by b.column_name,last_change;
spool off
exit
!
cat ${TAB_NAME}_${ORACLE_SID}.tmp | grep -v "IS NOT NULL" | sed '/^$/d' > ${TAB_NAME}_${ORACLE_SID}.txt
table_name=`grep -v "^SYS_" ${TAB_NAME}_${ORACLE_SID}.txt | grep -v rows`
column_name='aa'
constraint_name='aa'
grep "^SYS_" ${TAB_NAME}_${ORACLE_SID}.txt |
while read cons_line
do
constraint_name_next=`echo $cons_line |cut -d " " -f 1`
column_name_next=`echo $cons_line |cut -d " " -f 2`
if [ ${column_name_next} = ${column_name} ]
then
echo "ALTER TABLE SYSADM.${table_name} DROP CONSTRAINT ${constraint_name_next};" >> ${TAB_NAME}_${ORACLE_SID}.sql
fi
column_name=${column_name_next}
constraint_name=${constraint_name_next}
done
done
exit
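
Before running the shell script, a hedged SQL*Plus check (run as the schema owner) to see which columns actually carry more than one system-generated check constraint:

select b.table_name, b.column_name, count(*) dup_count
from user_constraints a, user_cons_columns b
where a.constraint_name = b.constraint_name
and a.table_name = b.table_name
and a.constraint_type = 'C'
and a.constraint_name like 'SYS\_%' escape '\'
group by b.table_name, b.column_name
having count(*) > 1
order by b.table_name, b.column_name;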

------------------------------------------------------------------------------------9
/*
Blank Lines and Beginning Spaces with DBMS_OUTPUT.PUT_LINE

This tip comes from Pasupuleti Sailaja, in Hyderabad, India.

To display a blank line you can use chr(0) with DBMS_OUTPUT.PUT_LINE.
To display beginning spaces you can use chr(9) or chr(160) with DBMS_OUTPUT.PUT_LINE.

Use chr(9) for fewer spaces and chr(160) for more spaces.

Example:

*/

     set serveroutput on feedback off
     prompt
     begin
     dbms_output.put_line('This is 1st message Line and next line is blank.');
     dbms_output.put_line(chr(0));
     dbms_output.put_line('This is 2nd message Line and next line is blank.');
     dbms_output.put_line(chr(0));
     dbms_output.put_line('This is 3rd message Line.');
     dbms_output.put_line(chr(9)||' This is 4th message line with beginning spaces.');
     dbms_output.put_line(chr(160)||' This is 5th message line with beginning spaces.');
     end;
     /
     prompt
     set serveroutput off feedback on feedback 6

     ----------------------------------------------------------------------------------------------------------------


Output:

     This is 1st message Line and next line is blank.

     This is 2nd message Line and next line is blank.

     This is 3rd message Line.
     This is 4th message line with beginning spaces.
     This is 5th message line with beginning spaces.


------------------------------------------------------------------------------------10
/*
Separating Comma Separated Strings

This tip comes from Ramesh Periyasamy, Programmer/Analyst, in Iselin, New Jersey.

This function wraps each element of a comma-separated input string in single quotes and returns the result as a parenthesized list. This is helpful for applying IN-list conditions in dynamic SQL.

dyn_string1 Function:
*/

     create or replace function dyn_string1(ab varchar2) return varchar2 as
     t varchar2(2):=',';
     t1 varchar2(2):='''';
     t2 varchar2(32767);
     t_check number(10);
     a varchar2(32767);
     begin
     a:=ab;
     -- dbms_output.put_line('The string passed is '||a);
     t_check:=instr(a,',');
     -- dbms_output.put_line('The instr value is '||t_check);
     if t_check!=0 then
     t2:=t1||t||t1;
     a:=replace(a,',',t2);
     a:='('||''''||a||''''||')';
     -- dbms_output.put_line(a);
     else
     a:=a;
     a:='('||''''||a||''''||')';
     end if;
     return a;
     END;


Example:

     select dyn_string1('2334,345,5656,6767,7878,78989,8989,8989,9090,9090,90,90,9090,90,090,,9090,909') from dual;


Output:

     DYN_STRING1('2334,345,5656,6767,7878,78989,8989,8989,9090,9090,90,90,9090,90,090,,9090,909')

     ------------------------------------------------------------------------------------------

     ('2334','345','5656','6767','7878','78989','8989','8989','9090','9090','90','90','9090','90','090','','9090','909')


To use this dyn_string1 function along with dynamic SQL:


Create or replace PROCEDURE test(actcode IN VARCHAR2, fr_date IN DATE, to_date IN DATE)
     AS
     a varchar2(32767);
     b date;
     c date;
     sql_stmt varchar2(32767);
     Begin
     a:=dyn_string1(actcode);
     b:=fr_date;
     c:=to_date;

     sql_stmt:='INSERT INTO Table2 select A1.NYCWAY_CASE_NUMBER
     FROM table1 A1
     WHERE A1.column1 in '||a||' and
     A1.column2 between '||''''||b||''''||' AND '||''''||c||'''';

     execute immediate sql_stmt;
     end;
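
A hedged invocation of the procedure above (assuming the table1 and Table2 referenced in the dynamic statement actually exist; the dates are arbitrary):

     begin
        test('2334,345,5656', to_date('01-01-2004','dd-mm-yyyy'), to_date('31-12-2004','dd-mm-yyyy'));
     end;
     /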


------------------------------------------------------------------------------------11
/*
Generate Spool File Layout

This tip comes from Gautam Arora, DBA, in NY, USA.

Use this simple SQL script--lay.sql--to generate the layout for the spooled file. Run the script before spooling the table to get the layout.

     Layout contains :
     ------------------
     Column Name,
     Data Type,
     Length of Column,
     Start Position of Column,
     End Position of Column,
     SQL*LOADER script,
     R/L of Rec is "End Position" in Last Line

Paste the following script into c:\lay.sql, and run this script at the SQL prompt.
*/

set linesize 400
set heading off
set pagesize 9999
select column_name
,data_type
,len
,start_pos
,end_pos
,(case when (data_type='CHAR' or data_type='VARCHAR2')
then ','||lower(column_name)||' '||'position('||start_pos||':'||end_pos||')'
when data_type='NUMBER'
then ','||lower(column_name)||' '||'position('||start_pos||':'||end_pos||')
integer external defaultif ('||start_pos||':'||end_pos||')=blanks'
when data_type='DATE'
then ','||lower(column_name)||' '||'position('||start_pos||':'||end_pos||') date nullif
('||start_pos||':'||end_pos||')=blanks' end) as scr
from (select column_name, data_type, len, ((end_pos-len)+1) as start_pos, end_pos
from (select column_name, data_Type, len, sum(len) over (order by column_id) as end_pos
from (select column_name
,column_id
,substr(data_type,1,10) data_Type
,decode(data_type,
'NUMBER',(case when length(column_name) <= 10
then 10
when length(column_name) > 10
then length(column_name)
end)
,'CHAR',data_length
,'VARCHAR2',data_length
,'DATE',length(sysdate)) as len
from user_tab_cols
where table_name='&table_name'
order by column_id)));

Example: You want to spool scott.emp.


     SQL>spool c:\emp.lay
     SQL>@c:\lay.sql

Note the R/L (End Pos in Last Line) value from emp.lay. Now you are ready to set up the spool settings below, changing the line size to the R/L value:


     SQL> set linesize 77
     SQL> set colsep ""
     SQL> set pagesize 0
     SQL> set newpage none
     SQL> set feedback off
     SQL> set termout off
     SQL> set recsep off
     SQL> set sqlprompt ""
     SQL> set heading off
     SQL> set echo off

     SQL> spool c:\emp.txt
     SQL> select * from emp;
     SQL> spool off


------------------------------------------------------------------------------------12
/*
Using LogMiner

This tip comes from Frank Pantaleo, Database Technologist, in Rochester, NY.

This tip is about using LogMiner to replicate data between a production instance and a remote instance. The biggest advantage is that this technique does not require any changes to the production instance if you are just capturing inserts. If you want to capture updates/deletes, there are issues in Oracle8i: Oracle8i does not capture key information in the logs as Oracle9i does. I have a workaround for this, but it does not work very well for high-volume tables.

The body of work that follows was born out of a desire to replicate data out of production. Of course, we needed to do this while minimizing the effect on production. The best way to do this was to leverage APIs that Oracle made available in Oracle8i. Some or all of the logic provided becomes unnecessary in Oracle9i with the advent of Oracle Streams. (Although I would contend that this provides a better level of control. I made a pitch to a technical community that I am involved in. The pitch was based on an article by George Jucan called "Using Oracle LogMiner as a Data Replication Utility". The article explains the way a program or set of programs could make use of this API. Our business had already leveraged information available in LogMiner to track activity in an application.)

There are some C applications involved in this as well. I am only supplying the PL/SQL and the C Oracle external functions, as I feel this is enough to get started. Some of these file and directory procedures were created to reproduce functions that are now available in Oracle9i.

Dir_proc: C Oracle external application to get a list of all files in a unix directory
File_proc: C Oracle external application to determine the existence of a UNIX file
File_del_proc: C oracle external application to delete a file in /tmp
Get_ora_tab: Function that drives the diy$oratab view
Logminer_stats: Procedure that returns counts of local logmnr_contents store
Load_logminer: The workhorse of the application. Identifies, Extracts, and deletes archive log into local store


Oracle8i LogMiner captures the rowid for updates and deletes, but Oracle9i LogMiner captures additional information on deletes and updates, including whether the table being captured has a primary/unique key.

For Oracle8i LogMiner, I have another program that creates a known state:
The initial table state is captured from the source instance, including the rowid of each row from the source table, which I store in a column called drowid. The drowid is used to apply the update or delete to the destination table data. Once a state is captured, I can then move on to the logic supplied here and extract changes from a set of archive logs.

So the process is:

1. Enable archive logging in the source instance.
2. Create a known state--this is only necessary in Oracle8i, and then only if you need to worry about updates/deletes.
3. Copy all archive logs from the source to the machine where the destination instance lives; this process is ongoing afterward.
4. Change the init.ora of the destination instance so that log_archive_dest points to the archive logs that have been captured from the source instance.
5. At regular intervals (a minimal sketch of this apply loop follows the list):
a. run the load_logminer proc provided
b. apply the dml in logmnr_contents to the destination instance
c. update checkpoint_change# in lmstate_checkpoint as each dml is applied
d. commit after the scn changes in logmnr_contents table
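
A hedged sketch of step 5 (parts b through d), written against the LOGMNR_CONTENTS and LMSTATE_CHECKPOINT tables created below and the prepped statements that load_logminer leaves in PH1_REDO; treat it as an outline rather than the author's actual apply program:

declare
  l_last_scn number := null;
begin
  for r in (select scn, ph1_redo
            from logmnr_contents
            where ph1_redo is not null
            order by scn, rs_id, ssn) loop
    if l_last_scn is not null and r.scn <> l_last_scn then
      commit;                                  -- step d: commit when the SCN changes
    end if;
    execute immediate r.ph1_redo;              -- step b: apply the DML on the destination
    update lmstate_checkpoint
    set checkpoint_change# = r.scn;            -- step c: record progress
    l_last_scn := r.scn;
  end loop;
  commit;
end;
/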


-------------------------- pl/sql source --------------------
First I create a schema with dba rights (lmdba):


create user lmdba identified by password default tablespace lmdata
temporary tablespace temp;
grant dba,connect to lmdba;
grant execute on dbms_logmnr to lmdba;
grant execute on dbms_logmnr_d to lmdba;
grant select on sys.v_$database to lmdba;
grant select on sys.v_$parameter to lmdba;
grant select on sys.GV_$LOGMNR_logs to lmdba;
grant select on sys.GV_$LOGMNR_contents to lmdba;
grant select on sys.V_$LOGMNR_contents to lmdba;
grant execute on dbms_backup_restore to lmdba;
grant analyze any,select any table,delete any table,insert any table, update any table to lmdba;
grant alter any table to lmdba;
grant drop any table to lmdba;

Then as the lmdba user:
*/

drop view diy$oratab;
drop function get_oratab;
drop type oratab_type;
drop type oratab_row_type;

drop LIBRARY os_lib ;
CREATE LIBRARY os_lib is '/u01/apps/oracle/product/8.1.7/lib/osfunc.so';
/
CREATE OR REPLACE procedure dir_proc (FileName IN char,Path in char)
    as external
    library os_lib
    name "dir_func"
    language C
    parameters
   (FileName string,Path string);
/
--
CREATE OR REPLACE procedure file_proc (FileName IN char, bexists out binary_integer,file_size out binary_integer, block_count out binary_integer)
    as external
    library os_lib
    name "file_func"
    language C
    with context
    parameters
   (context,
    FileName string,
    bexists int,
    file_size int,
    block_count int
);
/
CREATE OR REPLACE procedure file_del_proc (FileName IN char)
    as external
    library os_lib
    name "file_del_func"
    language C
    parameters
   (FileName string);
/
drop TABLE LMSTATE_CHECKPOINT;
CREATE TABLE LMSTATE_CHECKPOINT (CHECKPOINT_CHANGE#  NUMBER);
--
drop TABLE LMSUBSCRIBE;
CREATE TABLE LMSUBSCRIBE (
  OWNER       VARCHAR2 (30),
  TABLE_NAME  VARCHAR2 (30) ) ;
--
drop TABLE LMTABLES;
CREATE TABLE LMTABLES (
  OWNER       VARCHAR2 (30),
  TABLE_NAME  VARCHAR2 (30),
  LOAD_ORDER  numeric default 10 not null) ;
--
drop TABLE LM_LOG;
CREATE TABLE LM_LOG (LM_STATE VARCHAR2(2000));
--
drop table LOGMNR_CONTENTS;
create table LOGMNR_CONTENTS (
  SCN           NUMBER,
  TIMESTAMP     DATE,
  THREAD#       NUMBER,
  LOG_ID        NUMBER,
  XIDUSN        NUMBER,
  XIDSLT        NUMBER,
  XIDSQN        NUMBER,
  RBASQN        NUMBER,
  RBABLK        NUMBER,
  RBABYTE       NUMBER,
  UBAFIL        NUMBER,
  UBABLK        NUMBER,
  UBAREC        NUMBER,
  UBASQN        NUMBER,
  ABS_FILE#     NUMBER,
  REL_FILE#     NUMBER,
  DATA_BLK#     NUMBER,
  DATA_OBJ#     NUMBER,
  DATA_OBJD#    NUMBER,
  SEG_OWNER     VARCHAR2(32),
  SEG_NAME      VARCHAR2(32),
  SEG_TYPE      NUMBER,
  SEG_TYPE_NAME VARCHAR2(32),
  TABLE_SPACE   VARCHAR2(32),
  ROW_ID        VARCHAR2(19),
  SESSION#      NUMBER,
  SERIAL#       NUMBER,
  USERNAME      VARCHAR2(32),
  SESSION_INFO  VARCHAR2(4000),
  ROLLBACK      NUMBER,
  OPERATION     VARCHAR2(32),
  SQL_REDO      VARCHAR2(4000),
  SQL_UNDO      VARCHAR2(4000),
  RS_ID         VARCHAR2(32),
  SSN           NUMBER,
  CSF           NUMBER,
  INFO          VARCHAR2(32),
  STATUS        NUMBER,
  PH1_NAME      VARCHAR2(32),
  PH1_REDO      VARCHAR2(2000),
  PH1_UNDO      VARCHAR2(2000),
  PH2_NAME      VARCHAR2(32),
  PH2_REDO      VARCHAR2(2000),
  PH2_UNDO      VARCHAR2(2000),
  PH3_NAME      VARCHAR2(32),
  PH3_REDO      VARCHAR2(2000),
  PH3_UNDO      VARCHAR2(2000),
  PH4_NAME      VARCHAR2(32),
  PH4_REDO      VARCHAR2(2000),
  PH4_UNDO      VARCHAR2(2000),
  PH5_NAME      VARCHAR2(32),
  PH5_REDO      VARCHAR2(2000),
  PH5_UNDO      VARCHAR2(2000)
);
--
drop TABLE get_oratab_setting;
CREATE GLOBAL TEMPORARY TABLE get_oratab_setting
(thedir varchar2(200))
ON COMMIT PRESERVE ROWS;
--
Create Type Oratab_Row_Type As Object ( FILE_NAME  Varchar2(100)); 
/
Create Type Oratab_Type Is Table Of Oratab_Row_Type;
/

--
Create Or Replace Function GET_ORATAB Return Oratab_Type Is
   Ora_Tab         Oratab_Type := Oratab_Type( Oratab_Row_Type( Null ) );
   f_Handle        Utl_File.File_Type;
   i_Pos           Integer;
   v_file_name     Varchar2(100);
   b_Read          Boolean := TRUE;
   b_First         Boolean := TRUE;
   tmp_file        varchar2(50);
   mydir    varchar2(200);
Begin
   --
   -- Note that in order to make the code shorter all the
   -- utl_file defined exceptions are left unhandled.
   -- 
   tmp_file :='oracle_' || to_char(sysdate,'yyyymmddhhss');
   begin
      select thedir into mydir from get_oratab_setting;
      Exception
         When NO_DATA_FOUND Then mydir := null;
   end;
   if mydir is not null then
    dir_proc('/tmp/' || tmp_file,mydir);
    f_Handle := Utl_File.FOpen( '/tmp' , tmp_file, 'r' );
    While b_Read Loop
       Begin
  Utl_File.Get_Line( f_Handle, v_file_name);
  if b_first then
   b_First := FALSE;
  Else
   Ora_Tab.Extend;
  End If;

  Ora_Tab( Ora_Tab.Last ) := Oratab_Row_Type( rtrim(v_file_name));
       Exception
   When NO_DATA_FOUND Then
        b_Read := FALSE;
       End;
    End Loop;
    Utl_File.FClose( f_Handle );
   end if;
   file_del_proc(tmp_file);
   Return Ora_Tab;
End;
/
Create Or Replace View DIY$ORATAB
As Select * From Table( Cast( GET_ORATAB() As Oratab_Type ) );
/
CREATE OR REPLACE  PROCEDURE "LOGMINER_STATS"  (insert_count out integer,delete_count out integer, update_count out integer,total_count out integer) is
       EMPTY_LOGMNR_CONTENTS exception;
       PRAGMA EXCEPTION_INIT(EMPTY_LOGMNR_CONTENTS, -1306);
       cursor the_csr is Select count(*) the_count,operation
       from LOGMNR_contents,lmsubscribe b
       where seg_owner = b.owner
       and seg_name = b.table_name
       and operation in ('INSERT','DELETE','UPDATE')
       group by operation;

begin
     insert_count :=0;
     update_count :=0;
     delete_count :=0;
     total_count :=0;
     for the_rec in the_csr loop
         if the_rec.operation = 'INSERT' then
            insert_count := the_rec.the_count;
         elsif the_rec.operation = 'DELETE' then
            delete_count := the_rec.the_count;
         elsif the_rec.operation = 'UPDATE' then
            update_count := the_rec.the_count;
         else
             null;
         end if;
     end loop;
     total_count := insert_count + delete_count + update_count;
     exception
      when no_data_found then null;
      when empty_logmnr_contents then null;
      when others then
           Raise_application_error(-20000, 'Error in LOGMNR_CONTENTS View');
end logminer_stats;
/
CREATE OR REPLACE PROCEDURE "LOAD_LOGMINER" as
EMPTY_LOGMNR_CONTENTS exception;
SID_FILE varchar2(30);
ARCH_DIR varchar2(100);
WORK_FILE varchar2(200);
ARCH_COUNT number :=0;
cursor the_csr is select file_name from diy$oratab;
first_time boolean := true;
start_scn_local number;
PRAGMA EXCEPTION_INIT(EMPTY_LOGMNR_CONTENTS, -1306);
Begin
/* clean the slate of all prior activity */
  delete from LOGMNR_CONTENTS;
  delete from lm_log;
  delete from get_oratab_setting;
  commit;
/* determine location of archive logs so we can get a directory of log_archive_dest */
  select value into arch_dir from sys.v_$parameter where name = 'log_archive_dest';
/* prime get_oratab_setting with directory name of archive logs */
  insert into get_oratab_setting (thedir) values (arch_dir);
  commit;
/* determine checkpoint of what has been applied to date */
  select CHECKPOINT_CHANGE# into start_scn_local from lmstate_checkpoint; 
/* go through each archive log and add to the local logmnr_contents table where applicable */
  begin
      for the_rec in the_csr loop
          work_file := the_rec.file_name;
          if work_file is not null then
               insert into lm_log (lm_state) values ('start loading archive log ' || work_file);
               commit;
               SYS.DBMS_LOGMNR.ADD_LOGFILE( LogFileName=>work_file, Options=>SYS.DBMS_LOGMNR.NEW);
               first_time := false;
               begin
/* use logfile generated from local or foreign database */
                    SYS.DBMS_LOGMNR.START_LOGMNR(
                       DictFileName=>'/u01/apps/oracle/product/8.1.7/dbs/SEED_dict.ora');
                    commit;
                    exception when others then
                       Raise_application_error(-20000, 'Error in LOAD_LOGMINER start_logmnr ' ||
                       sqlerrm || sqlcode);
               end;
               begin
  /* strip what we want out of v_$logmnr_contents into our local copy */
                    insert /*+ APPEND */ into LOGMNR_CONTENTS
                          select a.* from sys.V_$LOGMNR_CONTENTS a;
      /* Add any conditional logic here, e.g. */
                          /* where seg_owner = 'SOME_OWNER' and seg_name = 'SOME_TABLE' */
                    commit;
                    exception
                    when no_data_found then null;
                    when EMPTY_LOGMNR_CONTENTS then null;
                    when others then
                    Raise_application_error(-20002,
                    'Error in LOGMNR_CONTENTS View' || sqlerrm || sqlcode);
              end;              
              begin
  /* end for this log and delete it */
                   SYS.dbms_logmnr.end_logmnr;
                   insert into lm_log (lm_state) values ('end loading archive log ' || work_file);
                   commit;
                   sys.dbms_backup_restore.deletefile(work_file);
                   exception when others then null;
              end;
          end if;
      end loop;
      exception when others then
          Raise_application_error(-20010, 'Error in LOAD_LOGMINER add ' ||
                                  work_file || sqlerrm || sqlcode);
  end;
/* ok now we have our local store of activity clean it up and prep it to be used in the apply program. Put the prepped sql in ph1_redo column which is unused */
 begin
  update  LOGMNR_CONTENTS
  set ph1_redo =
  replace(replace(replace(sql_redo,'ROWID','DROWID'),'"',''),';','')
  where ph1_redo is null
  and operation in ('UPDATE','DELETE');
--
  update  LOGMNR_CONTENTS
  set ph1_redo = replace(replace(replace(replace(sql_redo,')
  values',',drowid) values'),');', ',' ||
  chr(39)  || rowidtochar(ROW_ID) ||  chr(39)  || ');'),'"',''),';','')
  where ph1_redo is null
and seg_owner = 'SOME_SCHEMA'
  and operation = 'INSERT';
--
update  LOGMNR_CONTENTS
  set ph1_redo = replace(replace(sql_redo,'"',''),';','')
  where ph1_redo is null
  and seg_owner = 'SOME_OTHER_OWNER'
  and operation = 'INSERT';
--
delete from logmnr_contents where ph1_redo is null;
--
  commit;
 end;
end;

-------------------------------------- c source follows for Oracle external functions ------------------------------------

#include <stdio.h>
#include <string.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#ifndef OCI_ORACLE
# include <oci.h>
#endif

void dir_func(char *FileName,char *Path);
void file_func(OCIExtProcContext *,char *,int *,int *,int *);
void file_del_func(char *FileName);

void dir_func(char *FileName,char *Path) {
 int num;
        static   FILE   *logfilep = NULL;
        DIR *mydir;
        struct dirent *dp;
 //struct stat mybuff;
 char work_file[100] = " ";

        if ((logfilep = fopen(FileName, "w")) != NULL) {};
//        fprintf(logfilep, "# file = %s path = %s\n",FileName,Path);
        mydir = opendir(Path);

        while ((dp = readdir (mydir)) != NULL) {
  if ((strcmp(dp->d_name,".") == 0) ||
      (strcmp(dp->d_name,"..") == 0)) {}
  else {
   strcpy(work_file,dp->d_name);
   //stat(work_file,&mybuff);  
   //fprintf(logfilep, "%s/%s:%i:%i\n" ,Path,dp->d_name,mybuff.st_size,mybuff.st_blocks);
   fprintf(logfilep, "%s/%s\n" ,Path,dp->d_name);
  }
        }

        fclose(logfilep);
}

void file_func(OCIExtProcContext *with_context,char *FileName,
                int *exists,int *filesize,int *block_count) {
        int rtn =0;
        struct stat mybuff;
        long file_size=0;
        char mybyte;

        *exists=1;
//
        rtn = stat(FileName,&mybuff);
        if (rtn ==0) {
                *filesize=mybuff.st_size;
                *block_count=mybuff.st_blocks;
                *exists=0;
        }
        else
        {
                *exists=3;
                *filesize=0;
                *block_count=0;
        }
        *exists=rtn;
}

void file_del_func(char *FileName) {
        int rtn =0;
        struct stat mybuff;
        long file_size=0;
 char myFileName[200] = "/tmp/";
//
 strcat(myFileName,FileName); 
        rtn = stat(myFileName,&mybuff);
        if (rtn ==0) {
  unlink(myFileName);
 }
}


------------------------------------------------------------------------------------13
/*
Comparing Two Tables: Highlighting the Differences

This tip comes from Sanjay Ray, Oracle Applications Technical Consultant in Sydney, Australia.


This is an extension to the code tip that I supplied on 21 December 2003, "Comparing Contents of Two Tables with Identical Structure." The following script highlights the columns that are different in the two tables, so there is no need to compare each column individually. The column values may optionally be displayed as well. This script generates a spool file that can be run from a SQL*Plus session to list the differences between two tables with identical structure.

This can be useful if you need to compare two similar tables across different schemas (or different databases, in which case you will need to create a local view using a database link), or to compare a table with itself at different points in time, such as before and after an update through an application to track how a table is affected. It reports whether a record is present in one but not in the other table, or if the same key is present in both tables but non-key columns have different values.
*/
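
A hedged usage sketch (the script file name compare_tables.sql is arbitrary): run the script, supply the two table names at the prompts, then run the generated temp_file.sql:

     SQL> @compare_tables.sql
     Enter value for table1: SCOTT.EMP
     Enter value for table2: SCOTT.EMP_COPY
     SQL> @temp_file.sql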

     undefine TABLE1
     undefine TABLE2
     define g_table1 = '&&TABLE1'
     define g_table2 = '&&TABLE2'
     set verify off
     set feedback off
     set serveroutput on size 1000000
     set linesize 132
     set trimspool on
     spo temp_file.sql
     declare
     v_owntab1 varchar2(255) := '&&g_table1';
     v_owntab2 varchar2(255) := '&&g_table2';
     v_own1 varchar2(255);
     v_own2 varchar2(255);
     v_tab1 varchar2(255);
     v_tab2 varchar2(255);
     v_dot1 number := 0;
     v_dot2 number := 0;
     type t_cols is table of varchar2(255) index by binary_integer;
     v_cols1 t_cols;
     v_cols2 t_cols;
     v_keys t_cols;
     v_out1 varchar2(255);
     v_out2 varchar2(255);
     kq CONSTANT varchar2(1) := '''';
     v_ind number := 0;
     v_str varchar2(2000):=null;
     v_ind_found boolean := FALSE;
     v_ind_colno number := 0;
     v_is_key boolean := FALSE;
     v_nonkey number := 0;
     procedure print_cols (p_cols in t_cols) is
     begin
     for i in 1..p_cols.count
     loop
     dbms_output.put_line(','||p_cols(i)); end loop;
     end print_cols;

     begin
     v_dot1 := instr(v_owntab1, '.');
     v_dot2 := instr(v_owntab2, '.');

     if v_dot1 > 0 then
     v_own1 := upper(substr(v_owntab1, 1, v_dot1-1));
     v_tab1 := upper(substr(v_owntab1, v_dot1+1));
     else
     v_own1 := null;
     v_tab1 := upper(v_owntab1);
     end if;

     if v_dot2 > 0 then
     v_own2 := upper(substr(v_owntab2, 1, v_dot2-1));
     v_tab2 := upper(substr(v_owntab2, v_dot2+1));
     else
     v_own2 := null;
     v_tab2 := upper(v_owntab2);
     end if;

     select column_name
     bulk collect into v_cols1
     from all_tab_columns
     where table_name = v_tab1
     and owner = nvl(v_own1, user)
     order by column_id;

     select column_name
     bulk collect into v_cols2
     from all_tab_columns
     where table_name = v_tab2
     and owner = nvl(v_own2, user)
     order by column_id;

     if v_cols1.count = 0 or v_cols2.count = 0 then
     dbms_output.put_line('Either or Both the tables are invalid');
     return;
     end if;

     --Outer select
     dbms_output.put_line('select * from ( ');
     --Main select (key attributes)
     dbms_output.put_line('select ');
     for c1 in (
     select b.column_name
     from all_indexes a, all_ind_columns b
     where a.owner=b.index_owner
     and a.index_name=b.index_name
     and a.uniqueness = 'UNIQUE'
     and a.table_owner = nvl(v_own1, user)
     and a.table_name = v_tab1
     order by b.index_name, b.column_position
     )
     loop
     v_ind_found := TRUE;
     v_keys(nvl(v_keys.count, 0)+1):=c1.column_name;
     dbms_output.put_line('nvl(a.'||c1.column_name||', b.'||c1.column_name||') '||c1.column_name||',');
     end loop;

     if not v_ind_found then
     v_keys(nvl(v_keys.count, 0)+1):=v_cols1(1);
     dbms_output.put_line('nvl(a.'||v_cols1(1)||', b.'||v_cols1(1)||') '||v_cols1(1)||',');
     end if;

     --Identifier column to indicate if the key is present in either, or the other or both tables
     dbms_output.put_line('decode(a.'||v_cols1(1)||', null, '||kq||'TAB2'||kq
     ||', decode(b.'||v_cols1(1)||', null, '||kq||'TAB1'||kq||', '||kq||'BOTH'||kq||')) WHICH, ');

     --Main select (non-key attributes)
     for i in 1..v_cols1.count
     loop
     v_is_key:=FALSE;
     --If the column is a key column, it should be excluded
     for j in 1..v_keys.count
     loop
     if v_cols1(i)=v_keys(j) then
     v_is_key:=TRUE;
     end if;
     end loop;
     if not v_is_key then
     v_nonkey:=v_nonkey+1;
     --The following code has 2 sections. Only one can be activated at any time
     --Presently Section 1 is active, i.e. only column list will be displayed

     -- To display column list without values, uncomment Section 1
     --Start of Section 1 : Column List only. Without values
     if v_nonkey = 1 then
     dbms_output.put_line('decode(a.'||v_cols1(i)||',b.'||v_cols1(i)||', null,'||kq||v_cols1(i)||kq||')');
     else
     dbms_output.put_line('||'||kq||','||kq||'||'||'decode(a.'||v_cols1(i)||',b.'||v_cols1(i)||', null,'||kq||v_cols1(i)||kq||')');
     end if;
     --End

Source: ITPUB blog, http://blog.itpub.net/6906/viewspace-21575/ — please credit the source when reposting.
