[20141208] Using LogMiner to read remote archived log files.txt
--If you want to read another host's archived or online redo logs on the local machine, the dbid is different, so they cannot be mined directly; the dictionary information from the source database must be included.
--To mine another database's archive files you therefore need that database's data dictionary. The usual approach is a flat-file dictionary, which requires utl_file_dir, and that parameter is often not set.
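--For reference, a sketch of the usual flat-file dictionary approach (assuming utl_file_dir
--can be set on the source database and an instance restart is acceptable; the directory and
--file name below just reuse the paths from this note):
alter system set utl_file_dir='/home/oracle11g/testd' scope=spfile;
--restart the instance, then build the dictionary file on the source database:
exec DBMS_LOGMNR_D.BUILD(dictionary_filename=>'dict_test.ora', dictionary_location=>'/home/oracle11g/testd', options=>DBMS_LOGMNR_D.STORE_IN_FLAT_FILE);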
--The dictionary file is then referenced when starting LogMiner with a command like:
exec dbms_logmnr.start_logmnr(dictfilename=>'/home/oracle11g/testd/dict_test.ora');
--If the utl_file_dir parameter has not been set (so no dictionary file exists on disk):
SYS@test> show parameter utl_file_dir
NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
utl_file_dir                         string
SYS@test> exec dbms_logmnr.start_logmnr(dictfilename=>'/home/oracle11g/testd/dict_test.ora');
BEGIN dbms_logmnr.start_logmnr(dictfilename=>'/home/oracle11g/testd/dict_test.ora'); END;
*
ERROR at line 1:
ORA-01292: no log file has been specified for the current LogMiner session
ORA-06512: at "SYS.DBMS_LOGMNR", line 58
ORA-06512: at line 1
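--Note: the ORA-01292 above is really complaining that no log file was registered; even with
--a flat-file dictionary, at least one file has to be added via DBMS_LOGMNR.ADD_LOGFILE before
--START_LOGMNR. A sketch (the archive name below is only a placeholder):
exec SYS.DBMS_LOGMNR.ADD_LOGFILE('/home/oracle11g/testd/<some_archive>.arc', sys.dbms_logmnr.New);
exec dbms_logmnr.start_logmnr(dictfilename=>'/home/oracle11g/testd/dict_test.ora');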
--In fact you can use EXECUTE DBMS_LOGMNR_D.BUILD(OPTIONS=> DBMS_LOGMNR_D.STORE_IN_REDO_LOGS); the prerequisite is that supplemental logging is enabled.
--That writes the data dictionary into the redo stream, so you only need to copy the archived/online logs over. My own test follows:
1. Set up the test environment:
SYS@test> alter system switch logfile ;
System altered.
SYS@test> archive log list;
Database log mode Archive Mode
Automatic archival Enabled
Archive destination USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence 9
Next log sequence to archive 11
Current log sequence 11
SYS@test> EXECUTE DBMS_LOGMNR_D.BUILD(OPTIONS=> DBMS_LOGMNR_D.STORE_IN_REDO_LOGS);
BEGIN DBMS_LOGMNR_D.BUILD(OPTIONS=> DBMS_LOGMNR_D.STORE_IN_REDO_LOGS); END;
*
ERROR at line 1:
ORA-01354: Supplemental log data must be added to run this command
ORA-06512: at "SYS.DBMS_LOGMNR_INTERNAL", line 3669
ORA-06512: at "SYS.DBMS_LOGMNR_INTERNAL", line 3755
ORA-06512: at "SYS.DBMS_LOGMNR_D", line 12
ORA-06512: at line 1
--Supplemental logging has to be enabled first.
SYS@test> alter database add supplemental log data;
Database altered.
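--(Optional) supplemental logging can be verified with:
select supplemental_log_data_min from v$database;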
SYS@test> archive log list;
Database log mode Archive Mode
Automatic archival Enabled
Archive destination USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence 13
Next log sequence to archive 15
Current log sequence 15
SYS@test> EXECUTE DBMS_LOGMNR_D.BUILD(OPTIONS=> DBMS_LOGMNR_D.STORE_IN_REDO_LOGS);
PL/SQL procedure successfully completed.
SYS@test> archive log list;
Database log mode Archive Mode
Automatic archival Enabled
Archive destination USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence 15
Next log sequence to archive 17
Current log sequence 17
$ ls -l 2014_12_08/
total 18704
-rw-r----- 1 oracle oinstall 9434112 2014-12-08 12:00:16 o1_mf_1_14_b8b8lj2y_.arc
-rw-r----- 1 oracle oinstall 12288 2014-12-08 12:00:55 o1_mf_1_15_b8b8mq3g_.arc
-rw-r----- 1 oracle oinstall 9432064 2014-12-08 12:00:58 o1_mf_1_16_b8b8mt39_.arc
--Two archive log switches occurred. My guess is that it switches once before the build runs
--and once after it completes, so the data dictionary ended up in o1_mf_1_16_b8b8mt39_.arc.
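--Instead of guessing from file sizes, the archived logs that hold the dictionary can be
--located directly; DICTIONARY_BEGIN/DICTIONARY_END in v$archived_log flag them. A sketch:
select sequence#, name, dictionary_begin, dictionary_end
  from v$archived_log
 where dictionary_begin = 'YES' or dictionary_end = 'YES'
 order by sequence#;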
2. Create the test data:
create table t1 ( id number,name varchar2(20));
insert into t1 values (1,'aaaa');
insert into t1 values (2,'bbbb');
insert into t1 values (3,'cccc');
commit ;
update t1 set name='BBBB' where id=2;
commit ;
delete t1 where id=3;
commit ;
alter system switch logfile ;
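--(Optional) before copying in step 3, the recently archived sequences (the ones that need
--to be transferred) can be listed, for example:
select sequence#, name, first_change#, next_change#
  from v$archived_log
 where first_time > sysdate - 1/24
 order by sequence#;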
3. Copy the archive log files:
$ scp o1_mf_1_1[67]_*.arc oracle11g@192.168.100.40:/home/oracle11g/testd/
oracle11g@192.168.100.40's password:
o1_mf_1_16_b8b8mt39_.arc 100% 9211KB 9.0MB/s 00:00
o1_mf_1_17_b8b9565v_.arc 100% 977KB 977.0KB/s 00:00
4. Mine with LogMiner:
--I have always used LogMiner from Toad before; this time I am testing it in sqlplus.
exec SYS.DBMS_LOGMNR.ADD_LOGFILE( '/home/oracle11g/testd/o1_mf_1_16_b8b8mt39_.arc', sys.dbms_logmnr.New);
exec SYS.DBMS_LOGMNR.ADD_LOGFILE( '/home/oracle11g/testd/o1_mf_1_17_b8b9565v_.arc', sys.dbms_logmnr.AddFile);
exec SYS.DBMS_LOGMNR.START_LOGMNR ( Options => sys.dbms_logmnr.DICT_FROM_REDO_LOGS );
--Very slow, and I do not know why.
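--The files registered in this LogMiner session can be double-checked through v$logmnr_logs,
--which also flags the file containing the dictionary. A sketch:
select filename, low_scn, dictionary_begin, dictionary_end from v$logmnr_logs;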
SCOTT@test> SELECT scn, operation, row_id, sql_redo FROM V$LOGMNR_CONTENTS WHERE seg_owner = 'SCOTT' AND table_name = 'T1' ORDER BY 1;
SCN OPERATION ROW_ID SQL_REDO
---------- -------------------------------- ------------------ ------------------------------------------------------------
933853 DDL AAAAAAAAAAAAAAAAAB create table t1 ( id number,name varchar2(20));
--Only the create table statement is visible.
exec dbms_logmnr.end_logmnr;
5. Retry with the following options:
exec SYS.DBMS_LOGMNR.ADD_LOGFILE( '/home/oracle11g/testd/o1_mf_1_16_b8b8mt39_.arc', sys.dbms_logmnr.New);
exec SYS.DBMS_LOGMNR.ADD_LOGFILE( '/home/oracle11g/testd/o1_mf_1_17_b8b9565v_.arc', sys.dbms_logmnr.AddFile);
exec SYS.DBMS_LOGMNR.START_LOGMNR ( Options => sys.dbms_logmnr.DICT_FROM_REDO_LOGS+sys.dbms_logmnr.COMMITTED_DATA_ONLY+sys.dbms_logmnr.DDL_DICT_TRACKING );
-- In my final test I used: exec SYS.DBMS_LOGMNR.START_LOGMNR ( Options => sys.dbms_logmnr.DICT_FROM_REDO_LOGS+sys.dbms_logmnr.DDL_DICT_TRACKING );
-- which should be enough to see the DML statements that were executed.
-- Additional reference (from the package specification itself):
-- Constants for start_logmnr options flag
NO_DICT_RESET_ONSELECT CONSTANT BINARY_INTEGER := 1;
COMMITTED_DATA_ONLY CONSTANT BINARY_INTEGER := 2;
SKIP_CORRUPTION CONSTANT BINARY_INTEGER := 4;
DDL_DICT_TRACKING CONSTANT BINARY_INTEGER := 8;
DICT_FROM_ONLINE_CATALOG CONSTANT BINARY_INTEGER := 16;
DICT_FROM_REDO_LOGS CONSTANT BINARY_INTEGER := 32;
NO_SQL_DELIMITER CONSTANT BINARY_INTEGER := 64;
PRINT_PRETTY_SQL CONSTANT BINARY_INTEGER := 512;
CONTINUOUS_MINE CONSTANT BINARY_INTEGER := 1024;
NO_ROWID_IN_STMT CONSTANT BINARY_INTEGER := 2048;
STRING_LITERALS_IN_STMT CONSTANT BINARY_INTEGER := 4096;
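-- These options are bit flags, so "adding" the constants simply combines them: for example
-- DICT_FROM_REDO_LOGS + COMMITTED_DATA_ONLY + DDL_DICT_TRACKING = 32 + 2 + 8 = 42, and the
-- call above is equivalent to passing the literal value (a sketch, same effect):
exec SYS.DBMS_LOGMNR.START_LOGMNR ( Options => 32 + 2 + 8 );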
--
SCOTT@test> SELECT scn, operation, row_id, sql_redo FROM V$LOGMNR_CONTENTS WHERE seg_owner = 'SCOTT' AND table_name = 'T1' ORDER BY 1;
SCN OPERATION ROW_ID SQL_REDO
------- ----------- ------------------ ---------------------------------------------------------------------------------------------
933853 DDL AAAAAAAAAAAAAAAAAB create table t1 ( id number,name varchar2(20));
933862 INSERT AAANDuAAEAAAAGeAAA insert into "SCOTT"."T1"("ID","NAME") values ('1','aaaa');
933977 INSERT AAANDuAAEAAAAGeAAB insert into "SCOTT"."T1"("ID","NAME") values ('2','bbbb');
933979 INSERT AAANDuAAEAAAAGeAAC insert into "SCOTT"."T1"("ID","NAME") values ('3','cccc');
933988 UPDATE AAANDuAAEAAAAGeAAB update "SCOTT"."T1" set "NAME" = 'BBBB' where "NAME" = 'bbbb' and ROWID = 'AAANDuAAEAAAAGeAAB';
933994 DELETE AAANDuAAEAAAAGeAAC delete from "SCOTT"."T1" where "ID" = '3' and "NAME" = 'cccc' and ROWID = 'AAANDuAAEAAAAGeAAC';
6 rows selected.
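--V$LOGMNR_CONTENTS is only populated for the current session while LogMiner is running; to
--keep the mined rows after end_logmnr they can be copied out first, e.g. (table name is
--arbitrary):
create table t1_mined as
select scn, operation, seg_owner, table_name, row_id, sql_redo, sql_undo
  from v$logmnr_contents
 where seg_owner = 'SCOTT' and table_name = 'T1';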
exec dbms_logmnr.end_logmnr;
6. Supplementary test:
--In the test above the dictionary was written to the redo stream first, so a table created afterwards is not in that dictionary; LogMiner then only returns the DDL and cannot decode the DML unless the sys.dbms_logmnr.DDL_DICT_TRACKING option is added.
--This time build the dictionary into the redo stream at the end instead and repeat the test:
SYS@test> EXECUTE DBMS_LOGMNR_D.BUILD(OPTIONS=> DBMS_LOGMNR_D.STORE_IN_REDO_LOGS);
PL/SQL procedure successfully completed.
-- scp o1_mf_1_19_b8b9qhjd_.arc oracle11g@192.168.100.40:/home/oracle11g/testd/
exec SYS.DBMS_LOGMNR.ADD_LOGFILE( '/home/oracle11g/testd/o1_mf_1_17_b8b9565v_.arc', sys.dbms_logmnr.New);
exec SYS.DBMS_LOGMNR.ADD_LOGFILE( '/home/oracle11g/testd/o1_mf_1_19_b8b9qhjd_.arc', sys.dbms_logmnr.AddFile);
exec SYS.DBMS_LOGMNR.START_LOGMNR ( Options => sys.dbms_logmnr.DICT_FROM_REDO_LOGS );
--This step is extremely slow, and I do not know where the problem is.
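--To see where the time goes, the session can be traced around the call (a sketch; this is
--the kind of trace that produced the tkprof output quoted in the summary below):
alter session set events '10046 trace name context forever, level 12';
-- ... rerun the slow START_LOGMNR call here ...
alter session set events '10046 trace name context off';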
SCOTT@test> SELECT scn, operation, row_id, sql_redo FROM V$LOGMNR_CONTENTS WHERE seg_owner = 'SCOTT' AND table_name = 'T1' ORDER BY 1;
SCN OPERATION ROW_ID SQL_REDO
------- ---------- ------------------ -----------------------------------------------------------------------------------------------
933853 DDL AAAAAAAAAAAAAAAAAB create table t1 ( id number,name varchar2(20));
933862 INSERT AAANDuAAEAAAAGeAAA insert into "SCOTT"."T1"("ID","NAME") values ('1','aaaa');
933977 INSERT AAANDuAAEAAAAGeAAB insert into "SCOTT"."T1"("ID","NAME") values ('2','bbbb');
933979 INSERT AAANDuAAEAAAAGeAAC insert into "SCOTT"."T1"("ID","NAME") values ('3','cccc');
933988 UPDATE AAANDuAAEAAAAGeAAB update "SCOTT"."T1" set "NAME" = 'BBBB' where "NAME" = 'bbbb' and ROWID = 'AAANDuAAEAAAAGeAAB';
933994 DELETE AAANDuAAEAAAAGeAAC delete from "SCOTT"."T1" where "ID" = '3' and "NAME" = 'cccc' and ROWID = 'AAANDuAAEAAAAGeAAC';
6 rows selected.
exec dbms_logmnr.end_logmnr;
--Summary:
1. Executing EXECUTE DBMS_LOGMNR_D.BUILD(OPTIONS=> DBMS_LOGMNR_D.STORE_IN_REDO_LOGS);
writes the data dictionary into the redo logs. The prerequisite is that supplemental logging has been enabled:
SYS@test> alter database add supplemental log data;
2. Add the sys.dbms_logmnr.COMMITTED_DATA_ONLY+sys.dbms_logmnr.DDL_DICT_TRACKING options when necessary.
3. When mining on the local database, use the DICT_FROM_ONLINE_CATALOG option instead; no dictionary build is needed (see the sketch at the end of this note).
4. I do not know why it is so slow; exec SYS.DBMS_LOGMNR.START_LOGMNR ( Options => sys.dbms_logmnr.DICT_FROM_REDO_LOGS ); took roughly 110-120 seconds to complete.
5. I ran a 10046 trace and found the following:
SQL ID: a5w8kbk6udsn5 Plan Hash: 0
INSERT INTO SYS.LOGMNRLT_110_COL$ ( COL#, SEGCOL#, NAME, TYPE#, LENGTH,
PRECISION#, SCALE, NULL$, INTCOL#, PROPERTY, CHARSETID, CHARSETFORM, SPARE1,
SPARE2, OBJ#, LOGMNR_UID )
VALUES
(:01, :2 , :3 , :4 , :5 , :6 , :7 , :8 , :9 , :10, :11, :12, :13, :14, :15,
:16 )
call count cpu elapsed disk query current rows
------- ------ -------- ---------- ---------- ---------- ---------- ----------
Parse 1 0.00 0.00 0 0 0 0
Execute 56243 52.63 54.80 660 804 62305 56243
Fetch 0 0.00 0.00 0 0 0 0
------- ------ -------- ---------- ---------- ---------- ---------- ----------
total 56244 52.63 54.81 660 804 62305 56243
Misses in library cache during parse: 1
Misses in library cache during execute: 1
Optimizer mode: CHOOSE
Parsing user id: SYS (recursive depth: 1)
Number of plan statistics captured: 1
Rows (1st) Rows (avg) Rows (max) Row Source Operation
---------- ---------- ---------- ---------------------------------------------------
0 0 0 LOAD TABLE CONVENTIONAL (cr=2 pr=5 pw=0 time=1650 us)
Elapsed times include waiting on following events:
Event waited on Times Max. Wait Total Waited
---------------------------------------- Waited ---------- ------------
db file sequential read 660 0.11 0.32
latch: shared pool 1 0.00 0.00
********************************************************************************
SQL ID: 2jv9waqgh5asr Plan Hash: 0
INSERT INTO SYS.LOGMNRLT_110_OBJ$ ( OBJV#, OWNER#, NAME, NAMESPACE, SUBNAME,
TYPE#, OID$, REMOTEOWNER, LINKNAME, FLAGS, SPARE3, STIME, OBJ#, LOGMNR_UID )
VALUES
(:01, :2 , :3 , :4 , :5 , :6 , :7 , :8 , :9 , :10, :11, :12, :13, :14 )
call count cpu elapsed disk query current rows
------- ------ -------- ---------- ---------- ---------- ---------- ----------
Parse 1 0.00 0.00 0 0 0 0
Execute 52018 40.15 43.35 710 931 58400 52018
Fetch 0 0.00 0.00 0 0 0 0
------- ------ -------- ---------- ---------- ---------- ---------- ----------
total 52019 40.15 43.35 710 931 58400 52018
Misses in library cache during parse: 1
Misses in library cache during execute: 1
Optimizer mode: CHOOSE
Parsing user id: SYS (recursive depth: 1)
Number of plan statistics captured: 1
Rows (1st) Rows (avg) Rows (max) Row Source Operation
---------- ---------- ---------- ---------------------------------------------------
0 0 0 LOAD TABLE CONVENTIONAL (cr=2 pr=5 pw=0 time=26723 us)
Elapsed times include waiting on following events:
Event waited on Times Max. Wait Total Waited
---------------------------------------- Waited ---------- ------------
db file sequential read 710 0.30 0.90
log file switch completion 1 0.08 0.08
latch: shared pool 1 0.00 0.00
--This is where most of the time goes.
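--Sketch for summary point 3: mining on the local database itself, using the online catalog
--as the dictionary (the archive path is only a placeholder):
exec SYS.DBMS_LOGMNR.ADD_LOGFILE('<full path to a local archived log>', sys.dbms_logmnr.New);
exec SYS.DBMS_LOGMNR.START_LOGMNR ( Options => sys.dbms_logmnr.DICT_FROM_ONLINE_CATALOG );
select scn, operation, sql_redo from v$logmnr_contents where seg_owner = 'SCOTT' and table_name = 'T1';
exec dbms_logmnr.end_logmnr;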