0207 UNIFORM SIZE=10M index insert split 2

Posted by lfree on 2017-02-07

[20170207] UNIFORM SIZE=10M index insert split 2.txt

http://blog.itpub.net/267265/viewspace-2133066/

--In the test from a few days ago, I noticed that the original author first ran delete mytest where x<=199000; and committed, and only then performed the insert:
--insert into mytest with x as (select level i from dual connect by level <= 199000) select i from x where mod(i, 250) = 0;
--What happens if I merge the two operations into a single statement? Let's see:
DELETE FROM mytest WHERE x <= 199000 AND MOD (x, 250) != 0;
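--//Either way the table ends up with the same rows: the multiples of 250 up to 199000 plus 199001..200126, so only the physical state of the index differs. A quick sanity check that could be run after the combined delete (a sketch, not part of the original test):
select count(*) from mytest where x <= 199000;
--//expected: 199000/250 = 796 rows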

1. Environment:

SCOTT@book> @ &r/ver1

PORT_STRING                    VERSION        BANNER
------------------------------ -------------- --------------------------------------------------------------------------------
x86_64/Linux 2.4.xx            11.2.0.4.0     Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production


2. To make the test easy to repeat, I wrote a script:
--//A note on the pp script: it displays the trace file; many similar scripts can be found online.
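--//The pp script itself is not shown in this post; a minimal stand-in would simply report the current session's trace file name (this is only an assumption about what pp does; v$diag_info is available from 11g onward):
column trace_file format a100
select value trace_file from v$diag_info where name = 'Default Trace File';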

$ cat a1.sql

--Parameter 1: data file size; parameter 2: UNIFORM SIZE; parameter 3: manual or auto
--DROP TABLESPACE tea INCLUDING CONTENTS AND DATAFILES;
CREATE TABLESPACE TEA DATAFILE
  '/mnt/ramdisk/book/tea01.dbf' SIZE &&1 AUTOEXTEND ON NEXT 1280K MAXSIZE UNLIMITED
LOGGING
ONLINE
EXTENT MANAGEMENT LOCAL UNIFORM SIZE &&2
BLOCKSIZE 8K
SEGMENT SPACE MANAGEMENT &&3
FLASHBACK ON;

drop table mytest purge;
create table mytest(x number) tablespace tea;
create index i_mytest_x on mytest(x) tablespace tea;
insert into  mytest select level from dual connect by level <= 200126;
commit;

col object_id for 9999999 new_value b_object_id
select object_id,data_object_id from dba_objects where owner=user and object_name='I_MYTEST_X';

--delete mytest where x<=199000;
--commit;

--insert into mytest with x as (select level i from dual connect by level <= 199000) select i from x where mod(i, 250) = 0;
--commit ;

DELETE FROM mytest WHERE x <= 199000 AND MOD (x, 250) != 0;
commit;
alter session set events 'immediate trace name treedump level &b_object_id';
@ &r/pp

$ cat a2.sql
alter system flush buffer_cache;
alter system flush buffer_cache;
alter session set events '10046 trace name context forever, level 12';
INSERT INTO MYTEST VALUES (200127);
alter session set events '10046 trace name context off';
commit ;

3. Test:
SCOTT@book> @ a1 100M 10M  manual
..

----- begin tree dump
branch: 0x1800581 25167233 (0: nrow: 399, level: 1)
   leaf: 0x1800582 25167234 (-1: nrow: 540 rrow: 2)
   leaf: 0x1800583 25167235 (0: nrow: 533 rrow: 2)
   leaf: 0x1800584 25167236 (1: nrow: 533 rrow: 2)
   leaf: 0x1800585 25167237 (2: nrow: 533 rrow: 2)
   leaf: 0x1800586 25167238 (3: nrow: 533 rrow: 2)
   leaf: 0x1800587 25167239 (4: nrow: 533 rrow: 2)
   leaf: 0x1800588 25167240 (5: nrow: 533 rrow: 2)
   leaf: 0x1800589 25167241 (6: nrow: 533 rrow: 3)
   leaf: 0x180058a 25167242 (7: nrow: 533 rrow: 2)
...
   leaf: 0x180070a 25167626 (391: nrow: 500 rrow: 2)
   leaf: 0x180070b 25167627 (392: nrow: 500 rrow: 2)
   leaf: 0x180070c 25167628 (393: nrow: 500 rrow: 2)
   leaf: 0x180070d 25167629 (394: nrow: 500 rrow: 2)
   leaf: 0x180070e 25167630 (395: nrow: 500 rrow: 128)
   leaf: 0x180070f 25167631 (396: nrow: 500 rrow: 500)
   leaf: 0x1800710 25167632 (397: nrow: 500 rrow: 500)
----- end tree dump

--//Compared with the previous test you can see nrow: 533 here, whereas with the earlier delete-then-insert the result was nrow: 2 (in a treedump, nrow counts all entries in the leaf block including deleted ones, while rrow counts only the remaining live entries).

SCOTT@book> @ a2

=====================
PARSING IN CURSOR #140672891095112 len=34 dep=0 uid=83 oct=2 lid=83 tim=1486428871330173 hv=1063204715 ad='7de42b98' sqlid='5zb5uy0zpydvb'
INSERT INTO MYTEST VALUES (200127)
END OF STMT
PARSE #140672891095112:c=1000,e=1053,p=0,cr=0,cu=0,mis=1,r=0,dep=0,og=1,plh=0,tim=1486428871330167
WAIT #140672891095112: nam='db file sequential read' ela= 13 file#=6 block#=128 blocks=1 obj#=89754 tim=1486428871330358
WAIT #140672891095112: nam='db file sequential read' ela= 11 file#=6 block#=432 blocks=1 obj#=89754 tim=1486428871330422
WAIT #140672891095112: nam='db file sequential read' ela= 11 file#=6 block#=1409 blocks=1 obj#=89755 tim=1486428871330518
WAIT #140672891095112: nam='db file sequential read' ela= 10 file#=6 block#=1808 blocks=1 obj#=89755 tim=1486428871330572
WAIT #140672891095112: nam='db file sequential read' ela= 12 file#=3 block#=256 blocks=1 obj#=0 tim=1486428871330712
WAIT #140672891095112: nam='db file sequential read' ela= 10 file#=3 block#=2533 blocks=1 obj#=0 tim=1486428871330792
WAIT #140672891095112: nam='db file sequential read' ela= 11 file#=6 block#=1408 blocks=1 obj#=89755 tim=1486428871330920
EXEC #140672891095112:c=1000,e=968,p=7,cr=2,cu=26,mis=0,r=1,dep=0,og=1,plh=0,tim=1486428871331246
STAT #140672891095112 id=1 cnt=0 pid=0 pos=1 obj=0 op='LOAD TABLE CONVENTIONAL  (cr=2 pr=7 pw=0 time=922 us)'
WAIT #140672891095112: nam='SQL*Net message to client' ela= 2 driver id=1650815232 #bytes=1 p3=0 obj#=89755 tim=1486428871331418
WAIT #140672891095112: nam='SQL*Net message from client' ela= 233 driver id=1650815232 #bytes=1 p3=0 obj#=89755 tim=1486428871331682
CLOSE #140672891095112:c=0,e=7,dep=0,type=0,tim=1486428871331731
=====================

SCOTT@book> column PARTITION_NAME noprint
SCOTT@book> select * from dba_extents where file_id=6;
OWNER  SEGMENT_NAME         SEGMENT_TYPE       TABLESPACE_NAME                 EXTENT_ID    FILE_ID   BLOCK_ID      BYTES     BLOCKS RELATIVE_FNO
------ -------------------- ------------------ ------------------------------ ---------- ---------- ---------- ---------- ---------- ------------
SCOTT  MYTEST               TABLE              TEA                                     0          6        128   10485760       1280            6
SCOTT  I_MYTEST_X           INDEX              TEA                                     0          6       1408   10485760       1280            6

SCOTT@book>

--//You can see that this way Oracle does not read that many blocks: it does not regard these blocks as empty, so it does not have to scan through them.
--//Only at the end does it read block dba=6,1408. In other words, this phenomenon can only appear when a large number of rows are deleted, that space is reclaimed and reused, and the maximum value is then inserted.
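--//To confirm which segment the dba=6,1408 block from the trace belongs to, the file#/block# pair can be mapped through dba_extents (a sketch, not run in the original test; from the extent listing above it should return I_MYTEST_X, whose first extent starts at block 1408):
select owner, segment_name, segment_type
  from dba_extents
 where file_id = 6
   and 1408 between block_id and block_id + blocks - 1;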

4. What if UNIFORM SIZE is set smaller?
--This index occupies about 400 blocks.
--With UNIFORM SIZE=3M, one extent is 3*1024*1024/8192=384 blocks.
--With UNIFORM SIZE=2M, one extent is 2*1024*1024/8192=256 blocks.
--With UNIFORM SIZE=1M, one extent is 1*1024*1024/8192=128 blocks.
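--//The 400-block figure is consistent with the treedump above (1 branch block plus 399 leaf blocks). It could also be checked with ANALYZE INDEX ... VALIDATE STRUCTURE (a sketch, not part of the original test; note that VALIDATE STRUCTURE without ONLINE locks the table):
analyze index i_mytest_x validate structure;
select height, blocks, lf_blks, br_blks from index_stats;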
--Test each case in turn. First modify the script as follows:

$ cat a1x.sql

--Parameter 1: data file size; parameter 2: UNIFORM SIZE; parameter 3: manual or auto
--DROP TABLESPACE tea INCLUDING CONTENTS AND DATAFILES;
CREATE TABLESPACE TEA DATAFILE
  '/mnt/ramdisk/book/tea01.dbf' SIZE &&1 AUTOEXTEND ON NEXT 1280K MAXSIZE UNLIMITED
LOGGING
ONLINE
EXTENT MANAGEMENT LOCAL UNIFORM SIZE &&2
BLOCKSIZE 8K
SEGMENT SPACE MANAGEMENT &&3
FLASHBACK ON;

drop table mytest purge;
create table mytest(x number) tablespace tea;
create index i_mytest_x on mytest(x) tablespace tea;
insert into  mytest select level from dual connect by level <= 200126;
commit;

col object_id for 9999999 new_value b_object_id
select object_id,data_object_id from dba_objects where owner=user and object_name='I_MYTEST_X';

delete mytest where x<=199000;
commit;

insert into mytest with x as (select level i from dual connect by level <= 199000) select i from x where mod(i, 250) = 0;
commit ;

--DELETE FROM mytest WHERE x <= 199000 AND MOD (x, 250) != 0;
--commit;
alter session set events 'immediate trace name treedump level &b_object_id';
@ &r/pp


--Run it as follows:
SCOTT@book> @ a1x 100M 3M manual
SCOTT@book> @a2

$ grep "blocks=1 obj#=89765" /u01/app/oracle/diag/rdbms/book/book/trace/book_ora_37173.trc |wc
      4      52     483

--Normal: only 4 blocks were scanned.

@ a1x 100M 2M manual
@ a2

$ grep "blocks=1 obj#=89767" /u01/app/oracle/diag/rdbms/book/book/trace/book_ora_37193.trc |wc
    400    5200   48034
--400 blocks were scanned.

@ a1x 100M 1M manual
@ a2

$ grep "blocks=1 obj#=89769" /u01/app/oracle/diag/rdbms/book/book/trace/book_ora_37210.trc |wc
      4      52     482

--Continue with the ASSM cases:
@ a1x 100M 3M auto
@ a2
$ grep "blocks=1 obj#=89771" /u01/app/oracle/diag/rdbms/book/book/trace/book_ora_37256.trc|wc
     26     338    3158

@ a1x 100M 2M auto
@ a2

$ grep "blocks=1 obj#=89773" /u01/app/oracle/diag/rdbms/book/book/trace/book_ora_37270.trc |wc
     72     936    8654

@ a1x 100M 1M auto
@ a2

$ grep "blocks=1 obj#=89775"  /u01/app/oracle/diag/rdbms/book/book/trace/book_ora_37281.trc|wc
     70     910    8406

5. Summary:
--UNIFORM SIZE=3M, MSSM: 4 blocks scanned
--UNIFORM SIZE=2M, MSSM: 400 blocks scanned
--UNIFORM SIZE=1M, MSSM: 4 blocks scanned

--UNIFORM SIZE=3M, ASSM: 26 blocks scanned
--UNIFORM SIZE=2M, ASSM: 72 blocks scanned
--UNIFORM SIZE=1M, ASSM: 70 blocks scanned

--I will not analyze this further; I do not know why. It is probably a bug.

From the ITPUB blog: http://blog.itpub.net/267265/viewspace-2133142/. Please credit the source when reposting.
