Index Rebuild Online Process (9i), Complete Version (repost)

Last night in the shower I thought of an effective way to test index rebuild online: enable the 10046 and 10704 trace events at the same time, and combine them with deliberate lock blocking to trace the whole index rebuild online process.

The test procedure is as follows.

--1. Build a table with 5 million rows and create the index that will be rebuilt online.
--The table is made this large mainly to leave enough time to run the later steps by hand.
create table james_t as
select rownum id,dbms_random.string('l',20) user_name
from dual
connect by level <= 5e6;

create index james_t_pk on james_t (id);

--2. In Session 2 (the session used to create the block on purpose), update a single row of james_t.
update james_t set user_name = 'test 1' where rownum <= 1;

--3. Monitor from Session 3 with the query below; once Session 1 (started in the next step) shows a Table Share Lock request, move on to step 5.
select /*+ rule*/a.* from v$lock a,v$session b where a.sid = b.sid and b.username = 'JAMES';

--4. In Session 1 (the session that runs the index rebuild online), run the following statements.
alter session set events '10704 trace name context forever,level 10';
alter session set events '10046 trace name context forever,level 12';
alter index james_t_pk rebuild online;

--5. In Session 2, commit the previous transaction so the rebuild online can continue, then run an update
--that changes the indexed column on a large number of rows. This generates the data the rebuild online
--will have to merge before it acquires the Table Share Lock a second time.
commit;
update james_t set id = 5000000 + id where rownum <= 3e5;
commit;

--6. In Session 2, run a simple update like the one in step 2, to block the rebuild online from acquiring the Table Share Lock again.
update james_t set user_name = 'test 1' where rownum <= 1;

--7. Monitor from Session 3 again; when Session 1 is blocked on the Table Share Lock request, run the statement
--below and then continue. It flushes the buffer cache so the I/O Session 1 performs after it gets the
--Table Share Lock becomes visible. (The downloadable trace file was produced without this step.)
alter session set events = 'immediate trace name flush_cache';

--8. Commit Session 2's transaction and wait for the rebuild online to finish.

--9. From Session 3, pull Session 1's trace file together with the v$session_longops output below.
SID SERIAL# OPNAME      TARGET TARGET_DESC SOFAR TOTALWORK UNITS  START_TIME          LAST_UPDATE_TIME
 10      19 Sort Output                    14759     14759 Blocks 07/28/2010 09:14:26 07/28/2010 09:14:41
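For reference, the trace file and the v$session_longops snapshot above were pulled from Session 3; a query along the following lines (illustrative only, the exact statements are not part of the original test) locates Session 1's server process and shows the rebuild's long-running phases. &session1_sid is a placeholder for Session 1's SID.

-- Find Session 1's server process id and the trace directory (run as a DBA).
select p.spid, par.value as user_dump_dest
  from v$session s, v$process p, v$parameter par
 where s.sid    = &session1_sid
   and p.addr   = s.paddr
   and par.name = 'user_dump_dest';

-- Progress of the long-running phases (Sort Output etc.) of the rebuild.
select sid, serial#, opname, sofar, totalwork, units, start_time, last_update_time
  from v$session_longops
 where sid = &session1_sid;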

1. Acquire the Sub Share lock on the table. The object_id of the index is 6399.
*** 2010-07-27 23:07:16.000
ksqcmi: TM,18fe,0 mode=2 timeout=21474836
ksqcmi: returns 0
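The 10704 trace prints the TM lock id1 in hex (18fe); mapping it back to an object name is a simple dictionary lookup (shown here for illustration, not part of the original trace output):

select to_number('18FE', 'XXXX') as object_id from dual;                    -- => 6398
select object_name, object_type from dba_objects where object_id = 6398;   -- expected: JAMES_T, the base table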

2. Create the journal table.
"JAMES"."SYS_JOURNAL_6399"

create table "JAMES"."SYS_JOURNAL_6399" (C0 NUMBER, opcode char(1), partno number, rid rowid, primary key( C0 , rid ))
organization index TABLESPACE "USERS"

CREATE UNIQUE INDEX "JAMES"."SYS_IOT_TOP_6406" on "JAMES"."SYS_JOURNAL_6399"("C0","RID") INDEX ONLY TOPLEVEL TABLESPACE "USERS" NOPARALLEL
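While the rebuild is still running, the journal objects can be seen in the data dictionary; a quick illustrative check (not taken from the trace) is:

select object_id, object_name, object_type
  from dba_objects
 where owner = 'JAMES'
   and (object_name like 'SYS_JOURNAL%' or object_name like 'SYS_IOT_TOP%');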

3. Request the Share lock on the table.
*** 2010-07-27 23:07:16.000
ksqcmi: TM,18fe,0 mode=4 timeout=21474836
WAIT #1: nam='enqueue' ela= 3072242 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072273 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072430 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3071962 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072350 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072367 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072086 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072453 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 387366 p1=1414332420 p2=6398 p3=0
ksqcmi: returns 0

The enqueue waits above add up to 24.965529 seconds: 2010-07-27 23:07:16.000 + 24.965529 = 2010-07-27 23:07:40.966.
--This is only a few tens of milliseconds before the timestamp at which the Table Sub Share Lock below is taken; the small gap is mostly measurement error, since the trace output is continuous.
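The p1 value 1414332420 in the enqueue waits encodes the lock type and the requested mode; decoding it with the usual formula confirms it is the TM lock requested in mode 4 (Share), matching the ksqcmi line above:

select chr(bitand(1414332420, -16777216)/16777215) ||
       chr(bitand(1414332420,  16711680)/65535)   as lock_type,
       bitand(1414332420, 65535)                  as requested_mode
  from dual;
-- => TM, 4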

4. As soon as the Share lock has been obtained, immediately take the Sub Share lock on the table.
*** 2010-07-27 23:07:41.000
ksqcmi: TM,18fe,0 mode=2 timeout=21474836
ksqcmi: returns 0

5. Read the base table and build the index.
WAIT #1: nam='db file scattered read' ela= 42228 p1=5 p2=4709 p3=4
WAIT #1: nam='db file scattered read' ela= 326 p1=5 p2=4710 p3=3
WAIT #1: nam='db file scattered read' ela= 236 p1=5 p2=4711 p3=2
.................
WAIT #1: nam='db file scattered read' ela= 549 p1=5 p2=36357 p3=4
WAIT #1: nam='db file scattered read' ela= 481 p1=5 p2=36358 p3=3
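For a 'db file scattered read', p1 is the absolute file number and p2 the starting block, so these reads can be tied back to the segment being scanned. Taking the first wait line above as an example (an illustrative check, not part of the original test):

select owner, segment_name, segment_type
  from dba_extents
 where file_id = 5
   and 4709 between block_id and block_id + blocks - 1;
-- expected to return JAMES.JAMES_T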

6. Start the sort and output the index (Sort Output, written to the temporary file).
WAIT #1: nam='direct path write' ela= 6 p1=201 p2=19942 p3=31
WAIT #1: nam='direct path write' ela= 2 p1=201 p2=19973 p3=4
WAIT #1: nam='direct path write' ela= 2 p1=201 p2=19721 p3=31
WAIT #1: nam='direct path write' ela= 395 p1=201 p2=19752 p3=12

--Reads from the base table continue in between.
WAIT #1: nam='db file scattered read' ela= 476 p1=5 p2=36359 p3=2
WAIT #1: nam='db file sequential read' ela= 417 p1=5 p2=36360 p3=1

--Read the sorted results back from the temporary file.
WAIT #1: nam='direct path read' ela= 211 p1=201 p2=19763 p3=1
WAIT #1: nam='direct path read' ela= 57865 p1=201 p2=27472 p3=31
WAIT #1: nam='direct path read' ela= 14829 p1=201 p2=34281 p3=31
..........
WAIT #1: nam='direct path read' ela= 14853 p1=201 p2=20342 p3=19
WAIT #1: nam='direct path read' ela= 16932 p1=201 p2=26315 p3=31
WAIT #1: nam='direct path read' ela= 12710 p1=201 p2=21154 p3=31
WAIT #1: nam='direct path read' ela= 16599 p1=201 p2=32412 p3=31
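File number 201 in these direct path waits lies above the datafile range: for tempfile I/O the reported file number is the DB_FILES parameter value plus the tempfile number. Assuming the default db_files setting, an illustrative lookup to confirm which tempfile is being used:

select t.file#, t.name
  from v$tempfile t, v$parameter p
 where p.name  = 'db_files'
   and t.file# = 201 - to_number(p.value);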

--Write the index segment and extend it; this shows up as recursive SQL against file$ and seg$:

select file# from file$ where ts#=:1
select type#,blocks,extents,minexts,maxexts,extsize,extpct,user#,iniexts,NVL(lists,65535),NVL(groups,65535),cachehint,hwmincr, NVL(spare1,0) from seg$ where ts#=:1 and file#=:2 and block#=:3
insert into seg$ (file#,block#,type#,ts#,blocks,extents,minexts,maxexts,extsize,extpct,user#,iniexts,lists,groups,cachehint,bitmapranges,scanhint, hwmincr, spare1) values (:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11,:12,:13,:14,:15,0,0,:16,DECODE(:17,0,NULL,:17))

--interspersed with further direct path reads:
WAIT #1: nam='direct path read' ela= 20442 p1=201 p2=31559 p3=1

--The Sort Output phase ends and the new index is written with direct path writes.
WAIT #1: nam='direct path read' ela= 8504 p1=201 p2=19849 p3=1
WAIT #1: nam='direct path read' ela= 263 p1=201 p2=19974 p3=1
WAIT #1: nam='direct path read' ela= 46962 p1=201 p2=19721 p3=1
WAIT #1: nam='direct path write' ela= 359 p1=5 p2=48351 p3=7
WAIT #1: nam='direct path write' ela= 5 p1=5 p2=48358 p3=7

--At this point the creation of the new index segment is complete, as the v$session_longops row shows:
SID SERIAL# OPNAME      TARGET TARGET_DESC SOFAR TOTALWORK UNITS  START_TIME          LAST_UPDATE_TIME
 10      19 Sort Output                    14759     14759 Blocks 07/28/2010 09:14:26 07/28/2010 09:14:41

7. Read the changes recorded in the journal table and merge them into the new index.
--From the 10046 trace's point of view, once the new index has been written out, the session starts reading the journal table and merging its contents into the new index.

WAIT #1: nam='direct path read' ela= 23577 p1=201 p2=31718 p3=1
WAIT #1: nam='direct path read' ela= 60459 p1=201 p2=31877 p3=1
WAIT #1: nam='direct path write' ela= 5622 p1=5 p2=52496 p3=7
WAIT #1: nam='direct path write' ela= 3 p1=5 p2=52503 p3=2
WAIT #1: nam='db file sequential read' ela= 32390 p1=5 p2=397 p3=1
WAIT #1: nam='db file sequential read' ela= 34345 p1=5 p2=397 p3=1
WAIT #1: nam='db file sequential read' ela= 100004 p1=5 p2=52005 p3=1

--The merge into the new index completes.
WAIT #1: nam='db file sequential read' ela= 1521 p1=5 p2=32192 p3=1
WAIT #1: nam='db file sequential read' ela= 205 p1=5 p2=32192 p3=1
WAIT #1: nam='db file sequential read' ela= 252 p1=5 p2=32200 p3=1
WAIT #1: nam='db file sequential read' ela= 375 p1=5 p2=32200 p3=1
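While the rebuild is still in progress, the pending changes queued for this merge can be inspected directly in the journal IOT created in step 2; a rough illustrative check (not part of the original test) is:

select opcode, count(*)
  from "JAMES"."SYS_JOURNAL_6399"
 group by opcode;
-- counts the journalled changes by operation code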

8. Request the Share lock on the table again.
--The Share lock is requested in preparation for finishing the index rebuild.
*** 2010-07-28 09:15:17.000
ksqcmi: TM,18fe,0 mode=4 timeout=21474836
WAIT #1: nam='enqueue' ela= 3071546 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072536 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072024 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072293 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072416 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072140 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072175 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072294 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072249 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072318 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072184 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072216 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 3072247 p1=1414332420 p2=6398 p3=0
WAIT #1: nam='enqueue' ela= 1244788 p1=1414332420 p2=6398 p3=0
ksqcmi: returns 0

9. Read the journal entries generated by the session that was just blocking the rebuild's Share lock request, and merge those changes into the index.

--Note: because the Index Rebuild Online process performs an enqueue conversion, only one session can be blocking it at this point.
--The volume to merge this time is only what that blocking session produced, so the Share lock is normally held quite briefly.
--It is, however, held slightly longer than the first time, because it cannot be released until the subsequent object cleanup has finished.
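To see which session is actually holding up the enqueue conversion while Session 1 waits, the blocker can be picked out of v$lock; a minimal sketch (the original test simply used the query from step 3):

select w.sid as waiting_sid, h.sid as blocking_sid, w.type, w.id1, w.request
  from v$lock w, v$lock h
 where w.request > 0
   and h.block   > 0
   and w.type    = h.type
   and w.id1     = h.id1
   and w.id2     = h.id2;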

10. Drop the journal table.
drop table "JAMES"."SYS_JOURNAL_6399"

--An Exclusive lock is requested on the journal table as part of the drop.
*** 2010-07-27 23:11:03.000
ksqcmi: TM,1906,0 mode=6 timeout=0
ksqcmi: returns 0
=====================

11. Finish the index rebuild by updating the relevant data dictionary tables.
--Update the index's data_object_id in ind$:
update ind$ set ts#=:2,file#=:3,block#=:4,intcols=:5,type#=:6,flags=:7,property=:8,pctfree$=:9,initrans=:10,maxtrans=:11,blevel=:12,leafcnt=:13,distkey=:14,lblkkey=:15,dblkkey=:16,clufac=:17,cols=:18,analyzetime=:19,samplesize=:20,dataobj#=:21,degree=decode(:22,1,null,:22),instances=decode(:23,1,null,:23),rowcnt=:24,pctthres$=:31*256+:25, indmethod#=:26, trunccnt=:27,spare1=:28,spare4=:29,spare2=:30,spare6=:32 where obj#=:1
bind 19: dty=2 mxl=22(22) mal=00 scl=00 pre=00 oacflg=08 oacfl2=1 size=24 offset=0
bfp=032cc81c bln=24 avl=03 flg=05
value=6405
bind 33: dty=2 mxl=22(22) mal=00 scl=00 pre=00 oacflg=08 oacfl2=1 size=24 offset=0
bfp=032cd8dc bln=22 avl=03 flg=05
value=6399

--Update the object's data_object_id in obj$:
update obj$ set obj#=:6,type#=:7,ctime=:8,mtime=:9,stime=:10,status=:11,dataobj#=:13,flags=:14,oid$=:15,spare1=:16, spare2=:17 where owner#=:1 and name=:2 and namespace=:3 and(remoteowner=:4 or remoteowner is null and :4 is null)and(linkname=:5 or linkname is null and :5 is null)and(subname=:12 or subname is null and :12 is null)

--Point the object at its new segment in seg$, identified by ts#, header file# and header block#:
update seg$ set type#=:4,blocks=:5,extents=:6,minexts=:7,maxexts=:8,extsize=:9,extpct=:10,user#=:11,iniexts=:12,lists=decode(:13, 65535, NULL, :13),groups=decode(:14, 65535, NULL, :14), cachehint=:15, hwmincr=:16, spare1=DECODE(:17,0,NULL,:17) where ts#=:1 and file#=:2 and block#=:3

--Perform an object checkpoint:
WAIT #1: nam='rdbms ipc reply' ela= 54 p1=5 p2=21474836 p3=0
WAIT #1: nam='rdbms ipc reply' ela= 44 p1=5 p2=21474836 p3=0
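The effect of these dictionary updates can be verified once the rebuild completes: the index keeps its object_id while its data_object_id now points at the freshly built segment (6399 and 6405 respectively, judging from the bind values above). An illustrative check:

select object_name, object_id, data_object_id
  from dba_objects
 where owner = 'JAMES'
   and object_name = 'JAMES_T_PK';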

12. The index rebuild is now complete.

The complete trace file is available for download: james_ora_4268.trc.gz


Source: ITPUB blog, http://blog.itpub.net/41451/viewspace-1053880/. Please credit the source when reposting.

