一则使用HASH技术进行优化的案例
表结构:
-- Test table: 20 low-selectivity key columns whose 20-way combination is unique.
-- Fix: VARCHAR2 instead of VARCHAR -- Oracle reserves VARCHAR for possible
-- future redefinition and recommends VARCHAR2 for variable-length strings.
create table test_key(
    pk    varchar2(32),  -- populated with SYS_GUID() (32 hex characters)
    key1  varchar2(20), key2  varchar2(20), key3  varchar2(20), key4  varchar2(20),
    key5  varchar2(20), key6  varchar2(20), key7  varchar2(20), key8  varchar2(20),
    key9  varchar2(20), key10 varchar2(20), key11 varchar2(20), key12 varchar2(20),
    key13 varchar2(20), key14 varchar2(20), key15 varchar2(20), key16 varchar2(20),
    key17 varchar2(20), key18 varchar2(20), key19 varchar2(20), key20 varchar2(20))
业务需求:
检查指定key1~key20值的记录是否已经存在,如存在返回其pk,否则插入新值
sql如:
-- Look up a row by the full 20-key combination ('key_value' are placeholders).
-- No single key is selective, so without help this degenerates to a full scan.
Select pk
from test_key
where key1 = 'key_value'
and key2 = 'key_value'
and key3 = 'key_value'
and key4 = 'key_value'
and key5 = 'key_value'
and key6 = 'key_value'
and key7 = 'key_value'
and key8 = 'key_value'
and key9 = 'key_value'
and key10 = 'key_value'
and key11 = 'key_value'
and key12 = 'key_value'
and key13 = 'key_value'
and key14 = 'key_value'
and key15 = 'key_value'
and key16 = 'key_value'
and key17 = 'key_value'
and key18 = 'key_value'
and key19 = 'key_value'
and key20 = 'key_value'
很可能对于任意key来说其选择性都不是很强,但是20个key的组合却是唯一的
如此结构非常不利于索引优化
针对这一应用可以考虑将key1~key20合并成一个字段key_conn,并建立索引
如增加字段Key_conn varchar(400)
如此一来表变大了近一倍
于是想到使用hash减少列大小
例如增加Key_conn varchar(16)
并将源数据通过md5转换为16位后再插入
测试如下:
构建test_key数据:
-- Load 200,000 rows of random test data into test_key.
-- Fix: the original DECLARE section defined "i int := 1;", but a PL/SQL FOR
-- loop declares its own implicit loop counter that shadows it, so that
-- variable was never used and has been removed.
begin
for i in 1..200000 loop
insert into test_key (pk, key1,
key2, key3, key4, key5,
key6, key7, key8, key9, key10, key11, key12, key13, key14,
key15, key16, key17, key18, key19, key20)
values(SYS_GUID(),  -- globally unique 32-hex-char primary key value
-- each key: random 20-character mixed-case alphabetic string
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20));
end loop;
commit;  -- single commit after the whole load (fine for a test fixture)
end;
构建合并字段的表:
-- Same shape as test_key plus the derived hash column key_conn.
-- Fix: VARCHAR2 instead of VARCHAR (Oracle-recommended type).
create table test_merge(
pk varchar2(32),
key1 varchar2(20), key2 varchar2(20),
key3 varchar2(20), key4 varchar2(20),
key5 varchar2(20), key6 varchar2(20),
key7 varchar2(20), key8 varchar2(20),
key9 varchar2(20), key10 varchar2(20),
key11 varchar2(20), key12 varchar2(20),
key13 varchar2(20), key14 varchar2(20),
key15 varchar2(20), key16 varchar2(20),
key17 varchar2(20), key18 varchar2(20),
key19 varchar2(20), key20 varchar2(20),
-- 16-byte MD5 digest of key1||...||key20; this is the column to index.
-- NOTE(review): the digest is raw bytes, not text -- in a multibyte
-- character set VARCHAR2(16) may not hold/round-trip 16 arbitrary bytes;
-- VARCHAR2(16 BYTE), RAW(16), or a 32-char hex string would be safer.
-- Confirm the database character set before production use.
key_conn varchar2(16)
)
构建合并函数:
-- merge_to_md5: collapse an arbitrarily long concatenated key string into a
-- fixed 16-byte MD5 digest, so one short indexed column can stand in for the
-- whole 20-key combination.
--   inputstr : concatenated key values (here key1||...||key20, 400 chars)
--   returns  : 16-byte MD5 digest of inputstr
-- NOTE(review): DBMS_OBFUSCATION_TOOLKIT.MD5 yields the digest as 16 RAW
-- bytes implicitly converted to VARCHAR2; the value can contain arbitrary
-- non-printable bytes, and character-set conversion may corrupt it in
-- multibyte databases. RAWTOHEX(DBMS_CRYPTO.HASH(...)) would be the modern,
-- safer alternative (DBMS_OBFUSCATION_TOOLKIT is deprecated) -- TODO confirm
-- against the target database version before changing, since the stored
-- key_conn values and all lookups must use the same representation.
CREATE OR REPLACE FUNCTION merge_to_md5(
inputstr IN VARCHAR2)
RETURN VARCHAR2
IS
retval varchar2(16); -- 16 bytes: length of a raw MD5 digest
BEGIN
-- digest the entire concatenated string in one call
retval := (DBMS_OBFUSCATION_TOOLKIT.MD5(input_string => inputstr)) ;
RETURN retval;
END;
将源数据转换为合并数据:
-- Copy test_key into test_merge, deriving key_conn as the MD5 digest of the
-- 20 concatenated key values.
-- Fix: explicit INSERT column list, so the statement no longer silently
-- breaks if test_merge's column order ever changes.
-- NOTE(review): the keys are concatenated with no delimiter, so e.g.
-- ('ab','c') and ('a','bc') would produce the same digest, and NULL keys
-- collapse to nothing. That is harmless here only because every key is a
-- fixed 20 characters -- confirm before reusing this pattern with
-- variable-length or nullable keys.
Insert into test_merge (pk, key1, key2, key3, key4, key5, key6, key7,
key8, key9, key10, key11, key12, key13, key14, key15,
key16, key17, key18, key19, key20, key_conn)
select pk,key1 ,Key2 ,key3 ,
key4 ,key5 ,key6 ,key7 ,
key8 ,key9 ,key10 ,key11 ,
key12 ,key13 ,key14 ,key15 ,
key16 ,key17 ,key18 ,key19 ,key20 ,
merge_to_md5(key1||key2||key3||key4||key5||key6||key7||key8||key9||key10||key11||key12||key13||key14||key15||key16||key17||key18||key19||key20
) from test_key
更新统计信息:
SQL> exec sys.dbms_stats.gather_table_stats(null,tabname => 'test_key',estimate_percent => 100);
PL/SQL procedure successfully completed
SQL> exec sys.dbms_stats.gather_table_stats(null,tabname => 'test_merge',estimate_percent => 100);
PL/SQL procedure successfully completed
对比数据情况:
SQL> select segment_name,blocks,bytes from user_segments where segment_name in ('TEST_KEY','TEST_MERGE');
SEGMENT_NAME BLOCKS BYTES
-------------------------------------------------------------------------------- ---------- ----------
TEST_KEY 14336 117440512
TEST_MERGE 14336 117440512
SQL> select count(*) from test_merge
2 ;
COUNT(*)
----------
200000
重复数据情况:
SQL> select count(*),key_conn from test_merge where key_conn is not null group by key_conn having count(*) >1;
COUNT(*) KEY_CONN
---------- ----------------
无重复数据
从测试的情况看,Key_conn字段基本无重复,但md5并不能排除其重复的可能性,因此并不能直接用Key_conn来替换pk字段,查询时也不能直接用
Select pk from test_merge where key_conn='key_value'
来替换,但使用
-- Hash-assisted lookup: the index on key_conn locates the (almost always
-- unique) candidate row cheaply, and the 20 equality predicates then filter
-- out any rare MD5 collision, so correctness never depends on the hash alone.
-- 'key_value' and key_value_merge are placeholders; key_value_merge stands
-- for the concatenation of the 20 sought values (key1||...||key20), matching
-- exactly how key_conn was computed at insert time.
Select pk from test_merge
where
key1 = 'key_value' and key2 = 'key_value' and key3 = 'key_value' and key4 = 'key_value'
and key5 = 'key_value' and key6 = 'key_value' and key7 = 'key_value' and key8 = 'key_value'
and key9 = 'key_value' and key10 = 'key_value' and key11 = 'key_value' and key12 = 'key_value'
and key13 = 'key_value' and key14 = 'key_value' and key15 = 'key_value' and key16 = 'key_value'
and key17 = 'key_value' and key18 = 'key_value' and key19 = 'key_value' and key20 = 'key_value'
and key_conn= merge_to_md5(key_value_merge)
利用key_conn上的索引定位到几乎唯一行再进行filter即可
-- Test table: 20 low-selectivity key columns whose 20-way combination is unique.
-- Fix: VARCHAR2 instead of VARCHAR -- Oracle reserves VARCHAR for possible
-- future redefinition and recommends VARCHAR2 for variable-length strings.
create table test_key(
    pk    varchar2(32),  -- populated with SYS_GUID() (32 hex characters)
    key1  varchar2(20), key2  varchar2(20), key3  varchar2(20), key4  varchar2(20),
    key5  varchar2(20), key6  varchar2(20), key7  varchar2(20), key8  varchar2(20),
    key9  varchar2(20), key10 varchar2(20), key11 varchar2(20), key12 varchar2(20),
    key13 varchar2(20), key14 varchar2(20), key15 varchar2(20), key16 varchar2(20),
    key17 varchar2(20), key18 varchar2(20), key19 varchar2(20), key20 varchar2(20))
业务需求:
检查指定key1~key20值的记录是否已经存在,如存在返回其pk,否则插入新值
sql如:
-- Look up a row by the full 20-key combination ('key_value' are placeholders).
-- No single key is selective, so without help this degenerates to a full scan.
Select pk
from test_key
where key1 = 'key_value'
and key2 = 'key_value'
and key3 = 'key_value'
and key4 = 'key_value'
and key5 = 'key_value'
and key6 = 'key_value'
and key7 = 'key_value'
and key8 = 'key_value'
and key9 = 'key_value'
and key10 = 'key_value'
and key11 = 'key_value'
and key12 = 'key_value'
and key13 = 'key_value'
and key14 = 'key_value'
and key15 = 'key_value'
and key16 = 'key_value'
and key17 = 'key_value'
and key18 = 'key_value'
and key19 = 'key_value'
and key20 = 'key_value'
很可能对于任意key来说其选择性都不是很强,但是20个key的组合却是唯一的
如此结构非常不利于索引优化
针对这一应用可以考虑将key1~key20合并成一个字段key_conn,并建立索引
如增加字段Key_conn varchar(400)
如此一来表变大了近一倍
于是想到使用hash减少列大小
例如增加Key_conn varchar(16)
并将源数据通过md5转换为16位后再插入
测试如下:
构建test_key数据:
-- Load 200,000 rows of random test data into test_key.
-- Fix: the original DECLARE section defined "i int := 1;", but a PL/SQL FOR
-- loop declares its own implicit loop counter that shadows it, so that
-- variable was never used and has been removed.
begin
for i in 1..200000 loop
insert into test_key (pk, key1,
key2, key3, key4, key5,
key6, key7, key8, key9, key10, key11, key12, key13, key14,
key15, key16, key17, key18, key19, key20)
values(SYS_GUID(),  -- globally unique 32-hex-char primary key value
-- each key: random 20-character mixed-case alphabetic string
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20),
dbms_random.string('a',20),dbms_random.string('a',20));
end loop;
commit;  -- single commit after the whole load (fine for a test fixture)
end;
构建合并字段的表:
-- Same shape as test_key plus the derived hash column key_conn.
-- Fix: VARCHAR2 instead of VARCHAR (Oracle-recommended type).
create table test_merge(
pk varchar2(32),
key1 varchar2(20), key2 varchar2(20),
key3 varchar2(20), key4 varchar2(20),
key5 varchar2(20), key6 varchar2(20),
key7 varchar2(20), key8 varchar2(20),
key9 varchar2(20), key10 varchar2(20),
key11 varchar2(20), key12 varchar2(20),
key13 varchar2(20), key14 varchar2(20),
key15 varchar2(20), key16 varchar2(20),
key17 varchar2(20), key18 varchar2(20),
key19 varchar2(20), key20 varchar2(20),
-- 16-byte MD5 digest of key1||...||key20; this is the column to index.
-- NOTE(review): the digest is raw bytes, not text -- in a multibyte
-- character set VARCHAR2(16) may not hold/round-trip 16 arbitrary bytes;
-- VARCHAR2(16 BYTE), RAW(16), or a 32-char hex string would be safer.
-- Confirm the database character set before production use.
key_conn varchar2(16)
)
构建合并函数:
-- merge_to_md5: collapse an arbitrarily long concatenated key string into a
-- fixed 16-byte MD5 digest, so one short indexed column can stand in for the
-- whole 20-key combination.
--   inputstr : concatenated key values (here key1||...||key20, 400 chars)
--   returns  : 16-byte MD5 digest of inputstr
-- NOTE(review): DBMS_OBFUSCATION_TOOLKIT.MD5 yields the digest as 16 RAW
-- bytes implicitly converted to VARCHAR2; the value can contain arbitrary
-- non-printable bytes, and character-set conversion may corrupt it in
-- multibyte databases. RAWTOHEX(DBMS_CRYPTO.HASH(...)) would be the modern,
-- safer alternative (DBMS_OBFUSCATION_TOOLKIT is deprecated) -- TODO confirm
-- against the target database version before changing, since the stored
-- key_conn values and all lookups must use the same representation.
CREATE OR REPLACE FUNCTION merge_to_md5(
inputstr IN VARCHAR2)
RETURN VARCHAR2
IS
retval varchar2(16); -- 16 bytes: length of a raw MD5 digest
BEGIN
-- digest the entire concatenated string in one call
retval := (DBMS_OBFUSCATION_TOOLKIT.MD5(input_string => inputstr)) ;
RETURN retval;
END;
将源数据转换为合并数据:
-- Copy test_key into test_merge, deriving key_conn as the MD5 digest of the
-- 20 concatenated key values.
-- Fix: explicit INSERT column list, so the statement no longer silently
-- breaks if test_merge's column order ever changes.
-- NOTE(review): the keys are concatenated with no delimiter, so e.g.
-- ('ab','c') and ('a','bc') would produce the same digest, and NULL keys
-- collapse to nothing. That is harmless here only because every key is a
-- fixed 20 characters -- confirm before reusing this pattern with
-- variable-length or nullable keys.
Insert into test_merge (pk, key1, key2, key3, key4, key5, key6, key7,
key8, key9, key10, key11, key12, key13, key14, key15,
key16, key17, key18, key19, key20, key_conn)
select pk,key1 ,Key2 ,key3 ,
key4 ,key5 ,key6 ,key7 ,
key8 ,key9 ,key10 ,key11 ,
key12 ,key13 ,key14 ,key15 ,
key16 ,key17 ,key18 ,key19 ,key20 ,
merge_to_md5(key1||key2||key3||key4||key5||key6||key7||key8||key9||key10||key11||key12||key13||key14||key15||key16||key17||key18||key19||key20
) from test_key
更新统计信息:
SQL> exec sys.dbms_stats.gather_table_stats(null,tabname => 'test_key',estimate_percent => 100);
PL/SQL procedure successfully completed
SQL> exec sys.dbms_stats.gather_table_stats(null,tabname => 'test_merge',estimate_percent => 100);
PL/SQL procedure successfully completed
对比数据情况:
SQL> select segment_name,blocks,bytes from user_segments where segment_name in ('TEST_KEY','TEST_MERGE');
SEGMENT_NAME BLOCKS BYTES
-------------------------------------------------------------------------------- ---------- ----------
TEST_KEY 14336 117440512
TEST_MERGE 14336 117440512
SQL> select count(*) from test_merge
2 ;
COUNT(*)
----------
200000
重复数据情况:
SQL> select count(*),key_conn from test_merge where key_conn is not null group by key_conn having count(*) >1;
COUNT(*) KEY_CONN
---------- ----------------
无重复数据
从测试的情况看,Key_conn字段基本无重复,但md5并不能排除其重复的可能性,因此并不能直接用Key_conn来替换pk字段,查询时也不能直接用
Select pk from test_merge where key_conn='key_value'
来替换,但使用
-- Hash-assisted lookup: the index on key_conn locates the (almost always
-- unique) candidate row cheaply, and the 20 equality predicates then filter
-- out any rare MD5 collision, so correctness never depends on the hash alone.
-- 'key_value' and key_value_merge are placeholders; key_value_merge stands
-- for the concatenation of the 20 sought values (key1||...||key20), matching
-- exactly how key_conn was computed at insert time.
Select pk from test_merge
where
key1 = 'key_value' and key2 = 'key_value' and key3 = 'key_value' and key4 = 'key_value'
and key5 = 'key_value' and key6 = 'key_value' and key7 = 'key_value' and key8 = 'key_value'
and key9 = 'key_value' and key10 = 'key_value' and key11 = 'key_value' and key12 = 'key_value'
and key13 = 'key_value' and key14 = 'key_value' and key15 = 'key_value' and key16 = 'key_value'
and key17 = 'key_value' and key18 = 'key_value' and key19 = 'key_value' and key20 = 'key_value'
and key_conn= merge_to_md5(key_value_merge)
利用key_conn上的索引定位到几乎唯一行再进行filter即可
最后在key_conn上建立索引。key_conn字段也可以定义为char(16):Oracle中这个md5函数的输出长度固定是16字节,所以列长度取16位——该长度与源数据长度无关,只取决于hash之后的长度。
来自 “ ITPUB博客 ” ,链接:http://blog.itpub.net/13165828/viewspace-610547/,如需转载,请注明出处,否则将追究法律责任。
转载于:http://blog.itpub.net/13165828/viewspace-610547/