Oracle simple stream

I. Single stream  # can be configured on both 9iR2 and 10g
1. Both the source and target databases must run in archive log mode.
  The source database can enable supplemental logging at either the database level or the table level. In a Streams environment that replicates at schema granularity, if every table under the schema is confirmed to have a proper primary key, supplemental logging does not need to be enabled separately.
# Enable database-level supplemental logging
alter database add supplemental log data;
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE INDEX, FOREIGN KEY) COLUMNS;
# Enable table-level supplemental logging
alter table table_name add supplemental log group log_group_name (table_column_name) always;
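To confirm what is already enabled, the supplemental logging flags can be read back from the data dictionary; a quick check (the schema name in the second query is only an example, substitute your own):
select supplemental_log_data_min, supplemental_log_data_pk, supplemental_log_data_ui from v$database;
select owner, table_name, log_group_name, always from dba_log_groups where owner = 'HS_HIS';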
 
2. Create the Streams admin tablespace on both the source and target
 CREATE TABLESPACE streams_tbs DATAFILE '/u01/app/oracle/oradata/sid/streams_tbs.dbf' size 50M reuse autoextend on maxsize unlimited;
 
3. Create the Streams user on both the source and target
   CREATE USER strmadmin IDENTIFIED BY strmadmin DEFAULT TABLESPACE streams_tbs temporary tablespace temp QUOTA UNLIMITED ON streams_tbs;
 grant connect,resource,dba,aq_administrator_role to strmadmin;
 BEGIN
    DBMS_STREAMS_AUTH.GRANT_ADMIN_PRIVILEGE(
       grantee          => 'strmadmin',   
       grant_privileges => true);
 END;
 
4. Create DB links on both the source and target  # used to propagate messages from a source queue at one database to a destination queue at another
  connect strmadmin/strmadmin@qhadb
  create public database link qhrac connect to strmadmin identified by strmadmin using 'qhrac';
   connect strmadmin/strmadmin@qhrac
  create public database link qhadb connect to strmadmin identified by strmadmin using 'qhadb';
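Before going further, it is worth confirming that both links resolve and log in correctly; a quick sanity check on each side (the dual query simply round-trips through the link):
  select sysdate from dual@qhrac;   -- run on qhadb
  select sysdate from dual@qhadb;   -- run on qhrac
  select owner, db_link, username, host from dba_db_links;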

5. Modify parameters on the source database (which must be running in archive log mode)
alter system set aq_tm_processes=2 scope=both;    // do not set this to 0 or 10; doing so can disable the queue monitor processes and hurt Streams pool memory usage
alter system set global_names=true scope=both;
alter system set job_queue_processes=10 scope=both;   // recommended minimum is 4; controls the number of job (propagation) processes
alter system set parallel_max_servers=20 scope=both;  // maximum number of processes available for parallel execution
alter system set undo_retention=3600 scope=both;  // minimum number of seconds undo is retained after commit
alter system set nls_date_format='YYYY-MM-DD HH24:MI:SS' scope=spfile; // date format
alter system set streams_pool_size=200M scope=spfile;  // not required on a database with no capture process
alter system set open_links=4 scope=spfile;    // maximum number of open database links (DB links) per session
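The settings can be verified before and after the change with a quick look at V$PARAMETER:
select name, value, isdefault
  from v$parameter
 where name in ('aq_tm_processes','global_names','job_queue_processes',
                'parallel_max_servers','undo_retention','streams_pool_size','open_links');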

6. Create the queue on the source database
connect strmadmin/strmadmin
begin
 dbms_streams_adm.set_up_queue(
 queue_table => 'qhadb_queue_table',
 queue_name => 'qhadb_queue');
end;
 
7. Create the queue on the target database
connect strmadmin/strmadmin
begin
 dbms_streams_adm.set_up_queue(
 queue_table => 'qhrac_queue_table',
 queue_name => 'qhrac_queue');
end;
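On either side, the queue and its queue table can be confirmed from the AQ dictionary views; a quick check run as strmadmin:
select owner, name, queue_table, queue_type, enqueue_enabled, dequeue_enabled
  from dba_queues
 where owner = 'STRMADMIN';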
 
8. Create the capture process on the source database
connect strmadmin/strmadmin
begin
 dbms_streams_adm.add_schema_rules(
 schema_name => 'hs_his',
 streams_type => 'capture',
 streams_name => 'capture_qhadb',
 queue_name => 'strmadmin.qhadb_queue',
 include_dml => true,
 include_ddl => true,
 include_tagged_lcr => true,
 source_database => null,
 inclusion_rule => true);
end;
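add_schema_rules generates the rule set and rules behind the scenes; they can be inspected afterwards with the Streams dictionary views, for example:
select streams_name, streams_type, schema_name, rule_type, rule_name
  from dba_streams_schema_rules
 where streams_name = 'CAPTURE_QHADB';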
 
9. Instantiate the replicated schema
  Copy the schema from the source database to the target database (here with Data Pump over the network link):
  impdp system/oracle network_link=qhadb schemas=hs_his
 
connect strmadmin/strmadmin
DECLARE
 iscn NUMBER;
BEGIN
 iscn := DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER();
 DBMS_APPLY_ADM.SET_SCHEMA_INSTANTIATION_SCN@qhrac(
  source_schema_name    => 'hs_his',
  source_database_name => 'qhadb',
  instantiation_scn     => iscn,
  recursive             => true);
END;
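The instantiation SCN recorded for the schema can then be checked on the target side (run on qhrac):
select source_schema, source_database, instantiation_scn
  from dba_apply_instantiated_schemas;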

10. Create the propagation on the source database  ## queue_to_queue => true enables automatic failover when the destination queue is a buffered queue in a Real Application Clusters (RAC) database
connect strmadmin/strmadmin
begin
 dbms_streams_adm.add_schema_propagation_rules(
 schema_name => 'hs_his',
 streams_name => 'qhadb_to_qhrac',
 source_queue_name => 'strmadmin.qhadb_queue',
 destination_queue_name => 'strmadmin.qhrac_queue@qhrac',
 include_dml => true,
 include_ddl => true,
 include_tagged_lcr => false,
 source_database => 'qhadb',
 inclusion_rule => true,
  queue_to_queue => true);
end;
Set the propagation latency to 0 so that LCRs are propagated in near real time:
begin
 dbms_aqadm.alter_propagation_schedule(
 queue_name => 'qhadb_queue',
 destination => 'qhrac',
 latency => 0);
end;
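The propagation and its schedule (including the latency just set) can be verified with:
select propagation_name, source_queue_name, destination_queue_name, destination_dblink, status
  from dba_propagation;
select qname, destination, latency, schedule_disabled, failures, last_error_msg
  from dba_queue_schedules
 where qname = 'QHADB_QUEUE';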
 
11. Create the apply process on the target database
connect strmadmin/strmadmin
begin
 dbms_streams_adm.add_schema_rules(
 schema_name => 'hs_his',
 streams_type => 'apply',
 streams_name => 'apply_qhrac',
 queue_name => 'strmadmin.qhrac_queue',
 include_dml => true,
 include_ddl => true,
 include_tagged_lcr => true,
 source_database => 'qhadb',
 inclusion_rule => true);
end;

12. Start the apply process on the target database
CONNECT strmadmin/strmadmin
BEGIN
    DBMS_APPLY_ADM.SET_PARAMETER(
    apply_name => 'apply_qhrac',
    parameter   => 'disable_on_error',
    value       => 'n');
    DBMS_APPLY_ADM.START_APPLY(
    apply_name => 'apply_qhrac');
END;
To stop:
connect strmadmin/strmadmin
begin
 dbms_apply_adm.stop_apply(
 apply_name => 'apply_qhrac');
end;
 
13. Start the capture process on the source database
connect strmadmin/strmadmin
begin
 dbms_capture_adm.start_capture(
 capture_name => 'capture_qhadb');
end;
To stop:
connect strmadmin/strmadmin
begin
 dbms_capture_adm.stop_capture(
 capture_name => 'capture_qhadb');
end;
 
14. Remove the Streams configuration on both the source and target (stop the processes first); releases earlier than 10.2.0.3 raise an error because of Bug 5640593
connect strmadmin/strmadmin
exec DBMS_STREAMS_ADM.remove_streams_configuration();
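Once it completes, the Streams dictionary views should come back empty; a quick sanity check on both databases:
select capture_name from dba_capture;
select propagation_name from dba_propagation;
select apply_name from dba_apply;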

15. Monitoring and administration
   Check the capture process:
   SELECT CAPTURE_NAME,QUEUE_NAME,RULE_SET_NAME,NEGATIVE_RULE_SET_NAME,START_SCN,STATUS,CAPTURE_TYPE FROM DBA_CAPTURE;
    STATUS: ENABLED means the capture process is running normally;
         DISABLED means the capture process is stopped and only needs to be restarted;
         ABORTED means the capture process stopped abnormally; query the ERROR_NUMBER and ERROR_MESSAGE columns for details. Oracle also records this information in a trace file.
 
   select sid,serial#,capture_name,startup_time,state,state_changed_time from v$streams_capture;
    state: INITIALIZING         --- starting up.
   WAITING FOR DICTIONARY REDO --- waiting for the redo log files containing the dictionary build at the required first SCN to be added to the capture process session. The capture process cannot begin scanning the redo logs until all redo log files containing the dictionary have been added.
   CAPTURING CHANGES           --- scanning the redo log.
   CREATING LCR                --- formatting the changes into LCRs.
   ENQUEUING MESSAGE           --- enqueuing the LCRs that satisfy the rules into the capture queue.
   PAUSED FOR FLOW CONTROL     --- unable to enqueue LCRs, usually because propagation or the apply process consumes messages more slowly than the capture process creates them. When propagation or apply falls far behind or becomes unavailable, this throttling reduces the amount of captured work that would otherwise spill or be lost.
   select * from ALL_CAPTURE_PREPARED_SCHEMAS;    
  
   Check whether captured LCRs have a propagation gap:
   SELECT CAPTURE_NAME, QUEUE_NAME, STATUS, CAPTURED_SCN,APPLIED_SCN FROM DBA_CAPTURE;
    If APPLIED_SCN is lower than CAPTURED_SCN, then on the source side either the LCRs have not been dequeued yet or the propagation has not yet delivered them to the target database.
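When APPLIED_SCN lags, the propagation schedule on the source usually shows why (failure count and last error); for example:
   select qname, destination, total_number, failures, last_error_msg
     from dba_queue_schedules
    where qname = 'QHADB_QUEUE';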
  
   Check the apply process:
   SELECT apply_name, apply_captured, status,error_message FROM dba_apply;
    STATUS: ENABLED means the apply process is running normally;
  DISABLED means the apply process is stopped and only needs to be restarted;
  ABORTED means the apply process stopped abnormally; query the ERROR_NUMBER and ERROR_MESSAGE columns for details, and query the DBA_APPLY_ERROR view for the detailed apply errors.
  
   select apply_name,queue_name,message_number,error_number,error_message,message_count,error_creation_time from DBA_APPLY_ERROR;
   v$streams_apply_reader
   v$streams_apply_coordinator
   v$streams_apply_server
 
16. The lag between the target and source databases depends on performance and other factors, including how fast capture mines the log files, the propagation interval (3 seconds by default) plus transfer time, and how fast apply consumes the LCRs.
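A rough end-to-end latency figure can be read on the target from the apply coordinator view; a sketch (the HWM columns are DATE values, so the difference is converted to seconds):
select apply_name,
       (hwm_time - hwm_message_create_time) * 86400 as latency_seconds,
       hwm_message_number
  from v$streams_apply_coordinator;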
 
II. Example
Oracle Streams replication offers a subset-rule feature that can horizontally partition a table's data, i.e., replicate only a horizontal slice of its rows. In some applications, however, the requirement is different: certain columns of a table change very frequently and are specific to the local application server (AS). The values of those columns at site A are irrelevant to the AS at site B, because the application at site B normally resets them anyway. If Streams replicated everything without distinction, those frequently changing columns would impose a significant performance cost on the whole system, especially when three sites replicate to one another.
To replicate only some of a table's columns rather than all of them, use a rule-based transformation attached to the capture process: if the filtering is not done at the source database, the large volume of propagation and apply work means the performance impact is almost the same as not filtering at all.
Take two-site replication as an example: db1 -> db2
1. First, create the corresponding capture, apply, and propagation components at the schema level; all of the rules created here belong to the positive rule set, not the negative one.
   rem ;set up the apply queues
   rem ;*************************************************************************
   begin
    dbms_streams_adm.set_up_queue(
        queue_table    => 'apply_db2tab',
        queue_name    => 'apply_db2',
        queue_user    => 'strmadmin');
   end;
   /
   rem ;set up the capture queue
   rem ;*************************************************************************
   begin
    dbms_streams_adm.set_up_queue(
        queue_table    => 'capture_db1tab',
        queue_name    => 'capture_db1',
        queue_user    => 'strmadmin');
   end;
   /
   rem ;set up the apply process
   rem ;*************************************************************************
   begin
    dbms_streams_adm.add_schema_rules (
          schema_name  => 'bartholo',
          streams_type => 'apply',
          streams_name => 'apply_src_db2',
          queue_name  => 'apply_db2',
          include_dml => true,
          include_ddl => true,
          source_database => 'db2.world');
   end;
  /
  rem ;set up the capture process
  rem ;*************************************************************************
  begin
    dbms_streams_adm.add_schema_rules (
          schema_name  => 'bartholo',
          streams_type => 'capture',
          streams_name => 'capture_db1strm',
          queue_name  => 'capture_db1',
          include_dml => true,
          include_ddl => true,
          inclusion_rule => true);
  end;
 /
 rem ;set up the propagation process
 rem ;*************************************************************************
 begin
    dbms_streams_adm.add_schema_propagation_rules (
      schema_name   => 'bartholo',
      streams_name   => 'prop_db1_to_db2',
      source_queue_name  => 'capture_db1',
      destination_queue_name  => 'strmadmin.apply_db1@db2.world',
      include_dml   => true,
      include_ddl   => true,
      source_database  => 'db1.world');
 end;
 /
2. The Streams user here is strmadmin, which has already been granted DBA. This step implements the transformation function.
   Assume the table is defined as tab1(col1, col2, col3) and that UPDATEs to col3 are not to be replicated. The goal: no matter how the source site updates col3, the change is invisible at the destination site and the value there stays unchanged.
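For reference, a minimal definition of the example table; only the names tab1/col1/col2/col3 come from the example, the data types are illustrative assumptions:
create table bartholo.tab1 (
  col1 number primary key,
  col2 varchar2(100),
  col3 varchar2(100)   -- frequently changing, site-local column; its UPDATEs will be stripped from replicated LCRs
);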
CREATE OR REPLACE PACKAGE strmpkg as
     function transform_capture(in_any IN SYS.AnyData) RETURN SYS.AnyData;
END strmpkg;
/
CREATE OR REPLACE PACKAGE BODY strmpkg as
FUNCTION transform_capture(in_any IN SYS.AnyData)
  RETURN SYS.AnyData
IS
  lcr        SYS.LCR$_ROW_RECORD;
  rc        NUMBER;
  ob_owner    VARCHAR2(30);
  ob_name    VARCHAR2(30);
  cmd        VARCHAR2(10);
  newvalue_anydata SYS.AnyData;
  oldvalue_anydata SYS.Anydata;
BEGIN
  IF in_any.GETTYPENAME='SYS.LCR$_ROW_RECORD' THEN
    -- Put the row LCR into lcr
    rc := in_any.GETOBJECT(lcr);
  
    -- Get the object owner and name
    ob_owner := lcr.GET_OBJECT_OWNER();
    ob_name := lcr.GET_OBJECT_NAME();
    cmd    := lcr.GET_COMMAND_TYPE();
    -- Only transform UPDATE operations on the bartholo schema; pass everything else through.
    -- Note: the object owner and name in the LCR are normally stored in upper case.
    IF ob_owner = 'BARTHOLO' AND cmd = 'UPDATE' THEN
      IF ob_name = 'TAB1' THEN
        -- Remove the specified column in the LCR
        newvalue_anydata := lcr.GET_VALUE('new','col3');
        oldvalue_anydata := lcr.GET_VALUE('old','col3');
        IF ( (newvalue_anydata IS NOT NULL) OR (oldvalue_anydata IS NOT NULL) )
        THEN
          -- delete the column only when it is actually present in the LCR
          lcr.DELETE_COLUMN(column_name => 'col3');
        END IF;
      END IF;
    END IF;
    RETURN SYS.ANYDATA.CONVERTOBJECT(lcr);
  END IF;
  RETURN in_any;
END transform_capture;
END strmpkg;
/
3. Associate the transformation function with the capture process's DML rule.
DECLARE
 capture_dml_rule VARCHAR2(30);
BEGIN
 SELECT rule_name INTO capture_dml_rule FROM sys.streams$_rules
 WHERE streams_name='CAPTURE_DB1STRM' and rule_type = 1 AND ROWNUM = 1
 ORDER BY rule_name DESC;
 DBMS_STREAMS_ADM.set_rule_transform_function(
 rule_name    => capture_dml_rule,
 transform_function    => 'strmpkg.transform_capture');
END;
/
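Whether the transformation is attached to the rule can be verified afterwards; a quick check against the Streams dictionary (10gR2):
select rule_owner, rule_name, transform_function_name
  from dba_streams_transform_function;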
4. Instantiate the schema and its objects between the two sites.
DECLARE
 iscn  NUMBER;         -- Variable to hold instantiation SCN value
BEGIN
 iscn := DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER();
 DBMS_OUTPUT.PUT_LINE('The instantiation SCN for db2: ' || iscn);
 DBMS_APPLY_ADM.SET_SCHEMA_INSTANTIATION_SCN@db2.world(
  source_schema_name    => 'bartholo',
  source_database_name  => 'db1.world',
  instantiation_scn     => iscn);
END;
/
 
5. Start the apply process on db2, then the propagation and capture processes on db1.

III. One-to-many replication
One-to-many differs from one-to-one in that the source database propagates changes to multiple target databases, and each target database needs its own apply process.
   db1 -> db2; db1 -> db3
1. Parameter changes (db1, db2, db3)
alter system set aq_tm_processes=4 scope=spfile;
alter system set global_names=true scope=spfile;
alter system set job_queue_processes=8 scope=spfile;
alter system set log_parallelism=1 scope=spfile;
2. Create the Streams administrator and grant privileges (db1, db2, db3)
CREATE USER strmadmin IDENTIFIED BY strmadmin DEFAULT TABLESPACE users QUOTA UNLIMITED ON users;
GRANT DBA, CONNECT, RESOURCE, SELECT_CATALOG_ROLE TO strmadmin;
GRANT EXECUTE ON DBMS_AQADM TO strmadmin;
GRANT EXECUTE ON DBMS_CAPTURE_ADM TO strmadmin;
GRANT EXECUTE ON DBMS_PROPAGATION_ADM TO strmadmin;
GRANT EXECUTE ON DBMS_STREAMS_ADM TO strmadmin;
GRANT EXECUTE ON DBMS_APPLY_ADM TO strmadmin;
GRANT EXECUTE ON DBMS_FLASHBACK TO strmadmin;
GRANT ALL ON scott.dept TO strmadmin;
BEGIN
    DBMS_RULE_ADM.GRANT_SYSTEM_PRIVILEGE(
        privilege => DBMS_RULE_ADM.CREATE_RULE_SET_OBJ,
        grantee => 'strmadmin',
        grant_option => FALSE);
END;
/
BEGIN
    DBMS_RULE_ADM.GRANT_SYSTEM_PRIVILEGE(
        privilege => DBMS_RULE_ADM.CREATE_RULE_OBJ,
        grantee => 'strmadmin',
        grant_option => FALSE);
END;
/
3. Create the DB links
db1:
create public database link db2 connect to strmadmin identified by strmadmin using 'db2';
create public database link db3 connect to strmadmin identified by strmadmin using 'db3';
db2,db3:
create public database link db1 connect to strmadmin identified by strmadmin using 'db1';
4. Create the LogMiner tablespace and enable supplemental logging (db1)
conn /as sysdba
CREATE TABLESPACE logmnr_ts DATAFILE 'D:\oracle\product\10.2.0\oradata\orcl\logmnr01.dbf' SIZE 25 M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED;
EXECUTE DBMS_LOGMNR_D.SET_TABLESPACE('logmnr_ts');
# Enable database-level supplemental logging
alter database add supplemental log data;
# Enable table-level supplemental logging
alter table *** add supplemental log group log_group_name (table_column_name) always;
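Note: the propagation and capture rules in the next steps reference a queue named strmadmin.streams_queue; the example assumes it already exists on db1, db2 and db3. If it does not, a minimal sketch to create it on each database (these values are also the defaults of set_up_queue):
connect strmadmin/strmadmin
begin
 dbms_streams_adm.set_up_queue(
  queue_table => 'streams_queue_table',
  queue_name => 'streams_queue',
  queue_user => 'strmadmin');
end;
/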
5. Configure the propagation processes (db1)
conn strmadmin/strmadmin@db1
BEGIN
 DBMS_STREAMS_ADM.ADD_TABLE_PROPAGATION_RULES(
 table_name => 'scott.dept',
 streams_name => 'db1_to_db2',
 source_queue_name => 'strmadmin.streams_queue',
 destination_queue_name => 'strmadmin.streams_queue@db2',
 include_dml => true,
 include_ddl => true,
 source_database => 'db1');
END;
/
Note: the db2 in destination_queue_name is the db link that was created to point to the db2 database, and the value of source_database must match the db link that the target database uses to point back to the source database;
BEGIN
 DBMS_STREAMS_ADM.ADD_TABLE_PROPAGATION_RULES(
 table_name => 'scott.dept',
 streams_name => 'db1_to_db3',
 source_queue_name => 'strmadmin.streams_queue',
 destination_queue_name => 'strmadmin.streams_queue@db3',
 include_dml => true,
 include_ddl => true,
 source_database => 'db1');
END;
/
Check the propagation jobs:
SELECT job,TO_CHAR(last_date, 'DD-Mon-YYYY HH24:MI:SS') last_date,TO_CHAR(next_date, 'DD-Mon-YYYY HH24:MI:SS') next_date,what FROM dba_jobs;
6. Configure the capture process (db1)
CONNECT strmadmin/strmadmin@DB1
BEGIN
 DBMS_STREAMS_ADM.ADD_TABLE_RULES(
 table_name => 'scott.dept',
 streams_type => 'capture',
 streams_name => 'capture_simp',
 queue_name => 'strmadmin.streams_queue',
 include_dml => true,
 include_ddl => true);
END;
/
7. Set the instantiation SCN
Before the apply process can work, the instantiation SCN of the source table must be set at the destination. If the destination table already exists, this can be done by exporting and importing the table metadata with exp/imp:
exp userid=scott/tiger@db1 FILE=dept_instant.dmp TABLES=dept OBJECT_CONSISTENT=y ROWS=n
imp userid=scott/tiger@db2 FILE=dept_instant.dmp IGNORE=y COMMIT=y LOG=import.log STREAMS_INSTANTIATION=y
imp userid=scott/tiger@db3 FILE=dept_instant.dmp IGNORE=y COMMIT=y LOG=import.log STREAMS_INSTANTIATION=y
Supplemental log groups are migrated along with the metadata; since the capture process has not yet been started, these log groups can be dropped on the targets:
CONN sys/password@DB2 AS SYSDBA
ALTER TABLE scott.dept DROP SUPPLEMENTAL LOG GROUP log_group_dept_pk;
CONN sys/password@DB3 AS SYSDBA
ALTER TABLE scott.dept DROP SUPPLEMENTAL LOG GROUP log_group_dept_pk;
Set the SCN with the DBMS_APPLY_ADM package:
DECLARE
 v_scn NUMBER;
BEGIN
 v_scn := DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER();
 DBMS_APPLY_ADM.SET_TABLE_INSTANTIATION_SCN@db2(
 source_object_name => 'scott.dept',
 source_database_name => 'db1',
 instantiation_scn => v_scn);
END;
/
DECLARE
 v_scn NUMBER;
BEGIN
 v_scn := DBMS_FLASHBACK.GET_SYSTEM_CHANGE_NUMBER();
 DBMS_APPLY_ADM.SET_TABLE_INSTANTIATION_SCN@db3(
 source_object_name => 'scott.dept',
 source_database_name => 'db1',
 instantiation_scn => v_scn);
END;
/
8. Configure the apply processes (db2, db3)
db2:
CONNECT strmadmin/strmadmin@DB2
BEGIN
 DBMS_STREAMS_ADM.ADD_TABLE_RULES(
 table_name => 'scott.dept',
 streams_type => 'apply',
 streams_name => 'apply_simp',
 queue_name => 'strmadmin.streams_queue',
 include_dml => true,
 include_ddl => true,
 source_database => 'db1');
END;
/
db3:
CONNECT strmadmin/strmadmin@DB3
BEGIN
 DBMS_STREAMS_ADM.ADD_TABLE_RULES(
 table_name => 'scott.dept',
 streams_type => 'apply',
 streams_name => 'apply_simp',
 queue_name => 'strmadmin.streams_queue',
 include_dml => true,
 include_ddl => true,
 source_database => 'db1');
END;
/
9. Start the apply processes (db2, db3)
CONNECT strmadmin/strmadmin@DB2
BEGIN
 DBMS_APPLY_ADM.SET_PARAMETER(
         apply_name => 'apply_simp',
         parameter => 'disable_on_error',
         value => 'n');
 DBMS_APPLY_ADM.START_APPLY(
         apply_name => 'apply_simp');
END;
/
CONNECT strmadmin/strmadmin@DB3
BEGIN
 DBMS_APPLY_ADM.SET_PARAMETER(
  apply_name => 'apply_simp',
  parameter => 'disable_on_error',
  value => 'n');
 DBMS_APPLY_ADM.START_APPLY(
  apply_name => 'apply_simp');
END;
/
10. Start the capture process (db1)
CONNECT strmadmin/strmadmin@DB1
BEGIN
 DBMS_CAPTURE_ADM.START_CAPTURE(
 capture_name => 'capture_simp');
END;
/

IV. Configuring whole-database Streams replication with the dbms_streams_adm.maintain_global procedure (10gR2 and later)
   The source and target databases must use the same SID and directory structure.
1. Preparation before configuring Streams
a. Set the initialization parameters on the source and target
alter system set aq_tm_processes=4 scope=spfile;
alter system set job_queue_processes=5 scope=spfile;
alter system set global_names=true scope=spfile;
alter system set streams_pool_size=51m scope=spfile;
Note: in production, streams_pool_size is best set to more than 200M.
b. Configure tnsnames.ora on the source and target
Make sure the entries are correct and reachable with tnsping.
c. Create the replication administrator on the source and target
create user strmadmin identified by strmadmin default tablespace &tbs_name quota unlimited on &tbs_name;
grant connect, resource, dba to strmadmin;
d. Create database links between the source and target
connect strmadmin/strmadmin@db1;
create public database link db2.com connect to strmadmin identified by strmadmin using 'db2';
connect strmadmin/strmadmin@db2;
create public database link db1.com connect to strmadmin identified by strmadmin using 'db1';
(Note: the database links in both directions must be working; when using the pre_instantiation_setup/post_instantiation_setup procedures, the db link must use the db_name.domain format.)
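Because global_names=true, each link name has to match the remote database's global name; both can be checked quickly on either side:
select * from global_name;
select owner, db_link, host from dba_db_links;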
e. The source and target must be in archive log mode
f. Create a directory object on both the source and target
create directory dir_db1 as '/u01/bak';
create directory dir_db2 as '/u01/bak';
2. Run the MAINTAIN_GLOBAL procedure on the source database (whole-database level)
connect strmadmin/strmadmin@db1;
begin
 dbms_streams_adm.maintain_global(
 source_directory_object => 'dir_db1',
 destination_directory_object => 'dir_db2',
 source_database => 'db1.com',
 destination_database => 'db2.com',
 perform_actions => true,
 include_ddl => true,
 instantiation => DBMS_STREAMS_ADM.INSTANTIATION_FULL_NETWORK);
end;
Note: before running maintain_global, the directory objects must exist on both the source and target; the procedure is then run on the source. Almost nothing needs to be done on the target, and the Streams environment is fully configured afterwards.
The instantiation parameter specifies how the target is instantiated:
DBMS_STREAMS_ADM.INSTANTIATION_FULL: full expdp export followed by a full impdp import
DBMS_STREAMS_ADM.INSTANTIATION_FULL_NETWORK: instantiate with a Data Pump import directly over the network (no dump file)
DBMS_STREAMS_ADM.INSTANTIATION_NONE: no instantiation is performed. This option is only valid with perform_actions => false, i.e., generate the scripts first, instantiate manually, and then run the configuration script:
begin
 DBMS_STREAMS_ADM.MAINTAIN_GLOBAL(
 source_directory_object => 'dir_db1',
 destination_directory_object => 'dir_db2',
 source_database => 'db1.com',
 destination_database => 'db2.com',
 perform_actions => false,
 script_directory_object =>'DIR_db1',
 script_name => 'global_script.sql',
 dump_file_name => 'streams_rep.dmp',
 bi_directional => false,
 include_ddl => true,
 instantiation => DBMS_STREAMS_ADM.INSTANTIATION_NONE);
end;

Run the MAINTAIN_SCHEMAS procedure on the source (schema level; note: the schemas should preferably not already exist on the target)
connect strmadmin/strmadmin@db1;
begin
 dbms_streams_adm.maintain_schemas(
 schema_names => 'hs_his,hs_users',
 source_directory_object => null,
 destination_directory_object => null,
 source_database => 'db1.com',
 destination_database => 'db2.com',
 perform_actions => true,
 bi_directional => false,
 include_ddl => true,
 instantiation => dbms_streams_adm.instantiation_schema_network);
end;
Run the MAINTAIN_TABLES procedure on the source (table level)
connect strmadmin/strmadmin@db1;
declare
 v_tables dbms_utility.uncl_array;
begin
 v_tables(1) := 'scott.scott';
 v_tables(2) := 'scott.tiger';
 dbms_streams_adm.maintain_tables(
  table_names => v_tables,
  source_directory_object => null,
  destination_directory_object => null,
  source_database => 'db1.com',
  destination_database => 'db2.com',
  perform_actions => true,
  bi_directional => false,
  include_ddl => true,
  instantiation => dbms_streams_adm.instantiation_table_network);
end;
Run the MAINTAIN_TTS procedure on the source (tablespace level)
First check that the tablespace set is self-contained:
exec sys.dbms_tts.transport_set_check('TEST01',true,true);
select * from sys.transport_set_violations;
connect strmadmin/strmadmin@db1;
declare
 v_ts dbms_streams_tablespace_adm.tablespace_set;
begin
 v_ts(1) := 'TEST01';
 dbms_streams_adm.maintain_tts(
  tablespace_names => v_ts,
  source_directory_object => 'dir_db1',
  destination_directory_object => 'dir_db2',
  source_database => 'db1.com',
  destination_database => 'db2.com',
  perform_actions => true,
  /*bi_directional => true,*/
  include_ddl => true);
end;
3. If the procedure in step 2 fails, the recoverable script needs to be purged
--select script_id from dba_recoverable_script;
--exec dbms_streams_adm.RECOVER_OPERATION('&1','PURGE');
declare
 v_script_id varchar2(32);
begin
 select script_id into v_script_id from dba_recoverable_script;
 dbms_streams_adm.RECOVER_OPERATION(v_script_id,'PURGE');
exception
 when no_data_found then
  DBMS_OUTPUT.PUT_LINe('no data found') ;
 when others then
  DBMS_OUTPUT.PUT_LINe('error~!') ;
end;
4. Note: if a replication environment already exists, configuring another type of replication will fail, so remove the existing replication configuration before setting up a new one.
Do this on both the source and target:
--exec dbms_streams_adm.REMOVE_STREAMS_CONFIGURATION;
begin
 for cur_pro in (select propagation_name from dba_propagation) loop
  dbms_propagation_adm.drop_propagation(cur_pro.propagation_name);
 end loop;
 dbms_streams_adm.REMOVE_STREAMS_CONFIGURATION;
end;
5. If the configuration fails, check for apply errors on the target database:
select * from dba_apply;
select * from dba_apply_error;
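Once the underlying cause has been fixed, the failed transactions can be retried (or discarded) for every apply process; a sketch using DBMS_APPLY_ADM:
begin
 for a in (select apply_name from dba_apply) loop
  dbms_apply_adm.execute_all_errors(apply_name => a.apply_name);
  -- to discard the error transactions instead:
  -- dbms_apply_adm.delete_all_errors(apply_name => a.apply_name);
 end loop;
end;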
6. Complete the instantiation for the Streams replication
connect strmadmin/strmadmin@db1;
exec dbms_capture_adm.prepare_schema_instantiation(schema_name => 'hs_his');
declare
 v_scn number;
begin
 v_scn := dbms_flashback.get_system_change_number();
 dbms_apply_adm.set_schema_instantiation_scn@db2.com(
  source_schema_name => 'hs_his',
  source_database_name => 'db1.com',
  instantiation_scn => v_scn);
end;

 

http://download.oracle.com/docs/cd/B19306_01/server.102/b14228/config_simple.htm#CACIAHGH

http://dobzhu7781.spaces.live.com/blog/cns!4817887C779350C9!164.entry

 

Source: ITPUB blog, http://blog.itpub.net/35489/viewspace-629873/
