A bit of research on dbms_datapump (Part 1)

Transferring data between two databases from the command line:

impdp dump_user/dump_user directory=dump_dir logfile=log.log schemas=tests network_link=dump_link sqlfile=dump_dir:expfull.sql table_exists_action=REPLACE


directory: the directory object on the local (importing) database
logfile: the log file, written locally under that directory
schemas: the schema to import; in this setup it matches the user that the network_link connects as
network_link: the database link pointing to the remote source database (a setup sketch follows this list)
sqlfile: instead of performing the import, impdp writes the DDL it would have executed into this file
table_exists_action: what to do when an object to be imported already exists; REPLACE drops and re-creates it
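For the command above to work, the importing database needs the DUMP_DIR directory object and the DUMP_LINK database link it references. A minimal setup sketch; the file system path, the remote credentials and the TNS alias remote_db are placeholders, not taken from the original post:

-- on the importing (local) database, as a suitably privileged user
create directory dump_dir as '/u01/app/oracle/dpdump';
grant read, write on directory dump_dir to dump_user;

-- database link to the remote source database, created as dump_user (or as a public link)
create database link dump_link
connect to tests identified by tests
using 'remote_db';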

Calling it through the PL/SQL API:

A simple version:

declare
  hand number;  -- Data Pump job handle
begin
  -- open a schema-mode import that pulls directly over the database link
  hand := dbms_datapump.open (operation   => 'IMPORT',
                              job_mode    => 'SCHEMA',
                              remote_link => 'DUMP_LINK');

  -- attach a log file in the DUMP_DIR directory object
  dbms_datapump.add_file (handle    => hand,
                          filename  => 'impdp_testS.log',
                          directory => 'DUMP_DIR',
                          filetype  => dbms_datapump.ku$_file_type_log_file);

  -- import only the TESTS schema (names in SCHEMA_LIST are quoted strings)
  dbms_datapump.metadata_filter (handle => hand,
                                 name   => 'SCHEMA_LIST',
                                 value  => '''TESTS''');

  dbms_datapump.start_job (hand);
end;
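start_job returns as soon as the job is queued, so the block above ends while the import is still running in the background. If the calling session should instead block until the job is done, DBMS_DATAPUMP.WAIT_FOR_JOB can be used. A sketch of the same import with a blocking wait at the end (only the last two calls are new; the rest repeats the block above):

declare
  hand   number;
  vstate varchar2(30);
begin
  hand := dbms_datapump.open (operation   => 'IMPORT',
                              job_mode    => 'SCHEMA',
                              remote_link => 'DUMP_LINK');
  dbms_datapump.add_file (handle    => hand,
                          filename  => 'impdp_testS.log',
                          directory => 'DUMP_DIR',
                          filetype  => dbms_datapump.ku$_file_type_log_file);
  dbms_datapump.metadata_filter (handle => hand,
                                 name   => 'SCHEMA_LIST',
                                 value  => '''TESTS''');
  dbms_datapump.start_job (hand);

  -- block until the job reaches a terminal state, then report it
  dbms_datapump.wait_for_job (handle => hand, job_state => vstate);
  dbms_output.put_line ('Job finished with state: ' || vstate);
end;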

A more elaborate version:

declare
  my_handle    number;                        -- Data Pump job handle
  my_db_link   varchar2(30) := 'DUMP_LINK';   -- database link to the source database
  ind          number;                        -- loop index
  percent_done number;                        -- percentage of job complete
  job_state    varchar2(30);                  -- to keep track of job state
  le           ku$_LogEntry;                  -- for WIP and error messages
  js           ku$_JobStatus;                 -- the job status from get_status
  jd           ku$_JobDesc;                   -- the job description from get_status
  sts          ku$_Status;                    -- the status object returned by get_status
begin
  -- create a Data Pump job to do a schema-level import over the database link
  my_handle := dbms_datapump.open
  (
    operation   => 'IMPORT',
    job_mode    => 'SCHEMA',
    remote_link => my_db_link
  );
  dbms_output.put_line ('Opened DataPump job with handle: '||to_char (my_handle));

  -- set parallelism to 1
  dbms_datapump.set_parallel (my_handle, 1);
  dbms_output.put_line ('Added parallel');

  -- if a table already exists, replace it
  dbms_datapump.set_parameter
  (
    handle => my_handle,
    name   => 'TABLE_EXISTS_ACTION',
    value  => 'REPLACE'
  );
  dbms_output.put_line ('Added parameter REPLACE');

  -- add the log file
  dbms_datapump.add_file
  (
    handle    => my_handle,
    filename  => 'impdp_test.log',
    directory => 'DUMP_DIR',
    filetype  => dbms_datapump.ku$_file_type_log_file
  );
  dbms_output.put_line ('Added log file');

  -- import only the SCOTT schema
  dbms_datapump.metadata_filter
  (
    handle => my_handle,
    name   => 'SCHEMA_EXPR',
    value  => 'IN (''SCOTT'')'
  );
  dbms_output.put_line ('Added schema filter');

  -- limit the imported objects from the SCOTT schema to tables
  dbms_datapump.metadata_filter
  (
    handle => my_handle,
    name   => 'INCLUDE_PATH_EXPR',
    value  => 'IN (''TABLE'')'
  );
  dbms_output.put_line ('Added table filter');

  -- (optional, commented out) refresh only the tables listed in the
  -- remote table SCOTT.TRANSFER_OBJECTS
  /*
  dbms_datapump.metadata_filter
  (
    handle      => my_handle,
    name        => 'NAME_EXPR',
    value       => 'IN (SELECT TABLE_NAME FROM SCOTT.TRANSFER_OBJECTS WHERE OWNER = ''SCOTT'' AND OBJECT_TYPE = ''TABLE'')',
    object_type => 'TABLE'
  );
  */
  dbms_output.put_line ('Added only these tables filter');

  -- start the job
  dbms_datapump.start_job (my_handle);

  -- monitor the Data Pump job until it completes
  percent_done := 0;
  job_state    := 'UNDEFINED';
  while (job_state != 'COMPLETED') and (job_state != 'STOPPED') loop
    dbms_datapump.get_status
    (
      my_handle,
      dbms_datapump.ku$_status_job_error +
      dbms_datapump.ku$_status_job_status +
      dbms_datapump.ku$_status_wip, -1, job_state, sts
    );
    js := sts.job_status;

    -- if the percentage done changed, display the new value
    if js.percent_done != percent_done then
      dbms_output.put_line ('*** Job percent done = '||to_char (js.percent_done));
      percent_done := js.percent_done;
    end if;

    -- if any work-in-progress or error messages were received, display them
    if (bitand (sts.mask, dbms_datapump.ku$_status_wip) != 0) then
      le := sts.wip;
    elsif (bitand (sts.mask, dbms_datapump.ku$_status_job_error) != 0) then
      le := sts.error;
    else
      le := null;
    end if;

    if le is not null then
      ind := le.FIRST;
      while ind is not null loop
        dbms_output.put_line (le(ind).LogText);
        ind := le.NEXT(ind);
      end loop;
    end if;
  end loop;

  -- indicate that the job finished and detach from it
  dbms_output.put_line ('Job has completed');
  dbms_output.put_line ('Final job state = '||job_state);
  dbms_datapump.detach (my_handle);
end;
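Because the job runs under its own name in the database (visible in DBA_DATAPUMP_JOBS), another session can also attach to it with DBMS_DATAPUMP.ATTACH, for example to stop a runaway import. A sketch assuming the job name and owner seen in the log further below (SYS_IMPORT_SCHEMA_01 owned by DUMP_USER); substitute whatever DBA_DATAPUMP_JOBS actually shows:

declare
  h number;
begin
  -- attach to the running job by name and owner
  h := dbms_datapump.attach (job_name  => 'SYS_IMPORT_SCHEMA_01',
                             job_owner => 'DUMP_USER');
  -- stop it right away and drop its master table
  dbms_datapump.stop_job (handle => h, immediate => 1, keep_master => 0);
end;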

Monitoring the transfer (from another session):

begin
  loop
    -- poll the Data Pump job views once per second
    for x in (select * from dba_datapump_jobs)
    loop
      dbms_output.put_line ( to_char(sysdate,'hh24:mi:ss') || ' ' || x.job_name || ', ' || x.state );
      dbms_application_info.set_client_info ( to_char(sysdate,'hh24:mi:ss') || ' ' || x.job_name || ', ' || x.state );
      -- leave the block as soon as the job is no longer executing
      if ( x.state <> 'EXECUTING' ) then
        return;
      end if;
      dbms_lock.sleep(1);
    end loop;
  end loop;
end;
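The loop above only reports the job state. For a percentage view from another session, a running Data Pump job normally also appears in V$SESSION_LONGOPS with the job name as OPNAME; something along these lines (the SYS_IMPORT% pattern assumes a schema-mode import job with the default name):

select sid, serial#, opname, sofar, totalwork,
       round(sofar / totalwork * 100, 2) as pct_done
from   v$session_longops
where  opname like 'SYS_IMPORT%'
and    totalwork > 0
and    sofar <> totalwork;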

Caveats:

If a table being transferred contains a LONG column, importing that table over the network link fails:

Connected to: Oracle Database 10g Enterprise Edition Release 10.2.0.1.0 - 64bit Production
With the Partitioning, OLAP and Data Mining options
Starting "DUMP_USER"."SYS_IMPORT_SCHEMA_01": dump_user/******** directory=dump_dir logfile=logs.log schemas=tests network_link=dump_link
Estimate in progress using BLOCKS method...
Processing object type SCHEMA_EXPORT/TABLE/TABLE_DATA
Total estimation using BLOCKS method: 128 KB
Processing object type SCHEMA_EXPORT/USER
ORA-31684: Object type USER:"TESTS" already exists
Processing object type SCHEMA_EXPORT/SYSTEM_GRANT
Processing object type SCHEMA_EXPORT/ROLE_GRANT
Processing object type SCHEMA_EXPORT/DEFAULT_ROLE
Processing object type SCHEMA_EXPORT/PRE_SCHEMA/PROCACT_SCHEMA
Processing object type SCHEMA_EXPORT/TABLE/TABLE
. . imported "TESTS"."AAA" 1 rows
ORA-31679: Table data object "TESTS"."BBB" has long columns, and longs can not be loaded/unloaded using a network link
Job "DUMP_USER"."SYS_IMPORT_SCHEMA_01" completed with 2 error(s) at 21:02:07

Source: http://blog.itpub.net/10130206/viewspace-1039773/
