I have a process that generates 2 million records in a temporary table (expected to grow to 9 million). The data in the temporary table then has to be copied into CLOBs and stored back in database columns.
Currently I am facing a performance problem copying the data from the temporary table into the CLOB columns: it takes about 12 minutes. The code snippet is shown below.
Is there a better way to do this?
Code snippet
DECLARE
    lv_clob_temp        CLOB;
    lv_content_file     REPORT_DETAILS.content_file%TYPE;
    lv_rev_content_file REPORT_DETAILS.rev_content_file%TYPE;
    lv_crlf             CONSTANT VARCHAR2(2) := CHR(13) || CHR(10);
    cur_rec             SYS_REFCURSOR;
    TYPE rec_report_temp IS TABLE OF REPORT_TEMP%ROWTYPE;
    tbl_report_temp     rec_report_temp;
    lv_bulk_limit       PLS_INTEGER := 100000;
BEGIN
    -- Fetch the master row for the report; FOR UPDATE locks it so the
    -- fetched LOB locators can be written to.
    SELECT content_file
         , rev_content_file
      INTO lv_content_file
         , lv_rev_content_file
      FROM REPORT_DETAILS
     WHERE rep_id = 1
       FOR UPDATE;

    DBMS_LOB.CREATETEMPORARY(lv_clob_temp, TRUE, DBMS_LOB.SESSION);

    -- Data for the content extract
    OPEN cur_rec FOR 'SELECT COL1, COL2, COL3, COL4, COL5, COL6, COL7 FROM REPORT_TEMP';
    LOOP
        FETCH cur_rec BULK COLLECT INTO tbl_report_temp LIMIT lv_bulk_limit;
        EXIT WHEN tbl_report_temp.COUNT = 0;
        FOR i IN 1 .. tbl_report_temp.COUNT LOOP
            -- Full CSV line for the content file
            lv_clob_temp := tbl_report_temp(i).COL1
                || ',' || tbl_report_temp(i).COL2
                || ',' || TO_CHAR(tbl_report_temp(i).COL3, 'DD/MM/YYYY')
                || ',' || TO_CHAR(tbl_report_temp(i).COL4, 'DD/MM/YYYY')
                || ',' || TO_CHAR(tbl_report_temp(i).COL5, 'DD/MM/YYYY')
                || ',' || tbl_report_temp(i).COL7
                || ',' || TO_CHAR(tbl_report_temp(i).COL6, 'DD/MM/YYYY')
                || lv_crlf;
            DBMS_LOB.APPEND(lv_content_file, lv_clob_temp);

            -- Shorter variant of the same line for the revised content file
            lv_clob_temp := tbl_report_temp(i).COL1
                || ',' || tbl_report_temp(i).COL2
                || ',' || TO_CHAR(tbl_report_temp(i).COL3, 'DD/MM/YYYY')
                || ',' || TO_CHAR(tbl_report_temp(i).COL4, 'DD/MM/YYYY')
                || ',' || TO_CHAR(tbl_report_temp(i).COL5, 'DD/MM/YYYY')
                || lv_crlf;
            DBMS_LOB.APPEND(lv_rev_content_file, lv_clob_temp);
        END LOOP;
    END LOOP;
    CLOSE cur_rec;

    UPDATE REPORT_DETAILS
       SET content_file     = lv_content_file
         , rev_content_file = lv_rev_content_file
     WHERE rep_id = 1;
    COMMIT;
END;
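For reference, the per-row DBMS_LOB.APPEND calls are usually what dominates the runtime in a loop like this. A common mitigation is to accumulate lines in a VARCHAR2 buffer and flush it to the CLOB with DBMS_LOB.WRITEAPPEND only when the buffer is nearly full, so the LOB is touched once per few thousand rows rather than once per row. A minimal sketch of that idea (column list shortened for brevity; everything except REPORT_TEMP and its columns is illustrative, not the original code):

DECLARE
    lv_target CLOB;
    lv_buf    VARCHAR2(32767);  -- in-memory buffer; string appends are cheap
    lv_line   VARCHAR2(4000);
    lv_crlf   CONSTANT VARCHAR2(2) := CHR(13) || CHR(10);
BEGIN
    DBMS_LOB.CREATETEMPORARY(lv_target, TRUE, DBMS_LOB.SESSION);
    FOR r IN (SELECT COL1, COL2 FROM REPORT_TEMP) LOOP
        lv_line := r.COL1 || ',' || r.COL2 || lv_crlf;
        -- Flush to the CLOB only when the next line would overflow the buffer
        IF NVL(LENGTH(lv_buf), 0) + LENGTH(lv_line) > 32767 THEN
            DBMS_LOB.WRITEAPPEND(lv_target, LENGTH(lv_buf), lv_buf);
            lv_buf := NULL;
        END IF;
        lv_buf := lv_buf || lv_line;
    END LOOP;
    -- Flush the tail left over after the last row
    IF lv_buf IS NOT NULL THEN
        DBMS_LOB.WRITEAPPEND(lv_target, LENGTH(lv_buf), lv_buf);
    END IF;
END;
/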
Answer 0 (score: 0)
A friend suggested the script given below.
To implement the CLOB concatenation, use the function stragg_clob (create it by running the following script).
create or replace type rmd_agg_type as object
(
    total clob,

    static function ODCIAggregateInitialize(sctx IN OUT rmd_agg_type)
        return number,
    member function ODCIAggregateIterate(self IN OUT rmd_agg_type, value IN varchar2)
        return number,
    member function ODCIAggregateTerminate(self IN rmd_agg_type, returnValue OUT clob, flags IN number)
        return number,
    member function ODCIAggregateMerge(self IN OUT rmd_agg_type, ctx2 IN rmd_agg_type)
        return number
);
/

create or replace type body rmd_agg_type
is
    static function ODCIAggregateInitialize(sctx IN OUT rmd_agg_type)
        return number
    is
    begin
        sctx := rmd_agg_type(null);
        return ODCIConst.Success;
    end;

    member function ODCIAggregateIterate(self IN OUT rmd_agg_type, value IN varchar2)
        return number
    is
    begin
        -- Append each incoming value, comma-separated, to the running CLOB
        self.total := self.total || ',' || value;
        return ODCIConst.Success;
    end;

    member function ODCIAggregateTerminate(self IN rmd_agg_type, returnValue OUT clob, flags IN number)
        return number
    is
    begin
        -- Strip the leading comma introduced by the first iteration
        returnValue := ltrim(self.total, ',');
        return ODCIConst.Success;
    end;

    member function ODCIAggregateMerge(self IN OUT rmd_agg_type, ctx2 IN rmd_agg_type)
        return number
    is
    begin
        -- Combine partial results produced by parallel execution slaves
        self.total := self.total || ctx2.total;
        return ODCIConst.Success;
    end;
end;
/

CREATE or replace FUNCTION stragg_clob(input varchar2)
    RETURN clob
    PARALLEL_ENABLE AGGREGATE USING rmd_agg_type;
/

grant execute on stragg_clob to user_schema;
To mimic your scenario I created two tables, populated two million records into the source, and then updated the destination table's CLOB column by appending the aggregated values.
create table seed_values (name1 varchar2(10), col2 varchar2(250));
insert into seed_values
select 'TMP'||level, 'ADI'||level from dual connect by level <= 2000000;
commit;
create table clob_value (base_data clob);
insert into clob_value values ('Value from before seed data#');
commit;
update clob_value set base_data = base_data || (
select sys.stragg_clob(name1||','||col2) as enames from seed_values
);
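As a quick sanity check after the update, the resulting CLOB length can be read back (assuming the same demo tables as above):

-- Should return the length of the seed prefix plus all appended name1/col2 pairs
select dbms_lob.getlength(base_data) as clob_chars from clob_value;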