代码段如下:
p_cur_date := SYSDATE;  -- FIX: statement terminator was missing
v_s1 := TO_CHAR (p_cur_date, 'yyyymmddhh24miss');
v_sendtime := SUBSTR (v_s1, 1, 8) || '235959';   -- end of today   (hh24miss = 235959)
v_sbegintime := SUBSTR (v_s1, 1, 8) || '000000'; -- start of today (hh24miss = 000000)
v_rn := 0;
v_tn := 50000; -- rows copied per transaction/batch

-- Direct-path (APPEND) inserts only skip redo when the target is NOLOGGING.
EXECUTE IMMEDIATE 'alter table msg_his nologging';

LOOP
   -- Copy one batch of today's TYPE '12'/'13' messages into msg_his.
   -- FIX: in a single query block ROWNUM is assigned BEFORE ORDER BY, so the
   -- original paginated over an unstable order and batches could overlap or
   -- skip rows. Stable Top-N pagination needs the ORDER BY in the innermost
   -- block and ROWNUM applied one level out, as below.
   INSERT /*+ append */ INTO msg_his
               (OID, p_oid, from_who, to_who, direction, title, content,
                TYPE, TIME, branch_code, del_flag, read_status,
                deal_status, finish_time, oper_id, move_time, move_month)
      SELECT OID, p_oid, from_who, to_who, direction, title, content,
             TYPE, TIME, branch_code, del_flag, read_status, deal_status,
             finish_time, oper_id, c1, c2
        FROM (SELECT s.*, ROWNUM rn
                FROM (SELECT OID, p_oid, from_who, to_who, direction, title,
                             content, TYPE, TIME, branch_code, del_flag,
                             read_status, deal_status, finish_time, oper_id,
                             v_s1 c1, v_s2 c2   -- move_time / move_month values
                        FROM message
                       WHERE TIME >= v_sbegintime
                         AND TIME <= v_sendtime
                         AND (TYPE = '12' OR TYPE = '13')
                       ORDER BY TIME) s
               WHERE ROWNUM <= (v_rn + 1) * v_tn)
       WHERE rn > v_rn * v_tn;

   DBMS_OUTPUT.put_line (   'insert round is '
                         || v_rn
                         || ' and count is '
                         || SQL%ROWCOUNT);

   -- A short batch means the final page has been copied.
   IF (SQL%ROWCOUNT < v_tn)
   THEN
      COMMIT;
      EXIT;
   END IF;

   -- The COMMIT is mandatory here anyway: a direct-path transaction must end
   -- before the next /*+ append */ insert into the same table.
   COMMIT;
   v_rn := v_rn + 1;
END LOOP;
将表msg_his改为nologging及用append形式，发现速度提高不大
用到索引TIME
通过分段提交，速度也没明显变化
LOOP
DELETE message
WHERE TIME < TO_CHAR (p_cur_date - v_n1, 'yyyymmddhh24miss')
AND ROWNUM <= v_tn; IF (SQL%ROWCOUNT < v_tn)
THEN
COMMIT;
EXIT;
END IF; COMMIT;
END LOOP;
此处删除耗时较多...用索引TIME、分段提交。数据库配置方面有什么能提高的？
谢谢...请各位多多指点
-- (Repeated paste of the batch-archive snippet.) Copies today's TYPE '12'/'13'
-- rows from MESSAGE into MSG_HIS in v_tn-row batches, committing each batch.
-- NOTE(review): the first line is missing its ';', and ROWNUM/rn are assigned
-- before the ORDER BY in the same query block, so batch membership is not
-- guaranteed stable across iterations — verify against the full procedure.
p_cur_date :=sysdate
v_s1 := TO_CHAR (p_cur_date, 'yyyymmddhh24miss');
v_sendtime := SUBSTR (v_s1, 1, 8) || '235959'; -- end of today
v_sbegintime := SUBSTR (v_s1, 1, 8) || '000000'; -- start of today
v_rn := 0;
v_tn := 50000; -- rows per transaction/batch
EXECUTE IMMEDIATE 'alter table msg_his nologging'; LOOP
INSERT /*+ append */INTO msg_his
(OID, p_oid, from_who, to_who, direction, title, content,
TYPE, TIME, branch_code, del_flag, read_status,
deal_status, finish_time, oper_id, move_time, move_month)
(SELECT OID, p_oid, from_who, to_who, direction, title, content,
TYPE, TIME, branch_code, del_flag, read_status, deal_status,
finish_time, oper_id, c1, c2
FROM (SELECT OID, p_oid, from_who, to_who, direction, title,
content, TYPE, TIME, branch_code, del_flag,
read_status, deal_status, finish_time, oper_id,
v_s1 c1, v_s2 c2, ROWNUM rn
FROM message
WHERE TIME >= v_sbegintime
AND TIME <= v_sendtime
AND (TYPE = '12' OR TYPE = '13')
AND ROWNUM <= (v_rn + 1) * v_tn
ORDER BY TIME)
WHERE rn > v_rn * v_tn); DBMS_OUTPUT.put_line ( 'insert round is '
|| v_rn
|| ' and count is '
|| SQL%ROWCOUNT
); IF (SQL%ROWCOUNT < v_tn)
THEN
COMMIT; -- short batch: last page copied, commit and leave the loop
EXIT;
END IF; COMMIT; -- commit ends the direct-path txn, required before the next APPEND insert
v_rn := v_rn + 1;
END LOOP;
将表msg_his改为nologging及用append形式，发现速度提高不大
用到索引TIME
通过分段提交，速度也没明显变化
LOOP
-- (Repeated paste of the batch-delete snippet; its opening LOOP keyword is
-- fused onto the prose line above.) Deletes MESSAGE rows older than v_n1
-- days, at most v_tn rows per pass, committing after each pass.
DELETE message
WHERE TIME < TO_CHAR (p_cur_date - v_n1, 'yyyymmddhh24miss')
AND ROWNUM <= v_tn; IF (SQL%ROWCOUNT < v_tn)
THEN
COMMIT; -- short pass: nothing older remains, commit and exit
EXIT;
END IF; COMMIT;
END LOOP;
此处删除耗时较多...用索引TIME、分段提交。数据库配置方面有什么能提高的？
谢谢...请各位多多指点
解决方案 »
- 请问 oracle能一个数据对应多个实例吗 在没有rac或者ops的情况下
- 导入表时重新命名(或导出表时重新命名).在线等
- 导入.dmp时出错:IMP-00003: 遇到 ORACLE 错误 942
- SQL查询问题,从一个文件中依次读出查询条件,循环查询
- 一字段含有逗号分隔的串,把这条记录按逗号分隔分成多条记录,具体如下,在线等
- 基于tuxedo的模拟外汇交易系统
- 天啊,怎么会出现这种情况呢,大家都来看看
- 问两个比较菜的问题?
- ???????ora-00101:invalid specification for system parameter MTS-DISPATCHERS
- 送分问题(关于日期格式的,不知道放在这里适不适合,jsp+servlet+orcle),还请赐教!!!
- 求oracle头文件修改工具:直接修改.dmp的头信息,将高版本导出的oracle.dmp文件导入到底版本的oracle中
- 关于‘资源计划’功能的利用的问题
content, TYPE, TIME, branch_code, del_flag,
read_status, deal_status, finish_time, oper_id,
v_s1 c1, v_s2 c2, ROWNUM rn
FROM message
WHERE TIME >= v_sbegintime
AND TIME <= v_sendtime
AND (TYPE = '12' OR TYPE = '13')
AND ROWNUM <= (v_rn + 1) * v_tn
ORDER BY TIME)

1、time字段有没建索引
2、TIME >= v_sbegintime AND TIME <= v_sendtime每次循环大概有多少条记录
3、查看分析,千万不要走type索引,type值估计存在message表值不多
2. TIME >= v_sbegintime AND TIME <= v_sendtime 是查当天记录；循环批量由 ROWNUM <= (v_rn + 1) * v_tn 和外层 WHERE rn > v_rn * v_tn 控制，也就是每批 v_tn := 50000 条。
3. type没有索引，
type值估计存在message表值不多??多谢..
设置大一点的temp空间 Undo空间
nologging应该会快不少,除非你的系统IO不会出现争用。
并行插入不知道会不会有效。
如果不要求每次都一样,可以用其它的方法来细分每次提交的数据量,
如用TIME细分。
每次用rownum >a and rownum<X这样性能非常不好。因为TIME 上有索引,如果在time 上细分段,再提交,速度应该会快很多。
如果不用order by，在数据库变化的情况下，能保证数据是一致的吗？
有没有可能第一次、第二次都查到记录A？用时间分段，在分布比较不均匀的情况下，要是一个时间段的记录太多(2000W)，是否需要再分呢？
试试 先...
谢谢
DECLARE
BEGIN
   -- Remove up to 100,000 MESSAGE rows: ten passes of at most 10,000 rows,
   -- committing after every pass so each transaction stays small.
   FOR batch_no IN 1 .. 10
   LOOP
      DELETE FROM message
       WHERE ROWNUM <= 10000;

      COMMIT;
   END LOOP;
END;
和
SQL2:
declare
   -- Delete MESSAGE rows in batches of 10,000 by collecting their ROWIDs
   -- first, then bulk-deleting by ROWID with FORALL.
   type rtab is table of rowid index by BINARY_INTEGER;
   v_rid rtab;                       -- FIX: declaration was missing its ';'
begin
   for i in 1..10 loop               -- FIX: 'loop;' is a syntax error - no ';' after LOOP
      -- FIX: a plain SELECT ... INTO fetches exactly one row into a scalar;
      -- filling a collection from a multi-row query requires BULK COLLECT.
      select rowid bulk collect into v_rid
        from message
       where rownum <= 10000;

      forall n in 1..v_rid.count
         delete from message where rowid = v_rid(n);

      commit;                        -- one transaction per 10,000-row batch
   end loop;
end;
照书上说SQL2比SQL1快
但实际测试 速度差不多 有时还慢..
是否是sql2写得不对...
多谢
p_cur_date := SYSDATE;
v_s1 := TO_CHAR ( p_cur_date, 'yyyymmddhh24miss' );
v_sendtime := SUBSTR ( v_s1, 1, 8 ) || '235959';   -- end of today
v_sbegintime := SUBSTR ( v_s1, 1, 8 ) || '000000'; -- start of today

-- One set-based direct-path insert instead of a paginated loop.
-- FIX: the original selected c1, c2 directly FROM MESSAGE, but in the poster's
-- query those were column aliases for the PL/SQL variables v_s1/v_s2, not real
-- MESSAGE columns - as written it would fail with ORA-00904.
INSERT /*+ APPEND */ INTO msg_his
   ( OID, p_oid, from_who, to_who, direction, title, content, TYPE, TIME, branch_code
   , del_flag, read_status, deal_status, finish_time, oper_id, move_time, move_month )
   SELECT OID, p_oid, from_who, to_who, direction, title, content, TYPE, TIME, branch_code
        , del_flag, read_status, deal_status, finish_time, oper_id, v_s1, v_s2
     FROM MESSAGE
    WHERE TIME >= v_sbegintime
      AND TIME <= v_sendtime
      AND TYPE IN ( '12', '13' );
END;
/
2. message的索引,order by将消耗大量的CPU,如果可能,尽量不用,
type是否有索引?
如果上面的问题解决了还是很慢,那么你可以检查一下这个表的参数,看看这个表是不是碎片太多?看看是不是行迁移太多?
3. 对于千万级的表,索引及表参数将是关键,
4. 如果还是很慢，那么请你把此语句的执行计划拿出来看看，看使用了哪些索引？看看语句的连接模式？hash join？实际上，说了这么多废话的核心就是：执行计划。建议楼主把执行计划拿出来大家帮忙看看，基本上能解决问题的。提示一下：如果执行计划中索引使用不正确（实际上索引存在），那么请看看数据库的优化模式，RBO？CBO？如果是RBO，那么看看是否曾经对该表进行过分析？又或者分析的日期离现在太久了
----------------------------------------------------------
0 INSERT STATEMENT Optimizer=CHOOSE (Cost=112 Card=60 Bytes=33
840) 1 0 VIEW (Cost=112 Card=60 Bytes=33840)
2 1 COUNT (STOPKEY)
3 2 TABLE ACCESS (BY INDEX ROWID) OF 'BC_MESSAGE' (Cost=11
2 Card=1359 Bytes=123669) 4 3 INDEX (RANGE SCAN) OF 'BC_MESSAGE_TIME' (NON-UNIQUE)
(Cost=16 Card=2717)Statistics
----------------------------------------------------------
0 recursive calls
29 db block gets
9 consistent gets
6 physical reads
5112 redo size
619 bytes sent via SQL*Net to client
1556 bytes received via SQL*Net from client
3 SQL*Net roundtrips to/from client
1 sorts (memory)
0 sorts (disk)
10 rows processed
type没有建索引....执行计划如上 帮忙分析哈...谢谢
select 语句加上全表扫描和并行。
采用并行写入。
还有,频繁的向数据库提交事务,会造成大量的写日志消耗资源。
在你的执行计划中,可以看到你的日志消耗是其他消耗的总和的几倍。
5112 redo size
然后,逐条察看你的Sql语句的执行计划和trace。
看执行计划在于察看索引的使用情况,察看trace在于了解sql语句的硬件资源耗费情况,只有这几项结合起来再加上自己的经验才能判断性能的瓶颈在哪里。
TYPE, TIME, branch_code, del_flag, read_status, deal_status,
finish_time, oper_id, c1, c2
FROM (SELECT OID, p_oid, from_who, to_who, direction, title,
content, TYPE, TIME, branch_code, del_flag,
read_status, deal_status, finish_time, oper_id,
v_s1 c1, v_s2 c2, ROWNUM rn
FROM message
WHERE TIME >= v_sbegintime
AND TIME <= v_sendtime
AND (TYPE = '12' OR TYPE = '13')
AND ROWNUM <= (v_rn + 1) * v_tn
ORDER BY TIME)
还有,你的这部分sql用到了一个循环里面,实在很不好,每次到这里都需要查询一次。建议使用游标循环,用fetch来控制数据,这样只需要查询一次就可以完成你想要完成的事情了。
--create table t_test1 as select table_name,owner from dba_tables where 0=1;
set serveroutput on size 100000
set timing on
-- Demo: copy dba_tables into t_test1 in batches of 2699 rows using
-- FETCH ... BULK COLLECT ... LIMIT plus FORALL, committing per batch.
declare
type typ1 IS TABLE OF t_test1%ROWTYPE INDEX BY BINARY_INTEGER;
rec_tab typ1; -- holds one fetched batch; BULK COLLECT replaces its contents each fetch
CURSOR c1 IS SELECT table_name, owner from dba_tables;
i number; -- NOTE(review): assigned but never read - candidate for removal
j number; -- NOTE(review): shadowed by the FORALL index below, so this declaration is unused
k number; -- rows inserted by the last FORALL, for the progress message
begin
OPEN c1;
loop
FETCH c1 BULK COLLECT INTO rec_tab LIMIT 2699;
i := c1%ROWCOUNT;
-- Guard: the fetch returned zero rows (cursor already exhausted), so there
-- is nothing to insert this pass.
exit when not rec_tab.EXISTS(1);
forall j in rec_tab.FIRST..rec_tab.LAST
insert into t_test1 values rec_tab(j);
k := SQL%ROWCOUNT;
dbms_output.put_line('insert '||k||' rows');
commit;
-- With LIMIT, %NOTFOUND is true on the final short fetch. Checking it here,
-- AFTER the insert, is what keeps the last partial batch from being dropped.
exit when c1%notfound;
end loop;
close c1;
end;
/