-- Look up the display names for the three FK columns of maintable.
-- NOTE: hyphens are not legal in unquoted T-SQL identifiers, so every
-- hyphenated name must be bracketed: [table1-name], [table1-ID], ...
DECLARE @from_id INT, @to_id INT;  -- original used bare "m and n" placeholders,
                                   -- which collide with the table alias m
SELECT
    m.id,
    a.name AS [table1-name],
    b.name AS [table2-name],
    c.name AS [table3-name]
FROM maintable AS m
INNER JOIN table1 AS a ON m.[table1-ID] = a.[table1-ID]
-- original joined b/c on [table1-ID]; presumably a copy-paste slip — TODO confirm
INNER JOIN table2 AS b ON m.[table2-ID] = b.[table2-ID]
INNER JOIN table3 AS c ON m.[table3-ID] = c.[table3-ID]
WHERE m.id BETWEEN @from_id AND @to_id;
在 maintable 的 id 字段上建立聚集索引,在 table1-ID 至 table3-ID 上建立非聚集索引;table1 至 table3 的 id 字段同样建立聚集索引。
@page_size INT = 50,   -- rows per page
@page_index INT = 1    -- 1-based page number
AS
BEGIN
    DECLARE @page_start INT;
    DECLARE @page_end INT;

    -- First and last ID on the requested page.
    -- NOTE(review): this assumes mainTable.ID values are contiguous — the
    -- follow-up discussion below addresses the non-contiguous case.
    SET @page_start = @page_size * (@page_index - 1) + 1;
    SET @page_end = @page_size + @page_start - 1;

    SELECT *               -- NOTE(review): prefer an explicit column list
    FROM mainTable AS t
    WHERE t.ID BETWEEN @page_start AND @page_end
    ORDER BY t.ID;         -- original read "order by t.IDEND": the line break
END                        -- before END was missing, fusing it onto t.ID
如果主表 mainTable 还有 recordDate 和 recordTime 两个字段,根据 recordDate 或者 recordTime 检索时,mainTable 的 ID 可能是不连续的,这时应该怎么处理呢?能取查询结果的第 xxx 行至第 yyy 行的记录吗?
用存储过程也不错。如果主表 mainTable 还有 recordDate 和 recordTime 两个字段,根据 recordDate 或者 recordTime 检索时,mainTable 的 ID 可能是不连续的,这时应该怎么写呢?能否将查询结果加上行号,再根据行号来提取第 xxx 行至第 yyy 行的记录?
-- Page by ROW_NUMBER so gaps in mainTable.ID do not matter.
-- NOTE: the original fragment was missing the leading "SELECT * FROM",
-- so it could not run on its own.
SELECT *
FROM
(
    SELECT *,
           ROW_NUMBER() OVER (ORDER BY ID) AS rn
    FROM mainTable
) AS t
WHERE t.rn BETWEEN @page_start AND @page_end
ORDER BY t.rn;
在子表的 ID 字段上建立聚集索引,性能不会太差的。
-- Rows 11..20 of the joined result, ordered by createtime.
-- Fixes vs. original: the derived table had no alias (required in T-SQL),
-- hyphenated identifiers were unbracketed, and the b/c joins compared
-- [table1-ID] instead of their own key columns (copy-paste slip — TODO confirm).
SELECT *
FROM
(
    SELECT
        ROW_NUMBER() OVER (ORDER BY createtime) AS rownum,
        m.id,
        a.name AS [table1-name],
        b.name AS [table2-name],
        c.name AS [table3-name]
    FROM maintable AS m
    INNER JOIN table1 AS a ON m.[table1-ID] = a.[table1-ID]
    INNER JOIN table2 AS b ON m.[table2-ID] = b.[table2-ID]
    INNER JOIN table3 AS c ON m.[table3-ID] = c.[table3-ID]
) AS paged
WHERE paged.rownum > 10 AND paged.rownum <= 20;
-- Paged lookup: filter mainTable by recordDate/recordTime, number the rows,
-- and return rows @xxx..@yyy. Bracketed names in the parameter list are
-- placeholders the author must substitute before running.
create proc [存储过程名]
(
    @recordDate [recordDate字段数据类型],  -- filter value 1 (mainTable.recordDate)
    @recordTime [recordTime字段数据类型],  -- filter value 2 (mainTable.recordTime)
                                           -- (the comma after this parameter was
                                           --  missing in the original: syntax error)
    @xxx int,  -- first row number to return (inclusive)
    @yyy int   -- last row number to return (inclusive)
)
as
begin
    select ID, [table1-Name], [table2-Name], [table3-Name]
    from
    (
        select
            a.ID,
            t1.[table1-Name],
            t2.[table2-Name],
            t3.[table3-Name],
            -- ORDER BY GETDATE() is effectively constant, so the row order is
            -- arbitrary; substitute a real sort key if stable paging is needed.
            row_number() over (order by getdate()) as rn
        from mainTable a
        left join table1 t1 on a.[table1-ID] = t1.ID
        left join table2 t2 on a.[table2-ID] = t2.ID
        left join table3 t3 on a.[table3-ID] = t3.ID
        where a.recordDate = @recordDate
          and a.recordTime = @recordTime
    ) t
    where rn between @xxx and @yyy
end
方法 2:从检索的记录中每次取几十条,分页显示。这种方法每次翻页都要从海量数据里面重新检索,数据库会做很多重复的工作,效果和效率有待确认。方法 1 对于海量数据是不可行的,内存容量就是个问题;对于相对小的数据量,不用游标定位,还有没有别的方法?
方法 2 的效率应该比较低:内存占用不大,但服务器的压力会比较大,有没有办法优化?对于大数据量高效检索和显示的问题,除了上述两种方法,还有没有更好的方法?