The following query takes a very long time just to create a temporary table, even though it ends up with only about 228,000 distinct records.
DECLARE todate,fromdate DATETIME;
SET fromdate=DATE_SUB(UTC_TIMESTAMP(),INTERVAL 2 DAY);
SET todate=DATE_ADD(UTC_TIMESTAMP(),INTERVAL 14 DAY);
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
DROP TEMPORARY TABLE IF EXISTS tempabc;
SET max_heap_table_size = 1024*1024*1024;
CREATE TEMPORARY TABLE IF NOT EXISTS tempabc
-- (index using BTREE(id))
ENGINE=MEMORY
AS
(
SELECT SQL_NO_CACHE DISTINCT id
FROM abc
WHERE StartTime BETWEEN fromdate AND todate
);
I have already created an index on the 'StartTime' column, but creating the table still takes 20 seconds. Please help me reduce the creation time.
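For reference, the StartTime index was created roughly as sketched below. This is an assumption: the index name IDX_Start_time is taken from the EXPLAIN output further down, and the actual definition on my table may differ.

-- Assumed recreation of the existing single-column index on StartTime;
-- the name IDX_Start_time matches the EXPLAIN output below.
ALTER TABLE abc ADD INDEX IDX_Start_time (StartTime);

-- Check which indexes actually exist on the table:
SHOW INDEX FROM abc;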
More information:
I have since changed my query. Earlier I used the 'tempabc' temporary table to get the output; now I am using an IN clause instead of the temporary table. It now takes 12 seconds to execute, which is still longer than expected.
Earlier (takes 20-30 seconds):
DECLARE todate,fromdate DATETIME;
SET fromdate=DATE_SUB(UTC_TIMESTAMP(),INTERVAL 2 DAY);
SET todate=DATE_ADD(UTC_TIMESTAMP(),INTERVAL 14 DAY);
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
DROP TEMPORARY TABLE IF EXISTS tempabc;
SET max_heap_table_size = 1024*1024*1024;
CREATE TEMPORARY TABLE IF NOT EXISTS tempabc
-- (index using BTREE(id))
ENGINE=MEMORY
AS
(
SELECT SQL_NO_CACHE DISTINCT id
FROM abc
WHERE StartTime BETWEEN fromdate AND todate
);
SELECT DISTINCT p.xyzID
FROM tempabc s
JOIN xyz_tab p ON p.xyzID=s.ID AND IFNULL(IsGeneric,0)=0;
Now (takes 12-14 seconds):
DECLARE todate,fromdate TIMESTAMP;
SET fromdate=DATE_SUB(UTC_TIMESTAMP(),INTERVAL 2 DAY);
SET todate=DATE_ADD(UTC_TIMESTAMP(),INTERVAL 14 DAY);
SELECT p.xyzID FROM xyz_tab p
WHERE id IN (
SELECT DISTINCT id FROM abc
WHERE StartTime BETWEEN fromdate AND todate )
AND IFNULL(IsGeneric,0)=0 GROUP BY p.xyzID;
But we need to get the execution time down to 3-5 seconds.
Here is my EXPLAIN output.
*************************** 1. row ***************************
id: 1
select_type: SIMPLE
table: abc
partitions: NULL
type: index
possible_keys: ix_starttime_id,IDX_Start_time,IX_id_starttime,IX_id_starttime_prgsvcid
key: IX_id_starttime
key_len: 163
ref: NULL
rows: 18779876
filtered: 1.27
Extra: Using where; Using index; Using temporary; Using filesort; LooseScan
*************************** 2. row ***************************
id: 1
select_type: SIMPLE
table: p
partitions: NULL
type: eq_ref
possible_keys: PRIMARY,IX_seriesid
key: PRIMARY
key_len: 152
ref: onconnectdb.abc.ID
rows: 1
filtered: 100.00
Extra: Using where
EXPLAIN in JSON format:
EXPLAIN: {
  "query_block": {
    "select_id": 1,
    "cost_info": {
      "query_cost": "10139148.44"
    },
    "grouping_operation": {
      "using_temporary_table": true,
      "using_filesort": true,
      "cost_info": {
        "sort_cost": "1.00"
      },
      "nested_loop": [
        {
          "table": {
            "table_name": "abc",
            "access_type": "index",
            "possible_keys": [
              "ix_starttime_tmsid",
              "IDX_Start_time",
              "IX_id_starttime",
              "IX_id_starttime_prgsvcid"
            ],
            "key": "IX_id_starttime",
            "used_key_parts": [
              "ID",
              "StartTime",
              "EndTime"
            ],
            "key_length": "163",
            "rows_examined_per_scan": 19280092,
            "rows_produced_per_join": 264059,
            "filtered": "1.37",
            "using_index": true,
            "loosescan": true,
            "cost_info": {
              "read_cost": "393472.45",
              "eval_cost": "52812.00",
              "prefix_cost": "446284.45",
              "data_read_per_join": "2G"
            },
            "used_columns": [
              "ID",
              "StartTime"
            ],
            "attached_condition": "(`onconnectdb`.`abc`.`StartTime` between <cache>(fromdate@1) and <cache>(todate@0))"
          }
        },
        {
          "table": {
            "table_name": "p",
            "access_type": "eq_ref",
            "possible_keys": [
              "PRIMARY",
              "IX_seriesid"
            ],
            "key": "PRIMARY",
            "used_key_parts": [
              "ID"
            ],
            "key_length": "152",
            "ref": [
              "onconnectdb.abc.ID"
            ],
            "rows_examined_per_scan": 1,
            "rows_produced_per_join": 1,
            "filtered": "100.00",
            "cost_info": {
              "read_cost": "9640051.00",
              "eval_cost": "0.20",
              "prefix_cost": "10139147.44",
              "data_read_per_join": "2K"
            },
            "used_columns": [
              "ID",
              "xyzID",
              "IsGeneric"
            ],
            "attached_condition": "(ifnull(`onconnectdb`.`p`.`IsGeneric`,0) = 0)"
          }
        }
      ]
    }
  }
}
Please advise.