Slow "simple" join on int + tstzrange columns for ~1 million rows

Time: 2017-08-09 13:24:28

Tags: postgresql date-range

I am struggling with the performance of a query that does a "simple" left join on an int column and a tstzrange column:

SELECT
      table_1.id_col
    , table_1.time_range
    , table_1.other_col_1
    , table_2.other_col_2
FROM table_1
LEFT JOIN table_2
ON table_1.id_col = table_2.id_col
AND table_1.time_range = table_2.time_range

For ~1 million rows in table_1, this query takes ~80-100 seconds to run (the final result set of table_1 joined with table_2 is on the same order of magnitude).

This query is part of a larger, more complex CTE query (which actually selects only a small subset of those 1 million rows), but I have isolated the part where the bottleneck occurs.

I have added what I believe are the appropriate indexes (GiST indexes) on the combination of these columns, but from the EXPLAIN output I gather they are discarded, since essentially almost all rows are being joined anyway.

Is there any way to improve the performance here?

For example, by physically ordering the rows so that the sequential scans run faster?

My tables:

CREATE TABLE data.table_1 (
    table_1_id SERIAL NOT NULL, 
    id_col INTEGER NOT NULL, 
    time_range TSTZRANGE NOT NULL, 
    other_col_1 INTEGER, 
    PRIMARY KEY (table_1_id)
);

CREATE INDEX idx_table_1_id_col ON data.table_1 (id_col);
CREATE INDEX idx_table_1_time_range ON data.table_1 USING gist (time_range);
CREATE INDEX idx_table_1_id_col_time_range ON data.table_1 USING gist (id_col, time_range);

CREATE TABLE data.table_2 (
    table_2_id SERIAL NOT NULL, 
    id_col INTEGER NOT NULL, 
    time_range TSTZRANGE NOT NULL, 
    other_col_2 DOUBLE PRECISION, 
    PRIMARY KEY (table_2_id)
);

CREATE INDEX idx_table_2_id_col ON data.table_2 (id_col);
CREATE INDEX idx_table_2_time_range ON data.table_2 USING gist (time_range);
CREATE INDEX idx_table_2_id_col_time_range ON data.table_2 USING gist (id_col, time_range);
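
Presumably the btree_gist extension is already installed here; without it, a composite GiST index that includes a plain INTEGER column such as id_col cannot be created, because integers have no built-in GiST operator class. For completeness, the assumed setup step:

-- prerequisite for the composite GiST indexes above (id_col is a plain integer)
CREATE EXTENSION IF NOT EXISTS btree_gist;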

Here is the detailed EXPLAIN output:
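
(The JSON plan below was presumably produced with a command along these lines; this is an assumption based on the actual timings, buffer counts and output columns it contains.)

EXPLAIN (ANALYZE, VERBOSE, BUFFERS, FORMAT JSON)
SELECT ...;  -- the join query shown above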

[
  {
    "Plan": {
      "Node Type": "Hash Join",
      "Join Type": "Left",
      "Startup Cost": 198185.10,
      "Total Cost": 4163704.54,
      "Plan Rows": 73508636,
      "Plan Width": 20,
      "Actual Startup Time": 31055.086,
      "Actual Total Time": 89488.540,
      "Actual Rows": 1015568,
      "Actual Loops": 1,
      "Output": ["table_1.id_col", "table_1.other_col_1", "table_2.other_col_2"],
      "Hash Cond": "((table_1.id_col = table_2.id_col) AND (table_1.time_range = table_2.time_range))",
      "Shared Hit Blocks": 165149,
      "Shared Read Blocks": 632793,
      "Shared Dirtied Blocks": 0,
      "Shared Written Blocks": 0,
      "Local Hit Blocks": 0,
      "Local Read Blocks": 0,
      "Local Dirtied Blocks": 0,
      "Local Written Blocks": 0,
      "Temp Read Blocks": 38220,
      "Temp Written Blocks": 37966,
      "I/O Read Time": 0.000,
      "I/O Write Time": 0.000,
      "Plans": [
        {
          "Node Type": "Seq Scan",
          "Parent Relationship": "Outer",
          "Relation Name": "table_1",
          "Schema": "data",
          "Alias": "table_1",
          "Startup Cost": 0.00,
          "Total Cost": 1492907.36,
          "Plan Rows": 73508636,
          "Plan Width": 34,
          "Actual Startup Time": 24827.453,
          "Actual Total Time": 77143.930,
          "Actual Rows": 904431,
          "Actual Loops": 1,
          "Output": ["table_1.id_col", "table_1.other_col_1", "table_1.time_range"],
          "Shared Hit Blocks": 165147,
          "Shared Read Blocks": 592674,
          "Shared Dirtied Blocks": 0,
          "Shared Written Blocks": 0,
          "Local Hit Blocks": 0,
          "Local Read Blocks": 0,
          "Local Dirtied Blocks": 0,
          "Local Written Blocks": 0,
          "Temp Read Blocks": 0,
          "Temp Written Blocks": 0,
          "I/O Read Time": 0.000,
          "I/O Write Time": 0.000
        },
        {
          "Node Type": "Hash",
          "Parent Relationship": "Inner",
          "Startup Cost": 88292.64,
          "Total Cost": 88292.64,
          "Plan Rows": 4817164,
          "Plan Width": 34,
          "Actual Startup Time": 6204.927,
          "Actual Total Time": 6204.927,
          "Actual Rows": 4817085,
          "Actual Loops": 1,
          "Output": ["table_2.other_col_2", "table_2.id_col", "table_2.time_range"],
          "Hash Buckets": 65536,
          "Original Hash Buckets": 65536,
          "Hash Batches": 128,
          "Original Hash Batches": 128,
          "Peak Memory Usage": 2930,
          "Shared Hit Blocks": 2,
          "Shared Read Blocks": 40119,
          "Shared Dirtied Blocks": 0,
          "Shared Written Blocks": 0,
          "Local Hit Blocks": 0,
          "Local Read Blocks": 0,
          "Local Dirtied Blocks": 0,
          "Local Written Blocks": 0,
          "Temp Read Blocks": 0,
          "Temp Written Blocks": 31422,
          "I/O Read Time": 0.000,
          "I/O Write Time": 0.000,
          "Plans": [
            {
              "Node Type": "Seq Scan",
              "Parent Relationship": "Outer",
              "Relation Name": "table_2",
              "Schema": "data",
              "Alias": "table_2",
              "Startup Cost": 0.00,
              "Total Cost": 88292.64,
              "Plan Rows": 4817164,
              "Plan Width": 34,
              "Actual Startup Time": 0.650,
              "Actual Total Time": 3769.157,
              "Actual Rows": 4817085,
              "Actual Loops": 1,
              "Output": ["table_2.other_col_2", "table_2.id_col", "table_2.time_range"],
              "Shared Hit Blocks": 2,
              "Shared Read Blocks": 40119,
              "Shared Dirtied Blocks": 0,
              "Shared Written Blocks": 0,
              "Local Hit Blocks": 0,
              "Local Read Blocks": 0,
              "Local Dirtied Blocks": 0,
              "Local Written Blocks": 0,
              "Temp Read Blocks": 0,
              "Temp Written Blocks": 0,
              "I/O Read Time": 0.000,
              "I/O Write Time": 0.000
            }
          ]
        }
      ]
    },
    "Planning Time": 0.350,
    "Triggers": [
    ],
    "Execution Time": 89689.809
  }
]

1 answer:

Answer 0 (score: 0)

Physically sorting the data with CLUSTER brought the query time down to ~5 seconds, which is fine, considering that I will be selecting only a subset of these rows further on:

CLUSTER table_1 USING idx_table_1_id_col_time_range;
CLUSTER table_2 USING idx_table_2_id_col_time_range;
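
A caveat with this approach, not covered in the answer itself: CLUSTER rewrites each table under an ACCESS EXCLUSIVE lock, and the physical ordering is not maintained as rows are inserted or updated, so it has to be repeated periodically. Refreshing planner statistics afterwards is also reasonable; a minimal sketch:

-- refresh planner statistics (including physical correlation) after the rewrite
ANALYZE data.table_1;
ANALYZE data.table_2;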