From fe85014b948195fcff901ed3b5628ec161a48a58 Mon Sep 17 00:00:00 2001 From: Arenatlx <314806019@qq.com> Date: Wed, 25 Sep 2024 20:29:48 +0800 Subject: [PATCH 1/2] This is an automated cherry-pick of #56227 Signed-off-by: ti-chi-bot --- executor/tiflashtest/BUILD.bazel | 4 + executor/tiflashtest/tiflash_test.go | 417 ++ .../testdata/plan_suite_out.json | 4011 +++++++++++++++++ pkg/planner/core/find_best_task.go | 3016 +++++++++++++ 4 files changed, 7448 insertions(+) create mode 100644 pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json create mode 100644 pkg/planner/core/find_best_task.go diff --git a/executor/tiflashtest/BUILD.bazel b/executor/tiflashtest/BUILD.bazel index 00c3364890678..d8445cbf09996 100644 --- a/executor/tiflashtest/BUILD.bazel +++ b/executor/tiflashtest/BUILD.bazel @@ -9,7 +9,11 @@ go_test( ], flaky = True, race = "on", +<<<<<<< HEAD:executor/tiflashtest/BUILD.bazel shard_count = 38, +======= + shard_count = 45, +>>>>>>> 8df006280e9 (planner: make converge index merge path feel the prefer tiflash hint (#56227)):pkg/executor/test/tiflashtest/BUILD.bazel deps = [ "//config", "//domain", diff --git a/executor/tiflashtest/tiflash_test.go b/executor/tiflashtest/tiflash_test.go index 7a99bab37c86d..103348ad99c91 100644 --- a/executor/tiflashtest/tiflash_test.go +++ b/executor/tiflashtest/tiflash_test.go @@ -1738,3 +1738,420 @@ func TestMppStoreCntWithErrors(t *testing.T) { require.Nil(t, failpoint.Disable(mppStoreCountSetLastUpdateTimeP2)) require.Nil(t, failpoint.Disable(mppStoreCountPDError)) } +<<<<<<< HEAD:executor/tiflashtest/tiflash_test.go +======= + +func TestMPP47766(t *testing.T) { + store := testkit.CreateMockStore(t, withMockTiFlash(1)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_allow_mpp=1") + tk.MustExec("set @@session.tidb_enforce_mpp=1") + tk.MustExec("set @@session.tidb_allow_tiflash_cop=off") + + tk.MustExec("CREATE TABLE `traces` (" + + " `test_time` timestamp NOT NULL," + + " `test_time_gen` date GENERATED ALWAYS AS (date(`test_time`)) VIRTUAL," + + " KEY `traces_date_idx` (`test_time_gen`)" + + ")") + tk.MustExec("alter table `traces` set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "traces") + err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustQuery("explain select date(test_time), count(1) as test_date from `traces` group by 1").Check(testkit.Rows( + "Projection_4 8000.00 root test.traces.test_time_gen->Column#5, Column#4", + "└─HashAgg_8 8000.00 root group by:test.traces.test_time_gen, funcs:count(1)->Column#4, funcs:firstrow(test.traces.test_time_gen)->test.traces.test_time_gen", + " └─TableReader_20 10000.00 root MppVersion: 2, data:ExchangeSender_19", + " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan_18 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) + tk.MustQuery("explain select /*+ read_from_storage(tiflash[traces]) */ date(test_time) as test_date, count(1) from `traces` group by 1").Check(testkit.Rows( + "TableReader_31 8000.00 root MppVersion: 2, data:ExchangeSender_30", + "└─ExchangeSender_30 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection_5 8000.00 mpp[tiflash] date(test.traces.test_time)->Column#5, Column#4", + " └─Projection_26 8000.00 mpp[tiflash] Column#4, test.traces.test_time", + " └─HashAgg_27 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#4, 
funcs:firstrow(Column#15)->test.traces.test_time", + " └─ExchangeReceiver_29 8000.00 mpp[tiflash] ", + " └─ExchangeSender_28 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]", + " └─HashAgg_25 8000.00 mpp[tiflash] group by:Column#17, funcs:count(1)->Column#14, funcs:firstrow(Column#16)->Column#15", + " └─Projection_32 10000.00 mpp[tiflash] test.traces.test_time->Column#16, date(test.traces.test_time)->Column#17", + " └─TableFullScan_15 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) +} + +func TestUnionScan(t *testing.T) { + store := testkit.CreateMockStore(t, withMockTiFlash(2)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_allow_mpp=1") + tk.MustExec("set @@session.tidb_enforce_mpp=1") + tk.MustExec("set @@session.tidb_allow_tiflash_cop=off") + + for x := 0; x < 2; x++ { + tk.MustExec("drop table if exists t") + if x == 0 { + // Test cache table. + tk.MustExec("create table t(a int not null primary key, b int not null)") + tk.MustExec("alter table t set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "t") + err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustExec("alter table t cache") + } else { + // Test dirty transaction. + tk.MustExec("create table t(a int not null primary key, b int not null) partition by hash(a) partitions 2") + tk.MustExec("alter table t set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "t") + err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + } + + insertStr := "insert into t values(0, 0)" + for i := 1; i < 10; i++ { + insertStr += fmt.Sprintf(",(%d, %d)", i, i) + } + tk.MustExec(insertStr) + + if x != 0 { + // Test dirty transaction. + tk.MustExec("begin") + } + + // Test Basic. + sql := "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t" + checkMPPInExplain(t, tk, "explain "+sql) + tk.MustQuery(sql).Check(testkit.Rows("10")) + + // Test Delete. + tk.MustExec("delete from t where a = 0") + + sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t" + checkMPPInExplain(t, tk, "explain "+sql) + tk.MustQuery(sql).Check(testkit.Rows("9")) + + sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ a, b from t order by 1" + checkMPPInExplain(t, tk, "explain "+sql) + tk.MustQuery(sql).Check(testkit.Rows("1 1", "2 2", "3 3", "4 4", "5 5", "6 6", "7 7", "8 8", "9 9")) + + // Test Insert. 
+		tk.MustExec("insert into t values(100, 100)")
+
+		sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t"
+		checkMPPInExplain(t, tk, "explain "+sql)
+		tk.MustQuery(sql).Check(testkit.Rows("10"))
+
+		sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ a, b from t order by 1, 2"
+		checkMPPInExplain(t, tk, "explain "+sql)
+		tk.MustQuery(sql).Check(testkit.Rows("1 1", "2 2", "3 3", "4 4", "5 5", "6 6", "7 7", "8 8", "9 9", "100 100"))
+
+		// Test Update.
+		tk.MustExec("update t set b = 200 where a = 100")
+
+		sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t"
+		checkMPPInExplain(t, tk, "explain "+sql)
+		tk.MustQuery(sql).Check(testkit.Rows("10"))
+
+		sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ a, b from t order by 1, 2"
+		checkMPPInExplain(t, tk, "explain "+sql)
+		tk.MustQuery(sql).Check(testkit.Rows("1 1", "2 2", "3 3", "4 4", "5 5", "6 6", "7 7", "8 8", "9 9", "100 200"))
+
+		if x != 0 {
+			// Test dirty transaction.
+			tk.MustExec("commit")
+		}
+
+		sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t"
+		checkMPPInExplain(t, tk, "explain "+sql)
+		tk.MustQuery(sql).Check(testkit.Rows("10"))
+
+		if x == 0 {
+			tk.MustExec("alter table t nocache")
+		}
+	}
+}
+
+func checkMPPInExplain(t *testing.T, tk *testkit.TestKit, sql string) {
+	rows := tk.MustQuery(sql).Rows()
+	resBuff := bytes.NewBufferString("")
+	for _, row := range rows {
+		fmt.Fprintf(resBuff, "%s\n", row)
+	}
+	res := resBuff.String()
+	require.Contains(t, res, "mpp[tiflash]")
+}
+
+func TestMPPRecovery(t *testing.T) {
+	store := testkit.CreateMockStore(t, withMockTiFlash(2))
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+
+	tk.MustExec("create table t(a int, b int)")
+	tk.MustExec("alter table t set tiflash replica 1")
+	tb := external.GetTableByName(t, tk, "test", "t")
+	err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true)
+	require.NoError(t, err)
+
+	checkStrs := []string{"0 0"}
+	insertStr := "insert into t values(0, 0)"
+	for i := 1; i < 1500; i++ {
+		insertStr += fmt.Sprintf(",(%d, %d)", i, i)
+		checkStrs = append(checkStrs, fmt.Sprintf("%d %d", i, i))
+	}
+	tk.MustExec(insertStr)
+	tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"")
+	sql := "select * from t order by 1, 2"
+	const packagePath = "github.com/pingcap/tidb/pkg/executor/internal/mpp/"
+
+	require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_mock_enable", "return()"))
+	require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_ignore_recovery_err", "return()"))
+	// Test different chunk sizes, and force one mpp err.
+	{
+		require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(1)"))
+
+		tk.MustExec("set @@tidb_max_chunk_size = default")
+		tk.MustQuery(sql).Check(testkit.Rows(checkStrs...))
+		tk.MustExec("set @@tidb_max_chunk_size = 32")
+		tk.MustQuery(sql).Check(testkit.Rows(checkStrs...))
+
+		require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times"))
+	}
+
+	// Test exceeding the max recovery times. The default max is 3.
+	{
+		require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(5)"))
+
+		tk.MustExec("set @@tidb_max_chunk_size = 32")
+		err := tk.QueryToErr(sql)
+		// The query should fail with the mocked mpp error.
+		require.Contains(t, err.Error(), "mock mpp err")
+
+		require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times"))
+	}
+
+	{
+		// When AllowFallbackToTiKV is set, mpp err recovery should be disabled.
+		// So even if we inject the mock err multiple times, the query should still succeed.
+		tk.MustExec("set @@tidb_allow_fallback_to_tikv = \"tiflash\"")
+		require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(5)"))
+
+		tk.MustExec("set @@tidb_max_chunk_size = 32")
+		tk.MustQuery(sql).Check(testkit.Rows(checkStrs...))
+
+		tk.MustExec("set @@tidb_allow_fallback_to_tikv = default")
+		require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times"))
+	}
+
+	// Test the hold logic. By default, 4 * MaxChunkSize rows are held.
+	{
+		require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(0)"))
+
+		tk.MustExec("set @@tidb_max_chunk_size = 32")
+		expectedHoldSize := 2
+		require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_hold_size", fmt.Sprintf("1*return(%d)", expectedHoldSize)))
+		tk.MustQuery(sql).Check(testkit.Rows(checkStrs...))
+		require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_hold_size"))
+
+		require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times"))
+	}
+	require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_ignore_recovery_err"))
+	require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_mock_enable"))
+
+	{
+		// We have 2 mock tiflash nodes, but the table is small, so only 1 tiflash node takes part in the computation.
+		require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_check_node_cnt", "return(1)"))
+
+		tk.MustExec("set @@tidb_max_chunk_size = 32")
+		tk.MustQuery(sql).Check(testkit.Rows(checkStrs...))
+
+		require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_check_node_cnt"))
+	}
+
+	tk.MustExec("set @@tidb_max_chunk_size = default")
+}
+
+func TestIssue50358(t *testing.T) {
+	store := testkit.CreateMockStore(t, withMockTiFlash(1))
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec("drop table if exists t")
+	tk.MustExec("create table t(a int not null primary key, b int not null)")
+	tk.MustExec("alter table t set tiflash replica 1")
+	tb := external.GetTableByName(t, tk, "test", "t")
+	err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true)
+	require.NoError(t, err)
+	tk.MustExec("insert into t values(1,0)")
+	tk.MustExec("insert into t values(2,0)")
+
+	tk.MustExec("drop table if exists t1")
+	tk.MustExec("create table t1(c int not null primary key)")
+	tk.MustExec("alter table t1 set tiflash replica 1")
+	tb = external.GetTableByName(t, tk, "test", "t1")
+	err = domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true)
+	require.NoError(t, err)
+	tk.MustExec("insert into t1 values(3)")
+
+	tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"")
+	tk.MustExec("set @@session.tidb_allow_mpp=ON")
+	for i := 0; i < 20; i++ {
+		// Test if the result is stable.
+ tk.MustQuery("select 8 from t join t1").Check(testkit.Rows("8", "8")) + } +} + +func TestMppAggShouldAlignFinalMode(t *testing.T) { + store := testkit.CreateMockStore(t, withMockTiFlash(1)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (" + + " d date," + + " v int," + + " primary key(d, v)" + + ") partition by range columns (d) (" + + " partition p1 values less than ('2023-07-02')," + + " partition p2 values less than ('2023-07-03')" + + ");") + tk.MustExec("alter table t set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "t") + err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustExec(`set tidb_partition_prune_mode='static';`) + err = failpoint.Enable("github.com/pingcap/tidb/pkg/expression/aggregation/show-agg-mode", "return(true)") + require.Nil(t, err) + + tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") + tk.MustQuery("explain format='brief' select 1 from (" + + " select /*+ read_from_storage(tiflash[t]) */ sum(1)" + + " from t where d BETWEEN '2023-07-01' and '2023-07-03' group by d" + + ") total;").Check(testkit.Rows("Projection 400.00 root 1->Column#4", + "└─HashAgg 400.00 root group by:test.t.d, funcs:count(complete,1)->Column#8", + " └─PartitionUnion 400.00 root ", + " ├─Projection 200.00 root test.t.d", + " │ └─HashAgg 200.00 root group by:test.t.d, funcs:firstrow(partial2,test.t.d)->test.t.d, funcs:count(final,Column#12)->Column#9", + " │ └─TableReader 200.00 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 200.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 200.00 mpp[tiflash] group by:test.t.d, funcs:count(partial1,1)->Column#12", + " │ └─TableRangeScan 250.00 mpp[tiflash] table:t, partition:p1 range:[2023-07-01,2023-07-03], keep order:false, stats:pseudo", + " └─Projection 200.00 root test.t.d", + " └─HashAgg 200.00 root group by:test.t.d, funcs:firstrow(partial2,test.t.d)->test.t.d, funcs:count(final,Column#14)->Column#10", + " └─TableReader 200.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 200.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 200.00 mpp[tiflash] group by:test.t.d, funcs:count(partial1,1)->Column#14", + " └─TableRangeScan 250.00 mpp[tiflash] table:t, partition:p2 range:[2023-07-01,2023-07-03], keep order:false, stats:pseudo")) + + err = failpoint.Disable("github.com/pingcap/tidb/pkg/expression/aggregation/show-agg-mode") + require.Nil(t, err) +} + +func TestMppTableReaderCacheForSingleSQL(t *testing.T) { + store := testkit.CreateMockStore(t, withMockTiFlash(1)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t(a int, b int, primary key(a))") + tk.MustExec("alter table t set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "t") + err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + + tk.MustExec("create table t2(a int, b int) partition by hash(b) partitions 4") + tk.MustExec("alter table t2 set tiflash replica 1") + tb = external.GetTableByName(t, tk, "test", "t2") + err = domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustExec("insert into t values(1, 1)") + tk.MustExec("insert into t values(2, 2)") + tk.MustExec("insert into t values(3, 3)") + tk.MustExec("insert into t values(4, 4)") + 
tk.MustExec("insert into t values(5, 5)") + + tk.MustExec("insert into t2 values(1, 1)") + tk.MustExec("insert into t2 values(2, 2)") + tk.MustExec("insert into t2 values(3, 3)") + tk.MustExec("insert into t2 values(4, 4)") + tk.MustExec("insert into t2 values(5, 5)") + + tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") + tk.MustExec("set @@session.tidb_allow_mpp=ON") + tk.MustExec("set @@session.tidb_enforce_mpp=ON") + tk.MustExec("set @@session.tidb_max_chunk_size=32") + + // Test TableReader cache for single SQL. + type testCase struct { + sql string + expectHitNum int32 + expectMissNum int32 + } + + testCases := []testCase{ + // Non-Partition + // Cache hit + {"select * from t", 0, 1}, + {"select * from t union select * from t", 1, 1}, + {"select * from t union select * from t t1 union select * from t t2", 2, 1}, + {"select * from t where b <= 3 union select * from t where b > 3", 1, 1}, // both full range + {"select * from t where a <= 3 union select * from t where a <= 3", 1, 1}, // same range + {"select * from t t1 join t t2 on t1.b=t2.b", 1, 1}, + + // Cache miss + {"select * from t union all select * from t", 0, 2}, // different mpp task root + {"select * from t where a <= 3 union select * from t where a > 3", 0, 2}, // different range + + // Partition + // Cache hit + {"select * from t2 union select * from t2", 1, 1}, + {"select * from t2 where b = 1 union select * from t2 where b = 5", 1, 1}, // same partition, full range + {"select * from t2 where b = 1 and a < 3 union select * from t2 where b = 5 and a < 3", 1, 1}, // same partition, same range + {"select * from t2 t1 join t2 t2 on t1.b=t2.b", 1, 1}, + {"select * from t2 t1 join t2 t2 on t1.b=t2.b where t1.a = 2 and t2.a = 2", 1, 1}, + + // Cache miss + {"select * from t2 union select * from t2 where b = 1", 0, 2}, // different partition + {"select * from t2 where b = 2 union select * from t2 where b = 1", 0, 2}, // different partition + } + + var hitNum, missNum atomic.Int32 + hitFunc := func() { + hitNum.Add(1) + } + missFunc := func() { + missNum.Add(1) + } + failpoint.EnableCall("github.com/pingcap/tidb/pkg/planner/core/mppTaskGeneratorTableReaderCacheHit", hitFunc) + failpoint.EnableCall("github.com/pingcap/tidb/pkg/planner/core/mppTaskGeneratorTableReaderCacheMiss", missFunc) + for _, tc := range testCases { + hitNum.Store(0) + missNum.Store(0) + tk.MustQuery(tc.sql) + require.Equal(t, tc.expectHitNum, hitNum.Load()) + require.Equal(t, tc.expectMissNum, missNum.Load()) + } +} + +func TestIndexMergeCarePreferTiflash(t *testing.T) { + store := testkit.CreateMockStore(t, withMockTiFlash(1)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE `t` (" + + "`i` bigint(20) NOT NULL, " + + "`w` varchar(32) NOT NULL," + + "`l` varchar(32) NOT NULL," + + "`a` tinyint(4) NOT NULL DEFAULT '0'," + + "`m` int(11) NOT NULL DEFAULT '0'," + + "`s` int(11) NOT NULL DEFAULT '0'," + + "PRIMARY KEY (`i`) /*T![clustered_index] NONCLUSTERED */," + + "KEY `idx_win_user_site_code` (`w`,`m`)," + + "KEY `idx_lose_user_site_code` (`l`,`m`)," + + "KEY `idx_win_site_code_status` (`w`,`a`)," + + "KEY `idx_lose_site_code_status` (`l`,`a`)" + + ")") + tk.MustExec("alter table t set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "t") + err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustQuery("explain format=\"brief\" SELECT" + + " /*+ 
read_from_storage(tiflash[a]) */ a.i FROM t a WHERE a.s = 0 AND a.a NOT IN (-1, 0) AND m >= 1726910326 AND m <= 1726910391 AND ( a.w IN ('1123') OR a.l IN ('1123'))").Check( + testkit.Rows("TableReader 0.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 0.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 0.00 mpp[tiflash] test.t.i", + " └─Selection 0.00 mpp[tiflash] ge(test.t.m, 1726910326), le(test.t.m, 1726910391), not(in(test.t.a, -1, 0)), or(eq(test.t.w, \"1123\"), eq(test.t.l, \"1123\"))", + " └─TableFullScan 10.00 mpp[tiflash] table:a pushed down filter:eq(test.t.s, 0), keep order:false, stats:pseudo")) +} +>>>>>>> 8df006280e9 (planner: make converge index merge path feel the prefer tiflash hint (#56227)):pkg/executor/test/tiflashtest/tiflash_test.go diff --git a/pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json b/pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json new file mode 100644 index 0000000000000..d00d6941014cb --- /dev/null +++ b/pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json @@ -0,0 +1,4011 @@ +[ + { + "Name": "TestMPPHints", + "Cases": [ + { + "SQL": "select /*+ MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#5, funcs:firstrow(Column#9)->test.t.a", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10", + " └─ExchangeReceiver 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#10)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, Column#14, funcs:sum(Column#12)->Column#10", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#12, test.t.a->Column#13, test.t.c->Column#14", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] 
ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#7, Column#8, funcs:sum(Column#6)->Column#5, funcs:firstrow(Column#7)->test.t.a", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#6, test.t.a->Column#7, test.t.c->Column#8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#8)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#11, Column#12, funcs:sum(Column#10)->Column#8", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#10, test.t.a->Column#11, test.t.c->Column#12", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ 
read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] 
ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t1) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t1) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t1) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, 
data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t1) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep 
order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG(), hash_agg() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: 
PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#7, Column#8, funcs:sum(Column#6)->Column#5, funcs:firstrow(Column#7)->test.t.a", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#6, test.t.a->Column#7, test.t.c->Column#8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG(), stream_agg() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#6)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#6", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG(), use_index(t, idx_a) */ a, sum(b) from t where a > 1 group by a, c", + "Plan": [ + "Projection 2666.67 root test.t.a, Column#5", + "└─HashAgg 2666.67 root group by:test.t.a, test.t.c, funcs:sum(Column#7)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─IndexLookUp 2666.67 root ", + " ├─IndexRangeScan(Build) 3333.33 cop[tikv] table:t, index:idx_a(a) range:(1,+inf], keep order:false, stats:pseudo", + " └─HashAgg(Probe) 2666.67 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#7", + " └─TableRowIDScan 3333.33 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]No available path for table test.t with the store type tiflash of the hint /*+ read_from_storage */, please check the status of the table replica and variable value of tidb_isolation_read_engines(map[0:{} 1:{} 2:{}])", + "[planner:1815]The agg can not push down to the MPP side, the MPP_1PHASE_AGG() hint is invalid" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG(), ignore_index(t, idx_a) */ a, sum(b) from t where a > 1 group by a, c", + "Plan": [ + "TableReader 2666.67 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 2666.67 mpp[tiflash] test.t.a, Column#5", + " └─Projection 2666.67 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 2666.67 mpp[tiflash] group by:Column#7, Column#8, funcs:sum(Column#6)->Column#5, funcs:firstrow(Column#7)->test.t.a", + " └─Projection 3333.33 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#6, test.t.a->Column#7, test.t.c->Column#8", + " └─ExchangeReceiver 3333.33 mpp[tiflash] ", + " └─ExchangeSender 3333.33 
mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─Selection 3333.33 mpp[tiflash] gt(test.t.a, 1)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG(), force_index(t, idx_b) */ a, sum(b) from t where b < 2 group by a, c", + "Plan": [ + "Projection 2658.67 root test.t.a, Column#5", + "└─HashAgg 2658.67 root group by:test.t.a, test.t.c, funcs:sum(Column#7)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─IndexLookUp 2658.67 root ", + " ├─IndexRangeScan(Build) 3323.33 cop[tikv] table:t, index:idx_b(b) range:[-inf,2), keep order:false, stats:pseudo", + " └─HashAgg(Probe) 2658.67 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#7", + " └─TableRowIDScan 3323.33 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]No available path for table test.t with the store type tiflash of the hint /*+ read_from_storage */, please check the status of the table replica and variable value of tidb_isolation_read_engines(map[0:{} 1:{} 2:{}])", + "[planner:1815]The agg can not push down to the MPP side, the MPP_2PHASE_AGG() hint is invalid" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG(), index_merge(t, idx_b, idx_a) */ a, sum(b) from t where b < 2 or a > 2 group by a, c", + "Plan": [ + "TableReader 4439.11 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 4439.11 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 4439.11 mpp[tiflash] test.t.a, Column#5", + " └─Projection 4439.11 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 4439.11 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#8)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─ExchangeReceiver 4439.11 mpp[tiflash] ", + " └─ExchangeSender 4439.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─HashAgg 4439.11 mpp[tiflash] group by:Column#11, Column#12, funcs:sum(Column#10)->Column#8", + " └─Projection 5548.89 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#10, test.t.a->Column#11, test.t.c->Column#12", + " └─Selection 5548.89 mpp[tiflash] or(lt(test.t.b, 2), gt(test.t.a, 2))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": [ + "[parser:8061]Optimizer hint index_merge is not supported by TiDB and is ignored" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), shuffle_join(t1, t2, t3), straight_join() */ * from t t1, t t2, t t3 where t1.a=t2.a and t2.b=t3.b", + "Plan": [ + "TableReader 15593.77 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 15593.77 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 15593.77 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 12475.01 mpp[tiflash] ", + " └─ExchangeSender 12475.01 mpp[tiflash] ExchangeType: HashPartition, Compression: 
FAST, Hash Cols: [name: test.t.b, collate: binary]", + " └─HashJoin 12475.01 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9980.01 mpp[tiflash] ", + " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), shuffle_join(t1, t2, t3), leading(t3, t1) */ * from t t1, t t2, t t3 where t1.a=t2.a and t2.b=t3.b", + "Plan": [ + "TableReader 124625374.88 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 124625374.88 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 124625374.88 mpp[tiflash] test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c", + " └─HashJoin 124625374.88 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a) eq(test.t.b, test.t.b)]", + " ├─ExchangeReceiver(Build) 9980.01 mpp[tiflash] ", + " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.b, collate: binary]", + " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 99800100.00 mpp[tiflash] ", + " └─ExchangeSender 99800100.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.b, collate: binary]", + " └─HashJoin 99800100.00 mpp[tiflash] CARTESIAN inner join", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), broadcast_join(t1, t2, t3), straight_join() */ * from t t2, t t1, t t3 where t1.a=t2.a and t2.b=t3.b", + "Plan": [ + "TableReader 15593.77 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 15593.77 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 15593.77 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12475.01 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 
9980.01 mpp[tiflash] ", + " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), broadcast_join(t1, t2, t3), leading(t2, t3) */ * from t t1, t t2, t t3 where t1.a=t2.a and t2.b=t3.b", + "Plan": [ + "TableReader 15593.77 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 15593.77 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 15593.77 mpp[tiflash] test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c", + " └─HashJoin 15593.77 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─HashJoin(Probe) 12475.01 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", + " ├─ExchangeReceiver(Build) 9980.01 mpp[tiflash] ", + " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ qb_name(qb, v), MPP_1PHASE_AGG(@qb) */ * from v", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#5, funcs:firstrow(Column#9)->test.t.a", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10", + " └─ExchangeReceiver 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ qb_name(qb, v), MPP_2PHASE_AGG(@qb) */ * from v", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#10)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: 
test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, Column#14, funcs:sum(Column#12)->Column#10", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#12, test.t.a->Column#13, test.t.c->Column#14", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ qb_name(qb, v1), shuffle_join(t1@qb, t2@qb) */ * from v1", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12487.50 mpp[tiflash] test.t.a", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ qb_name(qb, v1), broadcast_join(t1@qb, t2@qb) */ * from v1", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12487.50 mpp[tiflash] test.t.a", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "SELECT /*+ shuffle_join(t) */ * FROM t WHERE EXISTS (SELECT /*+ SEMI_JOIN_REWRITE() */ 1 FROM t t1 WHERE t1.b = t.b);", + "Plan": [ + "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", + " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", + " ├─Projection(Build) 7992.00 mpp[tiflash] test.t.b", + " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, funcs:firstrow(test.t.b)->test.t.b", + " │ └─ExchangeReceiver 7992.00 mpp[tiflash] ", + " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, ", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, 
Hash Cols: [name: test.t.b, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "SELECT /*+ broadcast_join(t) */ * FROM t WHERE EXISTS (SELECT /*+ SEMI_JOIN_REWRITE() */ 1 FROM t t1 WHERE t1.b = t.b);", + "Plan": [ + "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", + " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", + " ├─ExchangeReceiver(Build) 7992.00 mpp[tiflash] ", + " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 7992.00 mpp[tiflash] test.t.b", + " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, funcs:firstrow(test.t.b)->test.t.b", + " │ └─ExchangeReceiver 7992.00 mpp[tiflash] ", + " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, ", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select * from t t1 where t1.a < (select /*+ MPP_1PHASE_AGG() */ sum(t2.a) from t t2 where t2.b = t1.b);", + "Plan": [ + "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", + " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)], other cond:lt(cast(test.t.a, decimal(10,0) BINARY), Column#9)", + " ├─ExchangeReceiver(Build) 7992.00 mpp[tiflash] ", + " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 7992.00 mpp[tiflash] Column#9, test.t.b", + " │ └─HashAgg 7992.00 mpp[tiflash] group by:Column#32, funcs:sum(Column#31)->Column#9, funcs:firstrow(Column#32)->test.t.b", + " │ └─Projection 9990.00 mpp[tiflash] cast(test.t.a, decimal(10,0) BINARY)->Column#31, test.t.b->Column#32", + " │ └─ExchangeReceiver 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select * from t t1 where t1.a < (select /*+ MPP_2PHASE_AGG() */ sum(t2.a) from t t2 where t2.b = t1.b);", + "Plan": [ + "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", + " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)], other cond:lt(cast(test.t.a, decimal(10,0) BINARY), Column#9)", + " 
├─ExchangeReceiver(Build) 7992.00 mpp[tiflash] ", + " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 7992.00 mpp[tiflash] Column#9, test.t.b", + " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, funcs:sum(Column#20)->Column#9, funcs:firstrow(test.t.b)->test.t.b", + " │ └─ExchangeReceiver 7992.00 mpp[tiflash] ", + " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " │ └─HashAgg 7992.00 mpp[tiflash] group by:Column#36, funcs:sum(Column#35)->Column#20", + " │ └─Projection 9990.00 mpp[tiflash] cast(test.t.a, decimal(10,0) BINARY)->Column#35, test.t.b->Column#36", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ MPP_1PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", + "Plan": [ + "HashAgg 3403.09 root group by:Column#10, Column#11, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11", + "└─Union 3403.09 root ", + " ├─Selection 1701.55 root lt(Column#6, 18)", + " │ └─CTEFullScan 2126.93 root CTE:cte data:CTE_0", + " └─Selection 1701.55 root gt(test.t.b, 1)", + " └─CTEFullScan 2126.93 root CTE:cte data:CTE_0", + "CTE_0 2126.93 root Non-Recursive CTE", + "└─TableReader(Seed Part) 2126.93 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 2126.93 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection 2126.93 mpp[tiflash] or(lt(Column#5, 18), gt(test.t.b, 1))", + " └─Projection 2658.67 mpp[tiflash] Column#5, test.t.b", + " └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#5, funcs:firstrow(test.t.b)->test.t.b", + " └─ExchangeReceiver 3323.33 mpp[tiflash] ", + " └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ MPP_2PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", + "Plan": [ + "HashAgg 3403.09 root group by:Column#10, Column#11, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11", + "└─Union 3403.09 root ", + " ├─Selection 1701.55 root lt(Column#6, 18)", + " │ └─CTEFullScan 2126.93 root CTE:cte data:CTE_0", + " └─Selection 1701.55 root gt(test.t.b, 1)", + " └─CTEFullScan 2126.93 root CTE:cte data:CTE_0", + "CTE_0 2126.93 root Non-Recursive CTE", + "└─TableReader(Seed Part) 2126.93 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 2126.93 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection 2126.93 mpp[tiflash] or(lt(Column#5, 18), gt(test.t.b, 1))", + " └─Projection 2658.67 mpp[tiflash] Column#5, test.t.b", + " └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:sum(Column#22)->Column#5, funcs:firstrow(test.t.b)->test.t.b", + " └─ExchangeReceiver 2658.67 mpp[tiflash] ", + " └─ExchangeSender 2658.67 
mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#22", + " └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ shuffle_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", + "Plan": [ + "HashAgg 7095.48 root group by:Column#13, Column#14, funcs:firstrow(Column#13)->Column#13, funcs:firstrow(Column#14)->Column#14", + "└─Union 11086.68 root ", + " ├─Selection 5543.34 root lt(test.t.a, 18)", + " │ └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", + " └─Selection 5543.34 root gt(test.t.b, 1)", + " └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", + "CTE_0 6929.18 root Non-Recursive CTE", + "└─TableReader(Seed Part) 6929.18 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 6929.18 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 6929.18 mpp[tiflash] test.t.a, test.t.b", + " └─HashJoin 6929.18 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)], other cond:or(lt(test.t.a, 18), gt(test.t.b, 1))", + " ├─ExchangeReceiver(Build) 5543.34 mpp[tiflash] ", + " │ └─ExchangeSender 5543.34 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 5543.34 mpp[tiflash] not(isnull(test.t.a)), or(lt(test.t.a, 18), gt(test.t.b, 1))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ broadcast_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", + "Plan": [ + "HashAgg 7095.48 root group by:Column#13, Column#14, funcs:firstrow(Column#13)->Column#13, funcs:firstrow(Column#14)->Column#14", + "└─Union 11086.68 root ", + " ├─Selection 5543.34 root lt(test.t.a, 18)", + " │ └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", + " └─Selection 5543.34 root gt(test.t.b, 1)", + " └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", + "CTE_0 6929.18 root Non-Recursive CTE", + "└─TableReader(Seed Part) 6929.18 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 6929.18 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 6929.18 mpp[tiflash] test.t.a, test.t.b", + " └─HashJoin 6929.18 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)], other cond:or(lt(test.t.a, 18), gt(test.t.b, 1))", + " ├─ExchangeReceiver(Build) 5543.34 mpp[tiflash] ", + " │ └─ExchangeSender 5543.34 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 5543.34 mpp[tiflash] not(isnull(test.t.a)), or(lt(test.t.a, 18), gt(test.t.b, 1))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep 
order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ MERGE(), MPP_1PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", + "Plan": [ + "TableReader 3013.16 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 3013.16 mpp[tiflash] Column#20, Column#21", + " └─HashAgg 3013.16 mpp[tiflash] group by:Column#20, Column#21, funcs:firstrow(Column#20)->Column#20, funcs:firstrow(Column#21)->Column#21", + " └─ExchangeReceiver 3013.16 mpp[tiflash] ", + " └─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary], [name: Column#21, collate: binary]", + " └─Union 3013.16 mpp[tiflash] ", + " ├─Selection 2126.93 mpp[tiflash] lt(Column#12, 18)", + " │ └─Projection 2658.67 mpp[tiflash] Column#12, test.t.b", + " │ └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#12, funcs:firstrow(test.t.b)->test.t.b", + " │ └─ExchangeReceiver 3323.33 mpp[tiflash] ", + " │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " └─Projection 886.22 mpp[tiflash] Column#19->Column#20, test.t.b->Column#21", + " └─HashAgg 886.22 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#19, funcs:firstrow(test.t.b)->test.t.b", + " └─ExchangeReceiver 1107.78 mpp[tiflash] ", + " └─ExchangeSender 1107.78 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " └─Selection 1107.78 mpp[tiflash] gt(test.t.b, 1), lt(test.t.a, 60)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ MERGE(), MPP_2PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", + "Plan": [ + "TableReader 3013.16 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 3013.16 mpp[tiflash] Column#20, Column#21", + " └─HashAgg 3013.16 mpp[tiflash] group by:Column#20, Column#21, funcs:firstrow(Column#20)->Column#20, funcs:firstrow(Column#21)->Column#21", + " └─ExchangeReceiver 3013.16 mpp[tiflash] ", + " └─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary], [name: Column#21, collate: binary]", + " └─Union 3013.16 mpp[tiflash] ", + " ├─Selection 2126.93 mpp[tiflash] lt(Column#12, 18)", + " │ └─Projection 2658.67 mpp[tiflash] Column#12, test.t.b", + " │ └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:sum(Column#32)->Column#12, funcs:firstrow(test.t.b)->test.t.b", + " │ └─ExchangeReceiver 2658.67 mpp[tiflash] ", + " │ └─ExchangeSender 2658.67 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " │ └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#32", + " │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " └─Projection 886.22 
mpp[tiflash] Column#19->Column#20, test.t.b->Column#21", + " └─HashAgg 886.22 mpp[tiflash] group by:test.t.b, funcs:sum(Column#46)->Column#19, funcs:firstrow(test.t.b)->test.t.b", + " └─ExchangeReceiver 886.22 mpp[tiflash] ", + " └─ExchangeSender 886.22 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", + " └─HashAgg 886.22 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#46", + " └─Selection 1107.78 mpp[tiflash] gt(test.t.b, 1), lt(test.t.a, 60)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ MERGE(), shuffle_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", + "Plan": [ + "TableReader 5322.67 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 5322.67 mpp[tiflash] Column#29, Column#30", + " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, funcs:firstrow(Column#29)->Column#29, funcs:firstrow(Column#30)->Column#30", + " └─ExchangeReceiver 5322.67 mpp[tiflash] ", + " └─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#29, collate: binary], [name: Column#30, collate: binary]", + " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, ", + " └─Union 8316.67 mpp[tiflash] ", + " ├─Projection 4154.17 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30", + " │ └─HashJoin 4154.17 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " │ ├─ExchangeReceiver(Build) 3323.33 mpp[tiflash] ", + " │ │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))", + " │ │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " │ └─ExchangeReceiver(Probe) 3323.33 mpp[tiflash] ", + " │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Projection 4162.50 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30", + " └─HashJoin 4162.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 3330.00 mpp[tiflash] ", + " │ └─ExchangeSender 3330.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 3330.00 mpp[tiflash] gt(test.t.b, 1), not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "WITH CTE AS (SELECT /*+ MERGE(), broadcast_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union 
select * from cte where cte.b > 1;", + "Plan": [ + "TableReader 5322.67 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 5322.67 mpp[tiflash] Column#29, Column#30", + " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, funcs:firstrow(Column#29)->Column#29, funcs:firstrow(Column#30)->Column#30", + " └─ExchangeReceiver 5322.67 mpp[tiflash] ", + " └─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#29, collate: binary], [name: Column#30, collate: binary]", + " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, ", + " └─Union 8316.67 mpp[tiflash] ", + " ├─Projection 4154.17 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30", + " │ └─HashJoin 4154.17 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " │ ├─ExchangeReceiver(Build) 3323.33 mpp[tiflash] ", + " │ │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))", + " │ │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " │ └─Selection(Probe) 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Projection 4162.50 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30", + " └─HashJoin 4162.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 3330.00 mpp[tiflash] ", + " │ └─ExchangeSender 3330.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 3330.00 mpp[tiflash] gt(test.t.b, 1), not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t2) */ * from t t1 left join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ 
└─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]Join hints are conflict, you can only specify one type of join" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t1) */ * from t t1 right join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints", + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints" + ] + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t1) */ * from t t1 right join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints", + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints" + ] + }, + { + "SQL": "set @@session.tidb_opt_mpp_outer_join_fixed_build_side = 1", + "Plan": null, + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t2) */ * from t t1 left join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root 
MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t1) */ * from t t1 right join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t1) */ * from t t1 right join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ 
└─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set @@session.tidb_opt_mpp_outer_join_fixed_build_side = 0", + "Plan": null, + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t2) */ * from t t1 left join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t1) */ * from t t1 right join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 
9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t1) */ * from t t1 right join t t2 on t1.a=t2.a", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_build(t2@sel_2) */ a from t t1 where t1.a>1 or t1.a in (select a from t t2);", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a", + " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)", + " └─HashJoin 10000.00 mpp[tiflash] CARTESIAN left outer semi join, other cond:eq(test.t.a, test.t.a)", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for left outer semi join, please check the hint" + ] + }, + { + "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_build(t1) */ a from t t1 where t1.a>1 or t1.a not in (select a from t t2);", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a", + " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)", + " └─HashJoin 10000.00 mpp[tiflash] Null-aware anti left outer semi join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints", + "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for anti left outer semi join, please check the hint", + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints" + ] + }, + { + "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_probe(t2@sel_2) */ a from t t1 where t1.a>1 or 
t1.a in (select a from t t2);", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a", + " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)", + " └─HashJoin 10000.00 mpp[tiflash] CARTESIAN left outer semi join, other cond:eq(test.t.a, test.t.a)", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints", + "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for left outer semi join, please check the hint", + "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints" + ] + }, + { + "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_probe(t1) */ a from t t1 where t1.a>1 or t1.a not in (select a from t t2);", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a", + " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)", + " └─HashJoin 10000.00 mpp[tiflash] Null-aware anti left outer semi join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for anti left outer semi join, please check the hint" + ] + } + ] + }, + { + "Name": "TestMPPHintsScope", + "Cases": [ + { + "SQL": "set @@session.tidb_allow_mpp=true", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select /*+ MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader_31 8000.00 root MppVersion: 2, data:ExchangeSender_30", + "└─ExchangeSender_30 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection_5 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection_29 8000.00 mpp[tiflash] Column#5, test.t.a", + " └─HashAgg_27 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#5, funcs:firstrow(Column#9)->test.t.a", + " └─Projection_32 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10", + " └─ExchangeReceiver_23 10000.00 mpp[tiflash] ", + " └─ExchangeSender_22 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─TableFullScan_21 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain select /*+ MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "TableReader_35 8000.00 root MppVersion: 2, data:ExchangeSender_34", + "└─ExchangeSender_34 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection_5 8000.00 mpp[tiflash] test.t.a, Column#5", + " └─Projection_30 8000.00 
mpp[tiflash] Column#5, test.t.a", + " └─HashAgg_31 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#10)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─ExchangeReceiver_33 8000.00 mpp[tiflash] ", + " └─ExchangeSender_32 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", + " └─HashAgg_29 8000.00 mpp[tiflash] group by:Column#13, Column#14, funcs:sum(Column#12)->Column#10", + " └─Projection_36 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#12, test.t.a->Column#13, test.t.c->Column#14", + " └─TableFullScan_21 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain select /*+ shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_22 12487.50 root MppVersion: 2, data:ExchangeSender_21", + "└─ExchangeSender_21 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_20 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_13(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_12 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection_11 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_10 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver_17(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender_16 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection_15 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_14 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain select /*+ broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_20 12487.50 root MppVersion: 2, data:ExchangeSender_19", + "└─ExchangeSender_19 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_18 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_13(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_12 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_11 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_10 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection_15(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_14 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set @@session.tidb_enforce_mpp=true", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select /*+ hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_69 12487.50 root MppVersion: 2, data:ExchangeSender_68", + "└─ExchangeSender_68 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_61 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_65(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_64 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_63 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_62 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection_67(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " 
└─TableFullScan_66 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain select /*+ merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "MergeJoin_10 12487.50 root inner join, left key:test.t.a, right key:test.t.a", + "├─Projection_19(Build) 9990.00 root test.t.a, test.t.b, test.t.c", + "│ └─IndexLookUp_18 9990.00 root ", + "│ ├─IndexFullScan_16(Build) 9990.00 cop[tikv] table:t2, index:idx_a(a) keep order:true, stats:pseudo", + "│ └─TableRowIDScan_17(Probe) 9990.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─Projection_15(Probe) 9990.00 root test.t.a, test.t.b, test.t.c", + " └─IndexLookUp_14 9990.00 root ", + " ├─IndexFullScan_12(Build) 9990.00 cop[tikv] table:t1, index:idx_a(a) keep order:true, stats:pseudo", + " └─TableRowIDScan_13(Probe) 9990.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because you have used hint to specify a join algorithm which is not supported by mpp now.", + "MPP mode may be blocked because you have used hint to specify a join algorithm which is not supported by mpp now." + ] + }, + { + "SQL": "set @@session.tidb_enforce_mpp=false", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select /*+ hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_69 12487.50 root MppVersion: 2, data:ExchangeSender_68", + "└─ExchangeSender_68 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_61 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_65(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_64 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_63 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_62 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection_67(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_66 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain select /*+ merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "MergeJoin_10 12487.50 root inner join, left key:test.t.a, right key:test.t.a", + "├─Projection_19(Build) 9990.00 root test.t.a, test.t.b, test.t.c", + "│ └─IndexLookUp_18 9990.00 root ", + "│ ├─IndexFullScan_16(Build) 9990.00 cop[tikv] table:t2, index:idx_a(a) keep order:true, stats:pseudo", + "│ └─TableRowIDScan_17(Probe) 9990.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─Projection_15(Probe) 9990.00 root test.t.a, test.t.b, test.t.c", + " └─IndexLookUp_14 9990.00 root ", + " ├─IndexFullScan_12(Build) 9990.00 cop[tikv] table:t1, index:idx_a(a) keep order:true, stats:pseudo", + " └─TableRowIDScan_13(Probe) 9990.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain select /*+ read_from_storage(tiflash[t1, t2]) hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_29 12487.50 root MppVersion: 2, data:ExchangeSender_28", + "└─ExchangeSender_28 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_21 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_25(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_24 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_23 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ 
└─TableFullScan_22 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection_27(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_26 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain select /*+ read_from_storage(tiflash[t1, t2]) merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "MergeJoin_11 12487.50 root inner join, left key:test.t.a, right key:test.t.a", + "├─Sort_21(Build) 9990.00 root test.t.a", + "│ └─TableReader_20 9990.00 root MppVersion: 2, data:ExchangeSender_19", + "│ └─ExchangeSender_19 9990.00 mpp[tiflash] ExchangeType: PassThrough", + "│ └─Selection_18 9990.00 mpp[tiflash] not(isnull(test.t.a))", + "│ └─TableFullScan_17 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", + "└─Sort_16(Probe) 9990.00 root test.t.a", + " └─TableReader_15 9990.00 root MppVersion: 2, data:ExchangeSender_14", + " └─ExchangeSender_14 9990.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection_13 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_12 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set @@session.tidb_allow_mpp=false", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select /*+ MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "Projection_4 8000.00 root test.t.a, Column#5", + "└─HashAgg_10 8000.00 root group by:test.t.a, test.t.c, funcs:sum(Column#6)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─TableReader_11 8000.00 root data:HashAgg_5", + " └─HashAgg_5 8000.00 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#6", + " └─TableFullScan_8 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The agg can not push down to the MPP side, the MPP_1PHASE_AGG() hint is invalid" + ] + }, + { + "SQL": "explain select /*+ MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c", + "Plan": [ + "Projection_4 8000.00 root test.t.a, Column#5", + "└─HashAgg_10 8000.00 root group by:test.t.a, test.t.c, funcs:sum(Column#6)->Column#5, funcs:firstrow(test.t.a)->test.t.a", + " └─TableReader_11 8000.00 root data:HashAgg_5", + " └─HashAgg_5 8000.00 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#6", + " └─TableFullScan_8 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The agg can not push down to the MPP side, the MPP_2PHASE_AGG() hint is invalid" + ] + }, + { + "SQL": "explain select /*+ shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "HashJoin_37 12487.50 root inner join, equal:[eq(test.t.a, test.t.a)]", + "├─TableReader_56(Build) 9990.00 root data:Selection_55", + "│ └─Selection_55 9990.00 cop[tikv] not(isnull(test.t.a))", + "│ └─TableFullScan_54 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─TableReader_49(Probe) 9990.00 root data:Selection_48", + " └─Selection_48 9990.00 cop[tikv] not(isnull(test.t.a))", + " └─TableFullScan_47 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The join can not push down to the MPP side, the shuffle_join() hint is invalid" + ] + }, + { + "SQL": "explain select /*+ broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "HashJoin_37 12487.50 root inner join, equal:[eq(test.t.a, test.t.a)]", + 
"├─TableReader_56(Build) 9990.00 root data:Selection_55", + "│ └─Selection_55 9990.00 cop[tikv] not(isnull(test.t.a))", + "│ └─TableFullScan_54 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─TableReader_49(Probe) 9990.00 root data:Selection_48", + " └─Selection_48 9990.00 cop[tikv] not(isnull(test.t.a))", + " └─TableFullScan_47 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]The join can not push down to the MPP side, the broadcast_join() hint is invalid" + ] + } + ] + }, + { + "Name": "TestMPPBCJModel", + "Cases": [ + { + "SQL": "set @@session.tidb_allow_mpp=true", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_79 12487.50 root MppVersion: 2, data:ExchangeSender_78", + "└─ExchangeSender_78 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_77 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection_46(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=1", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_81 12487.50 root MppVersion: 2, data:ExchangeSender_80", + "└─ExchangeSender_80 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_79 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver_48(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender_47 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection_46 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + } + ] + }, + { + "Name": "TestMPPPreferBCJ", + "Cases": [ + { + "SQL": "explain select * from t1, t2 where t1.a=t2.b", + "Plan": [ + "TableReader_36 1.00 root MppVersion: 2, data:ExchangeSender_35", + "└─ExchangeSender_35 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_34 1.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.b)]", + " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", + " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", + " └─Selection_17(Probe) 8.00 mpp[tiflash] 
not(isnull(test.t2.b))", + " └─TableFullScan_16 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" + ], + "Warn": null + }, + { + "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=1", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t1, t2 where t1.a=t2.b", + "Plan": [ + "TableReader_38 1.00 root MppVersion: 2, data:ExchangeSender_37", + "└─ExchangeSender_37 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_36 1.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.b)]", + " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t1.a, collate: binary]", + " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", + " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", + " └─ExchangeReceiver_19(Probe) 8.00 mpp[tiflash] ", + " └─ExchangeSender_18 8.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t2.b, collate: binary]", + " └─Selection_17 8.00 mpp[tiflash] not(isnull(test.t2.b))", + " └─TableFullScan_16 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" + ], + "Warn": null + }, + { + "SQL": "insert into t2 values (9); analyze table t2;", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t1, t2 where t1.a=t2.b", + "Plan": [ + "TableReader_36 1.00 root MppVersion: 2, data:ExchangeSender_35", + "└─ExchangeSender_35 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_34 1.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.b)]", + " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", + " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", + " └─Selection_17(Probe) 9.00 mpp[tiflash] not(isnull(test.t2.b))", + " └─TableFullScan_16 9.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" + ], + "Warn": null + } + ] + }, + { + "Name": "TestMPPBCJModelOneTiFlash", + "Cases": [ + { + "SQL": "set @@session.tidb_allow_mpp=true", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_81 12487.50 root MppVersion: 2, data:ExchangeSender_80", + "└─ExchangeSender_80 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_79 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver_48(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender_47 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─Selection_46 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set 
@@session.tidb_prefer_broadcast_join_by_exchange_data_size=1", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", + "Plan": [ + "TableReader_79 12487.50 root MppVersion: 2, data:ExchangeSender_78", + "└─ExchangeSender_78 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_77 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection_46(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warn": null + } + ] + }, + { + "Name": "TestMPPRightSemiJoin", + "Cases": [ + { + "SQL": "set @@session.tidb_allow_mpp=true", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t1 where exists (select * from t2 where t1.a=t2.b)", + "Plan": [ + "TableReader_36 0.80 root MppVersion: 2, data:ExchangeSender_35", + "└─ExchangeSender_35 0.80 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_34 0.80 mpp[tiflash] semi join, equal:[eq(test.t1.a, test.t2.b)]", + " ├─ExchangeReceiver_17(Build) 8.00 mpp[tiflash] ", + " │ └─ExchangeSender_16 8.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_15 8.00 mpp[tiflash] not(isnull(test.t2.b))", + " │ └─TableFullScan_14 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false", + " └─Selection_13(Probe) 1.00 mpp[tiflash] not(isnull(test.t1.a))", + " └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false" + ], + "Warn": null + }, + { + "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@session.tidb_broadcast_join_threshold_size=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@session.tidb_broadcast_join_threshold_count=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t1 where exists (select * from t2 where t1.a=t2.b)", + "Plan": [ + "TableReader_38 0.80 root MppVersion: 2, data:ExchangeSender_37", + "└─ExchangeSender_37 0.80 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_36 0.80 mpp[tiflash] semi join, equal:[eq(test.t1.a, test.t2.b)]", + " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t1.a, collate: binary]", + " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", + " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", + " └─ExchangeReceiver_19(Probe) 8.00 mpp[tiflash] ", + " └─ExchangeSender_18 8.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t2.b, collate: binary]", + " └─Selection_17 8.00 mpp[tiflash] not(isnull(test.t2.b))", + " └─TableFullScan_16 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" + ], + "Warn": null + } + ] + }, + { + "Name": "TestMPPRightOuterJoin", + "Cases": [ + { + "SQL": "set @@session.tidb_allow_mpp=true", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a=t2.b and t1.c < t2.d", + "Plan": [ + "TableReader_32 3.00 root MppVersion: 
2, data:ExchangeSender_31", + "└─ExchangeSender_31 3.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_30 3.00 mpp[tiflash] right outer join, equal:[eq(test.t1.a, test.t2.b)], other cond:lt(test.t1.c, test.t2.d)", + " ├─ExchangeReceiver_14(Build) 5.00 mpp[tiflash] ", + " │ └─ExchangeSender_13 5.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection_12 5.00 mpp[tiflash] not(isnull(test.t1.a)), not(isnull(test.t1.c))", + " │ └─TableFullScan_11 5.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", + " └─TableFullScan_15(Probe) 3.00 mpp[tiflash] table:t2 keep order:false" + ], + "Warn": null + }, + { + "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@session.tidb_broadcast_join_threshold_size=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@session.tidb_broadcast_join_threshold_count=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select * from t1 right join t2 on t1.a=t2.b and t1.c < t2.d", + "Plan": [ + "TableReader_34 3.00 root MppVersion: 2, data:ExchangeSender_33", + "└─ExchangeSender_33 3.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin_32 3.00 mpp[tiflash] right outer join, equal:[eq(test.t1.a, test.t2.b)], other cond:lt(test.t1.c, test.t2.d)", + " ├─ExchangeReceiver_17(Build) 3.00 mpp[tiflash] ", + " │ └─ExchangeSender_16 3.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t2.b, collate: binary]", + " │ └─TableFullScan_15 3.00 mpp[tiflash] table:t2 keep order:false", + " └─ExchangeReceiver_14(Probe) 5.00 mpp[tiflash] ", + " └─ExchangeSender_13 5.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t1.a, collate: binary]", + " └─Selection_12 5.00 mpp[tiflash] not(isnull(test.t1.a)), not(isnull(test.t1.c))", + " └─TableFullScan_11 5.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false" + ], + "Warn": null + } + ] + }, + { + "Name": "TestIssue37520", + "Cases": [ + { + "SQL": "select /*+ inl_join(t1@sel_2) */ a, (select b from t1 where t1.a = t2.b) from t2;", + "Plan": [ + "IndexJoin 12500.00 root left outer join, inner:TableReader, outer key:test.t2.b, inner key:test.t1.a, equal cond:eq(test.t2.b, test.t1.a)", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableRangeScan", + " └─TableRangeScan 10000.00 cop[tikv] table:t1 range: decided by [test.t2.b], keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ inl_join(t2) */ a, (select b from t1 where t1.a = t2.b) from t2;", + "Plan": [ + "HashJoin 12500.00 root left outer join, equal:[eq(test.t2.b, test.t1.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Warn": [ + "[planner:1815]Optimizer Hint /*+ INL_JOIN(t2) */ or /*+ TIDB_INLJ(t2) */ is inapplicable" + ] + }, + { + "SQL": "select /*+ inl_join(t2@sel_2) */ * from t1 where exists ( select /*+ semi_join_rewrite() */ * from t2 where t1.a = t2.a);", + "Plan": [ + "IndexJoin 9990.00 root inner join, inner:HashAgg, outer key:test.t1.a, inner key:test.t2.a, equal cond:eq(test.t1.a, test.t2.a)", + "├─TableReader(Build) 10000.00 root 
data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─HashAgg(Probe) 79920000.00 root group by:test.t2.a, funcs:firstrow(test.t2.a)->test.t2.a", + " └─IndexReader 79920000.00 root index:HashAgg", + " └─HashAgg 79920000.00 cop[tikv] group by:test.t2.a, ", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", + " └─IndexRangeScan 10000.00 cop[tikv] table:t2, index:ia(a) range: decided by [eq(test.t2.a, test.t1.a)], keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "select /*+ inl_join(t1) */ * from t1 where exists ( select /*+ semi_join_rewrite() */ * from t2 where t1.a = t2.a);", + "Plan": [ + "IndexJoin 9990.00 root inner join, inner:TableReader, outer key:test.t2.a, inner key:test.t1.a, equal cond:eq(test.t2.a, test.t1.a)", + "├─StreamAgg(Build) 7992.00 root group by:test.t2.a, funcs:firstrow(test.t2.a)->test.t2.a", + "│ └─IndexReader 7992.00 root index:StreamAgg", + "│ └─StreamAgg 7992.00 cop[tikv] group by:test.t2.a, ", + "│ └─IndexFullScan 9990.00 cop[tikv] table:t2, index:ia(a) keep order:true, stats:pseudo", + "└─TableReader(Probe) 7992.00 root data:TableRangeScan", + " └─TableRangeScan 7992.00 cop[tikv] table:t1 range: decided by [test.t2.a], keep order:false, stats:pseudo" + ], + "Warn": null + } + ] + }, + { + "Name": "TestHintScope", + "Cases": [ + { + "SQL": "select /*+ MERGE_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ INL_JOIN(t3) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ MERGE_JOIN(test.t1) */ t1.a, t1.b from t t1, (select /*+ INL_JOIN(test.t3) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ MERGE_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ HASH_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)->Sort}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ INL_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ HASH_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "IndexJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ INL_JOIN(test.t1) */ t1.a, t1.b from t t1, (select /*+ HASH_JOIN(test.t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "IndexJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ INL_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ MERGE_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "IndexJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ HASH_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ MERGE_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": 
"RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ HASH_JOIN(test.t1) */ t1.a, t1.b from t t1, (select /*+ MERGE_JOIN(test.t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ HASH_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ INL_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "RightHashJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.c,test.t.a)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ MERGE_JOIN(t1) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "MergeInnerJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ INL_JOIN(t1) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "IndexJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ HASH_JOIN(t1) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Best": "RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" + }, + { + "SQL": "select /*+ HASH_JOIN(@sel_2 t1@sel_2, t2@sel_2), MERGE_JOIN(@sel_1 t1@sel_1, t2@sel_1) */ * from (select t1.a, t1.b from t t1, t t2 where t1.a = t2.a) t1, t t2 where t1.b = t2.b", + "Best": "MergeInnerJoin{LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)->Sort->TableReader(Table(t))->Sort}(test.t.b,test.t.b)" + }, + { + "SQL": "select /*+ STREAM_AGG() */ s, count(s) from (select /*+ HASH_AGG() */ sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Projection->HashAgg->Sort->StreamAgg->Projection" + }, + { + "SQL": "select /*+ HASH_AGG() */ s, count(s) from (select /*+ STREAM_AGG() */ sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Sort->Projection->StreamAgg->HashAgg->Projection" + }, + { + "SQL": "select /*+ HASH_AGG() */ s, count(s) from (select sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Projection->HashAgg->HashAgg->Projection" + }, + { + "SQL": "select /*+ STREAM_AGG() */ s, count(s) from (select sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Projection->HashAgg->Sort->StreamAgg->Projection" + } + ] + }, + { + "Name": "TestIndexHint", + "Cases": [ + { + "SQL": "select /*+ USE_INDEX(t, c_d_e) */ * from t", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": 
"use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" + }, + { + "SQL": "select /*+ USE_INDEX(test.t, c_d_e) */ * from t", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" + }, + { + "SQL": "select /*+ IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ IGNORE_INDEX(test.t, c_d_e) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, c_d_e) */ * from t", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(test.t, c_d_e) */ * from t", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e) */ * from t t1", + "Best": "TableReader(Table(t))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`)" + }, + { + "SQL": "select /*+ IGNORE_INDEX(t, c_d_e) */ t1.c from t t1 order by t1.c", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t1` `c_d_e`), order_index(@`sel_1` `test`.`t1` `c_d_e`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, c_d_e) */ * from t t1", + "Best": "TableReader(Table(t))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX(t1, c_d_e) */ * from t t1", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`)" + }, + { + "SQL": "select /*+ IGNORE_INDEX(t1, c_d_e) */ t1.c from t t1 order by t1.c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t1, c_d_e) */ * from t t1", + "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`)" + }, + { + "SQL": "select /*+ USE_INDEX(t1, c_d_e), USE_INDEX(t2, f) */ * from t t1, t t2 where t1.a = t2.b", + "Best": "LeftHashJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))}(test.t.a,test.t.b)", + "HasWarn": false, + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`), use_index(@`sel_1` `test`.`t2` `f`), no_order_index(@`sel_1` `test`.`t2` `f`)" + }, + { + "SQL": "select /*+ IGNORE_INDEX(t1, c_d_e), IGNORE_INDEX(t2, f), HASH_JOIN(t1) */ * from t t1, t t2 where t1.a = t2.b", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.b)", + "HasWarn": false, + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), 
no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` ), no_order_index(@`sel_1` `test`.`t2` `primary`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t1, c_d_e), FORCE_INDEX(t2, f) */ * from t t1, t t2 where t1.a = t2.b", + "Best": "LeftHashJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))}(test.t.a,test.t.b)", + "HasWarn": false, + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`), use_index(@`sel_1` `test`.`t2` `f`), no_order_index(@`sel_1` `test`.`t2` `f`)" + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e, f, g) */ * from t order by f", + "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `f`), order_index(@`sel_1` `test`.`t` `f`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, c_d_e, f, g) */ * from t order by f", + "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `f`), order_index(@`sel_1` `test`.`t` `f`)" + }, + { + "SQL": "select /*+ USE_INDEX(t) */ f from t where f > 10", + "Best": "TableReader(Table(t)->Sel([gt(test.t.f, 10)]))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t) */ f from t where f > 10", + "Best": "TableReader(Table(t)->Sel([gt(test.t.f, 10)]))", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX(t, no_such_index) */ * from t", + "Best": "TableReader(Table(t))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ IGNORE_INDEX(t, no_such_index) */ * from t", + "Best": "TableReader(Table(t))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, no_such_index) */ * from t", + "Best": "TableReader(Table(t))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, f) */ c from t order by c", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), order_index(@`sel_1` `test`.`t` `c_d_e`)" + }, + { + "SQL": "select /*+ USE_INDEX(t, f), IGNORE_INDEX(t, f) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX(t, c_d_e, f), IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `f`), no_order_index(@`sel_1` `test`.`t` `f`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, c_d_e), IGNORE_INDEX(t, f) */ c from t order by c", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), order_index(@`sel_1` `test`.`t` 
`c_d_e`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, f), IGNORE_INDEX(t, f) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, c_d_e), IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "TableReader(Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ FORCE_INDEX(t, c_d_e, f), IGNORE_INDEX(t, c_d_e) */ c from t order by c", + "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))->Sort", + "HasWarn": false, + "Hints": "use_index(@`sel_1` `test`.`t` `f`), no_order_index(@`sel_1` `test`.`t` `f`)" + } + ] + }, + { + "Name": "TestIndexMergeHint", + "Cases": [ + { + "SQL": "select /*+ USE_INDEX_MERGE(t, c_d_e, f_g) */ * from t where c < 1 or f > 2", + "Best": "IndexMergeReader(PartialPlans->[Index(t.c_d_e)[[-inf,1)], Index(t.f_g)[(2,+inf]]], TablePlan->Table(t))", + "HasWarn": false, + "Hints": "use_index_merge(@`sel_1` `t` `c_d_e`, `f_g`)" + }, + { + "SQL": "select /*+ USE_INDEX_MERGE(t, primary, f_g) */ * from t where a < 1 or f > 2", + "Best": "IndexMergeReader(PartialPlans->[Table(t), Index(t.f_g)[(2,+inf]]], TablePlan->Table(t))", + "HasWarn": false, + "Hints": "use_index_merge(@`sel_1` `t` `primary`, `f_g`)" + }, + { + "SQL": "select /*+ USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2", + "Best": "IndexMergeReader(PartialPlans->[Table(t), Index(t.f_g)[(2,+inf]]], TablePlan->Table(t))", + "HasWarn": false, + "Hints": "use_index_merge(@`sel_1` `t` `primary`, `f_g`)" + }, + { + "SQL": "select /*+ NO_INDEX_MERGE(), USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2", + "Best": "TableReader(Table(t)->Sel([or(lt(test.t.a, 1), gt(test.t.f, 2))]))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX_MERGE(t1, c_d_e, f_g) */ * from t where c < 1 or f > 2", + "Best": "TableReader(Table(t)->Sel([or(lt(test.t.c, 1), gt(test.t.f, 2))]))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ NO_INDEX_MERGE(), USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2", + "Best": "TableReader(Table(t)->Sel([or(lt(test.t.a, 1), gt(test.t.f, 2))]))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX_MERGE(t) USE_INDEX_MERGE(t) */ * from t where c < 1 or f > 2", + "Best": "IndexMergeReader(PartialPlans->[Index(t.c_d_e)[[-inf,1)], Index(t.f)[(2,+inf]]], TablePlan->Table(t))", + "HasWarn": false, + "Hints": "use_index_merge(@`sel_1` `t` `c_d_e`, `f`)" + }, + { + "SQL": "select /*+ USE_INDEX_MERGE(db2.t) */ * from t where c < 1 or f > 2", + "Best": "TableReader(Table(t)->Sel([or(lt(test.t.c, 1), gt(test.t.f, 2))]))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + }, + { + "SQL": "select /*+ USE_INDEX_MERGE(db2.t, c_d_e, f_g) */ * from t where c < 1 or f > 2", + "Best": "TableReader(Table(t)->Sel([or(lt(test.t.c, 1), gt(test.t.f, 2))]))", + "HasWarn": true, + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" + } + ] + }, + { + "Name": "TestRefine", + "Cases": [ + { + "SQL": 
"select a from t where c is not null", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]])" + }, + { + "SQL": "select a from t where c >= 4", + "Best": "IndexReader(Index(t.c_d_e)[[4,+inf]]->Projection)" + }, + { + "SQL": "select a from t where c <= 4", + "Best": "IndexReader(Index(t.c_d_e)[[-inf,4]]->Projection)" + }, + { + "SQL": "select a from t where c = 4 and d = 5 and e = 6", + "Best": "PointGet(Index(t.c_d_e)[KindInt64 4 KindInt64 5 KindInt64 6])->Projection" + }, + { + "SQL": "select a from t where d = 4 and c = 5", + "Best": "IndexReader(Index(t.c_d_e)[[5 4,5 4]]->Projection)" + }, + { + "SQL": "select a from t where c = 4 and e < 5", + "Best": "IndexReader(Index(t.c_d_e)[[4,4]]->Sel([lt(test.t.e, 5)])->Projection)" + }, + { + "SQL": "select a from t where c = 4 and d <= 5 and d > 3", + "Best": "IndexReader(Index(t.c_d_e)[(4 3,4 5]]->Projection)" + }, + { + "SQL": "select a from t where d <= 5 and d > 3", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([le(test.t.d, 5) gt(test.t.d, 3)])->Projection)" + }, + { + "SQL": "select a from t where c between 1 and 2", + "Best": "IndexReader(Index(t.c_d_e)[[1,2]]->Projection)" + }, + { + "SQL": "select a from t where c not between 1 and 2", + "Best": "IndexReader(Index(t.c_d_e)[[-inf,1) (2,+inf]]->Projection)" + }, + { + "SQL": "select a from t where c <= 5 and c >= 3 and d = 1", + "Best": "IndexReader(Index(t.c_d_e)[[3,5]]->Sel([eq(test.t.d, 1)])->Projection)" + }, + { + "SQL": "select a from t where c = 1 or c = 2 or c = 3", + "Best": "IndexReader(Index(t.c_d_e)[[1,3]]->Projection)" + }, + { + "SQL": "select b from t where c = 1 or c = 2 or c = 3 or c = 4 or c = 5", + "Best": "TableReader(Table(t)->Sel([or(or(eq(test.t.c, 1), eq(test.t.c, 2)), or(eq(test.t.c, 3), or(eq(test.t.c, 4), eq(test.t.c, 5))))])->Projection)" + }, + { + "SQL": "select a from t where c = 5", + "Best": "IndexReader(Index(t.c_d_e)[[5,5]]->Projection)" + }, + { + "SQL": "select a from t where c = 5 and b = 1", + "Best": "IndexLookUp(Index(t.c_d_e)[[5,5]], Table(t)->Sel([eq(test.t.b, 1)]))->Projection" + }, + { + "SQL": "select a from t where not a", + "Best": "PointGet(Handle(t.a)0)" + }, + { + "SQL": "select a from t where c in (1)", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Projection)" + }, + { + "SQL": "select a from t where c in ('1')", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Projection)" + }, + { + "SQL": "select a from t where c = 1.0", + "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Projection)" + }, + { + "SQL": "select a from t where c in (1) and d > 3", + "Best": "IndexReader(Index(t.c_d_e)[(1 3,1 +inf]]->Projection)" + }, + { + "SQL": "select a from t where c in (1, 2, 3) and (d > 3 and d < 4 or d > 5 and d < 6)", + "Best": "Dual->Projection" + }, + { + "SQL": "select a from t where c in (1, 2, 3) and (d > 2 and d < 4 or d > 5 and d < 7)", + "Best": "IndexReader(Index(t.c_d_e)[[1 3,1 3] [1 6,1 6] [2 3,2 3] [2 6,2 6] [3 3,3 3] [3 6,3 6]]->Projection)" + }, + { + "SQL": "select a from t where c in (1, 2, 3)", + "Best": "IndexReader(Index(t.c_d_e)[[1,1] [2,2] [3,3]]->Projection)" + }, + { + "SQL": "select a from t where c in (1, 2, 3) and d in (1,2) and e = 1", + "Best": "BatchPointGet(Index(t.c_d_e)[[KindInt64 1 KindInt64 1 KindInt64 1] [KindInt64 1 KindInt64 2 KindInt64 1] [KindInt64 2 KindInt64 1 KindInt64 1] [KindInt64 2 KindInt64 2 KindInt64 1] [KindInt64 3 KindInt64 1 KindInt64 1] [KindInt64 3 KindInt64 2 KindInt64 1]])->Projection" + }, + { + "SQL": "select a from t where d in (1, 2, 3)", + "Best": 
"IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([in(test.t.d, 1, 2, 3)])->Projection)" + }, + { + "SQL": "select a from t where c not in (1)", + "Best": "IndexReader(Index(t.c_d_e)[[-inf,1) (1,+inf]]->Projection)" + }, + { + "SQL": "select a from t use index(c_d_e) where c != 1", + "Best": "IndexReader(Index(t.c_d_e)[[-inf,1) (1,+inf]]->Projection)" + }, + { + "SQL": "select a from t where c_str like ''", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"\",\"\"]]->Sel([like(test.t.c_str, , 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abc\"]]->Sel([like(test.t.c_str, abc, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str not like 'abc'", + "Best": "IndexReader(Index(t.c_d_e_str)[[NULL,+inf]]->Sel([not(like(test.t.c_str, abc, 92))])->Projection)" + }, + { + "SQL": "select a from t where not (c_str like 'abc' or c_str like 'abd')", + "Best": "IndexReader(Index(t.c_d_e_str)[[NULL,+inf]]->Sel([and(not(like(test.t.c_str, abc, 92)), not(like(test.t.c_str, abd, 92)))])->Projection)" + }, + { + "SQL": "select a from t where c_str like '_abc'", + "Best": "IndexReader(Index(t.c_d_e_str)[[NULL,+inf]]->Sel([like(test.t.c_str, _abc, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc%'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abd\")]->Sel([like(test.t.c_str, abc%, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc_'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abd\")]->Sel([like(test.t.c_str, abc_, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc%af'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abd\")]->Sel([like(test.t.c_str, abc%af, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc\\_' escape ''", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc_\"]]->Sel([like(test.t.c_str, abc\\_, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc\\_'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc_\"]]->Sel([like(test.t.c_str, abc\\_, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc\\\\_'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc_\"]]->Sel([like(test.t.c_str, abc\\_, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc\\_%'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc`\")]->Sel([like(test.t.c_str, abc\\_%, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc=_%' escape '='", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc`\")]->Sel([like(test.t.c_str, abc=_%, 61)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 'abc\\__'", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc`\")]->Sel([like(test.t.c_str, abc\\__, 92)])->Projection)" + }, + { + "SQL": "select a from t where c_str like 123", + "Best": "IndexReader(Index(t.c_d_e_str)[[\"123\",\"123\"]]->Sel([like(test.t.c_str, 123, 92)])->Projection)" + }, + { + "SQL": "select a from t where c = 1.9 and d > 3", + "Best": "Dual" + }, + { + "SQL": "select a from t where c < 1.1", + "Best": "IndexReader(Index(t.c_d_e)[[-inf,2)]->Projection)" + }, + { + "SQL": "select a from t where c <= 1.9", + "Best": "IndexReader(Index(t.c_d_e)[[-inf,1]]->Projection)" + }, + { + "SQL": "select a from t where c >= 1.1", + "Best": "IndexReader(Index(t.c_d_e)[[2,+inf]]->Projection)" + }, + { + "SQL": "select a from t where c > 1.9", + "Best": 
"IndexReader(Index(t.c_d_e)[(1,+inf]]->Projection)" + }, + { + "SQL": "select a from t where c = 123456789098765432101234", + "Best": "Dual" + }, + { + "SQL": "select a from t where c = 'hanfei'", + "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([eq(cast(test.t.c, double BINARY), cast(hanfei, double BINARY))])->Projection)" + } + ] + }, + { + "Name": "TestAggEliminator", + "Cases": [ + { + "SQL": "select max(a) from t;", + "Best": "TableReader(Table(t)->Limit)->Limit->StreamAgg" + }, + { + "SQL": "select min(a) from t;", + "Best": "TableReader(Table(t)->Limit)->Limit->StreamAgg" + }, + { + "SQL": "select min(c_str) from t;", + "Best": "IndexReader(Index(t.c_d_e_str)[[-inf,+inf]]->Limit)->Limit->StreamAgg" + }, + { + "SQL": "select max(a), b from t;", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a+1) from t;", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->TopN([plus(test.t.a, 1) true],0,1))->Projection->TopN([Column#40 true],0,1)->Projection->Projection->StreamAgg" + }, + { + "SQL": "select max(a), min(a) from t;", + "Best": "RightHashJoin{TableReader(Table(t)->Limit)->Limit->StreamAgg->TableReader(Table(t)->Limit)->Limit->StreamAgg}" + }, + { + "SQL": "select max(a), min(a) from t where a > 10", + "Best": "RightHashJoin{TableReader(Table(t)->Limit)->Limit->StreamAgg->TableReader(Table(t)->Limit)->Limit->StreamAgg}" + }, + { + "SQL": "select max(d), min(d) from t where c = 1 and d > 10", + "Best": "LeftHashJoin{IndexReader(Index(t.c_d_e)[(1 10,1 +inf]]->Limit)->Limit->StreamAgg->IndexReader(Index(t.c_d_e)[(1 10,1 +inf]]->Limit)->Limit->StreamAgg}" + }, + { + "SQL": "select max(a), max(c), min(f) from t", + "Best": "LeftHashJoin{RightHashJoin{TableReader(Table(t)->Limit)->Limit->StreamAgg->IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit->StreamAgg}->IndexReader(Index(t.f)[[NULL,+inf]]->Limit)->Limit->StreamAgg}" + }, + { + "SQL": "select max(a), max(b) from t", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a), max(c) from t where c > 10", + "Best": "IndexReader(Index(t.c_d_e)[(10,+inf]]->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a), min(a) from t where a * 3 + 10 < 100", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->Sel([lt(plus(mul(test.t.a, 3), 10), 100)])->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a) from t group by b;", + "Best": "TableReader(Table(t)->HashAgg)->HashAgg" + }, + { + "SQL": "select max(a) from (select t1.a from t t1 join t t2 on t1.a=t2.a) t", + "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Limit->StreamAgg" + } + ] + }, + { + "Name": "TestRuleColumnPruningLogicalApply", + "Cases": [ + { + "SQL": "SELECT COUNT(*) FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->HashAgg)->HashAgg" + }, + { + "SQL": "SELECT COUNT(a) FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->HashAgg)->HashAgg" + }, + { + "SQL": "SELECT COUNT(t) FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", + "Best": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Limit}->HashAgg" + }, + { + "SQL": "SELECT COUNT(a) FROM t t1 WHERE t1.a IN (SELECT t2.a FROM t t2, t t3 WHERE t2.b = t3.b)", + "Best": 
"LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.b)->HashAgg}(test.t.a,test.t.a)->HashAgg" + }, + { + "SQL": "SELECT a FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]])" + }, + { + "SQL": "SELECT a FROM t WHERE b IN (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a)", + "Best": "LeftHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->HashAgg}(test.t.b,test.t.b)" + }, + { + "SQL": "SELECT a FROM t WHERE EXISTS (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t2.b=t.b)", + "Best": "LeftHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)}(test.t.b,test.t.b)" + }, + { + "SQL": "SELECT a FROM t WHERE NOT EXISTS (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t2.b=t.b)", + "Best": "LeftHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)}(test.t.b,test.t.b)" + }, + { + "SQL": "SELECT a FROM t WHERE b IN (SELECT b FROM t WHERE b = 1 AND a IN (SELECT a FROM t WHERE a > 0))", + "Best": "RightHashJoin{IndexJoin{TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->TableReader(Table(t)->Sel([gt(test.t.a, 0)]))}(test.t.a,test.t.a)->HashAgg->TableReader(Table(t))}(test.t.b,test.t.b)" + }, + { + "SQL": "SELECT a FROM t WHERE b IN (SELECT b FROM t WHERE b = 1 AND a IN (SELECT t2.a FROM (SELECT t1.a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t2))", + "Best": "LeftHashJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->TableReader(Table(t))}(test.t.a,test.t.a)->HashAgg}(test.t.b,test.t.b)" + } + ] + }, + { + "Name": "TestUnmatchedTableInHint", + "Cases": [ + { + "SQL": "SELECT /*+ TIDB_SMJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a", + "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ MERGE_JOIN(t3, t4) */ or /*+ TIDB_SMJ(t3, t4) */. Maybe you can use the table alias name" + }, + { + "SQL": "SELECT /*+ TIDB_HJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a", + "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ HASH_JOIN(t3, t4) */ or /*+ TIDB_HJ(t3, t4) */. Maybe you can use the table alias name" + }, + { + "SQL": "SELECT /*+ TIDB_INLJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a", + "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ INL_JOIN(t3, t4) */ or /*+ TIDB_INLJ(t3, t4) */. Maybe you can use the table alias name" + }, + { + "SQL": "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t t1, t t2 where t1.a = t2.a", + "Warning": "" + }, + { + "SQL": "SELECT /*+ TIDB_SMJ(t3, t4) */ * from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a", + "Warning": "[planner:1815]There are no matching table names for (t4) in optimizer hint /*+ MERGE_JOIN(t3, t4) */ or /*+ TIDB_SMJ(t3, t4) */. 
Maybe you can use the table alias name" + } + ] + }, + { + "Name": "TestJoinHints", + "Cases": [ + { + "SQL": "select /*+ TIDB_INLJ(t1) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;", + "Best": "MergeInnerJoin{IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)", + "Warning": "", + "Hints": "merge_join(`test`.`t3`), leading(`test`.`t1`, `test`.`t2`, `test`.`t3`), inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` ), order_index(@`sel_1` `test`.`t2` `primary`), use_index(@`sel_1` `test`.`t3` ), order_index(@`sel_1` `test`.`t3` `primary`)" + }, + { + "SQL": "select /*+ TIDB_INLJ(test.t1) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;", + "Best": "MergeInnerJoin{IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)", + "Warning": "", + "Hints": "merge_join(`test`.`t3`), leading(`test`.`t1`, `test`.`t2`, `test`.`t3`), inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` ), order_index(@`sel_1` `test`.`t2` `primary`), use_index(@`sel_1` `test`.`t3` ), order_index(@`sel_1` `test`.`t3` `primary`)" + }, + { + "SQL": "select /*+ TIDB_INLJ(t1) */ t1.b, t2.a from t t1, t t2 where t1.b = t2.a;", + "Best": "LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.b,test.t.a)", + "Warning": "[planner:1815]Optimizer Hint /*+ INL_JOIN(t1) */ or /*+ TIDB_INLJ(t1) */ is inapplicable", + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` `f`), no_order_index(@`sel_1` `test`.`t2` `f`)" + }, + { + "SQL": "select /*+ TIDB_INLJ(t2) */ t1.b, t2.a from t2 t1, t2 t2 where t1.b=t2.b and t2.c=-1;", + "Best": "IndexJoin{TableReader(Table(t2)->Sel([eq(test.t2.c, -1)]))->IndexReader(Index(t2.b)[[NULL,NULL]])}(test.t2.b,test.t2.b)->Projection", + "Warning": "[planner:1815]Optimizer Hint /*+ INL_JOIN(t2) */ or /*+ TIDB_INLJ(t2) */ is inapplicable", + "Hints": "inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t2` ), no_order_index(@`sel_1` `test`.`t2` `primary`), use_index(@`sel_1` `test`.`t1` `b`), no_order_index(@`sel_1` `test`.`t1` `b`)" + } + ] + }, + { + "Name": "TestAggregationHints", + "Cases": [ + { + "SQL": "select count(*) from t t1, t t2 where t1.a = t2.b", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->StreamAgg", + "Warning": "" + }, + { + "SQL": "select count(t1.a) from t t1, t t2 where t1.a = t2.a*2 group by t1.a", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]])->Projection}(test.t.a,Column#26)->HashAgg", + "Warning": "" + }, + { + "SQL": "select /*+ HASH_AGG() */ count(*) from t t1, t t2 where t1.a = t2.b", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->HashAgg", + "Warning": "" + }, + { + "SQL": "select /*+ STREAM_AGG() */ count(t1.a) from t t1, t t2 where t1.a = t2.a*2 group by t1.a", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]])->Projection}(test.t.a,Column#26)->Sort->StreamAgg", + "Warning": "" + }, + { + "SQL": "select /*+ HASH_AGG() STREAM_AGG() */ count(*) from t t1, t t2 where t1.a = t2.b", + "Best": 
"LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->StreamAgg", + "Warning": "[planner:1815]Optimizer aggregation hints are conflicted" + }, + { + "SQL": "select /*+ STREAM_AGG() */ distinct a from t", + "Best": "IndexReader(Index(t.f)[[NULL,+inf]])", + "Warning": "" + }, + { + "SQL": "select /*+ HASH_AGG() */ t1.a from t t1 where t1.a < any(select t2.b from t t2)", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t)->HashAgg)->HashAgg->Sel([ne(Column#27, 0)])}", + "Warning": "" + }, + { + "SQL": "select /*+ hash_agg() */ t1.a from t t1 where t1.a != any(select t2.b from t t2)", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))->HashAgg->Sel([ne(Column#28, 0)])}", + "Warning": "" + }, + { + "SQL": "select /*+ hash_agg() */ t1.a from t t1 where t1.a = all(select t2.b from t t2)", + "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))->HashAgg->Sel([or(and(le(Column#26, 1), if(ne(Column#27, 0), , 1)), or(eq(Column#28, 0), 0))])}", + "Warning": "" + }, + { + "SQL": "select /*+ STREAM_AGG() */ sum(t1.a) from t t1 join t t2 on t1.b = t2.b group by t1.b", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))->Sort->Projection->StreamAgg}(test.t.b,test.t.b)->HashAgg", + "Warning": "" + }, + { + "SQL": "select /*+ STREAM_AGG() */ e, sum(b) from t group by e", + "Best": "TableReader(Table(t))->Sort->Projection->StreamAgg->Projection", + "Warning": "" + } + ] + }, + { + "Name": "TestQueryBlockHint", + "Cases": [ + { + "SQL": "select /*+ MERGE_JOIN(@sel_1 t1), INL_JOIN(@sel_2 t3) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Plan": "IndexJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)", + "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), no_order_index(@`sel_2` `test`.`t3` `c_d_e`)" + }, + { + "SQL": "select /*+ MERGE_JOIN(@sel_1 t1), INL_JOIN(@qb t3) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Plan": "IndexJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)", + "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), no_order_index(@`sel_2` `test`.`t3` `c_d_e`)" + }, + { + "SQL": "select /*+ HASH_JOIN(@sel_1 t1), MERGE_JOIN(@sel_2 t2) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Plan": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)", + "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)" + }, + { + "SQL": "select /*+ HASH_JOIN(@sel_1 t1), MERGE_JOIN(@qb t2) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Plan": 
"MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)", + "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)" + }, + { + "SQL": "select /*+ INL_JOIN(@sel_1 t1), HASH_JOIN(@sel_2 t2) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Plan": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)", + "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)" + }, + { + "SQL": "select /*+ INL_JOIN(@sel_1 t1), HASH_JOIN(@qb t2) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + "Plan": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)", + "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)" + }, + { + "SQL": "select /*+ HASH_AGG(@sel_1), STREAM_AGG(@sel_2) */ count(*) from t t1 where t1.a < (select count(*) from t t2 where t1.a > t2.a)", + "Plan": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)])->StreamAgg)->StreamAgg}->HashAgg", + "Hints": "hash_agg(@`sel_1`), use_index(@`sel_1` `test`.`t1` `f`), no_order_index(@`sel_1` `test`.`t1` `f`), stream_agg(@`sel_2`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`), agg_to_cop(@`sel_2`)" + }, + { + "SQL": "select /*+ STREAM_AGG(@sel_1), HASH_AGG(@qb) */ count(*) from t t1 where t1.a < (select /*+ QB_NAME(qb) */ count(*) from t t2 where t1.a > t2.a)", + "Plan": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)])->HashAgg)->HashAgg}->StreamAgg", + "Hints": "stream_agg(@`sel_1`), use_index(@`sel_1` `test`.`t1` `f`), no_order_index(@`sel_1` `test`.`t1` `f`), hash_agg(@`sel_2`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`), agg_to_cop(@`sel_2`)" + }, + { + "SQL": "select /*+ HASH_AGG(@sel_2) */ a, (select count(*) from t t1 where t1.b > t.a) from t where b > (select b from t t2 where t2.b = t.a limit 1)", + "Plan": "Apply{Apply{TableReader(Table(t))->TableReader(Table(t)->Sel([eq(test.t.b, test.t.a)])->Limit)->Limit}->TableReader(Table(t)->Sel([gt(test.t.b, test.t.a)])->HashAgg)->HashAgg}->Projection", + "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`), use_index(@`sel_3` `test`.`t2` ), no_order_index(@`sel_3` `test`.`t2` `primary`), limit_to_cop(@`sel_3`), hash_agg(@`sel_2`), use_index(@`sel_2` `test`.`t1` ), no_order_index(@`sel_2` `test`.`t1` `primary`), agg_to_cop(@`sel_2`)" + }, + { + "SQL": "select /*+ HASH_JOIN(@sel_1 t1), HASH_JOIN(@sel_2 t1) */ t1.b, t2.a, t2.aa from t t1, (select t1.a as a, t2.a as aa from t t1, t t2) t2 where t1.a 
= t2.aa;", + "Plan": "LeftHashJoin{LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)->IndexReader(Index(t.f)[[NULL,+inf]])}->Projection", + "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`), use_index(@`sel_2` `test`.`t1` `f`), no_order_index(@`sel_2` `test`.`t1` `f`)" + }, + { + "SQL": "select /*+ HASH_JOIN(@sel_2 t1@sel_2, t2@sel_2), MERGE_JOIN(@sel_1 t1@sel_1, t2@sel_1) */ * from (select t1.a, t1.b from t t1, t t2 where t1.a = t2.a) t1, t t2 where t1.b = t2.b", + "Plan": "MergeInnerJoin{TableReader(Table(t))->Sort->LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)->Sort}(test.t.b,test.t.b)->Projection", + "Hints": "use_index(@`sel_1` `test`.`t2` ), no_order_index(@`sel_1` `test`.`t2` `primary`), hash_join_build(@`sel_2` `test`.`t2`@`sel_2`), use_index(@`sel_2` `test`.`t1` ), no_order_index(@`sel_2` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`)" + } + ] + }, + { + "Name": "TestSemiJoinToInner", + "Cases": [ + { + "SQL": "select t1.a, (select count(t2.a) from t t2 where t2.g in (select t3.d from t t3 where t3.c = t1.a)) as agg_col from t t1;", + "Best": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexHashJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]]->HashAgg)->HashAgg->IndexReader(Index(t.g)[[NULL,NULL]])}(test.t.d,test.t.g)}->HashAgg" + } + ] + }, + { + "Name": "TestIndexJoinHint", + "Cases": [ + { + "SQL": "select /*+ INL_JOIN(t1) */ * from t1 join t2 on t1.a = t2.a;", + "Plan": "IndexJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)", + "Warns": null + }, + { + "SQL": "select /*+ INL_HASH_JOIN(t1) */ * from t1 join t2 on t1.a = t2.a;", + "Plan": "IndexHashJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)", + "Warns": null + }, + { + "SQL": "select /*+ INL_MERGE_JOIN(t1) */ * from t1 join t2 on t1.a = t2.a;", + "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)", + "Warns": [ + "[planner:1815]The INDEX MERGE JOIN hint is deprecated for usage, try other hints." + ] + }, + { + "SQL": "select /*+ inl_merge_join(t2) */ t1.a, t2.a from t t1 left join t t2 use index(g_2) on t1.g=t2.g", + "Plan": "MergeLeftOuterJoin{IndexReader(Index(t.g_2)[[NULL,+inf]])->IndexReader(Index(t.g_2)[[-inf,+inf]])}(test.t.g,test.t.g)", + "Warns": [ + "[planner:1815]The INDEX MERGE JOIN hint is deprecated for usage, try other hints." + ] + }, + { + "SQL": "select /*+inl_merge_join(t2)*/ t1.a, t2.a from t t1 left join t t2 use index(g_2) on t1.g=t2.g order by t1.a", + "Plan": "IndexHashJoin{TableReader(Table(t))->IndexReader(Index(t.g_2)[[NULL,NULL]]->Sel([not(isnull(test.t.g))]))}(test.t.g,test.t.g)", + "Warns": [ + "[planner:1815]The INDEX MERGE JOIN hint is deprecated for usage, try other hints." 
+ ] + } + ] + }, + { + "Name": "TestAggToCopHint", + "Cases": [ + { + "SQL": "select /*+ AGG_TO_COP(), HASH_AGG(), USE_INDEX(t) */ sum(a) from ta group by a", + "Best": "IndexReader(Index(ta.a)[[NULL,+inf]]->HashAgg)->HashAgg", + "Warning": "[planner:1815]use_index(test.t) is inapplicable, check whether the table(test.t) exists" + }, + { + "SQL": "select /*+ AGG_TO_COP(), USE_INDEX(t) */ sum(b) from ta group by b", + "Best": "TableReader(Table(ta)->HashAgg)->HashAgg", + "Warning": "[planner:1815]use_index(test.t) is inapplicable, check whether the table(test.t) exists" + }, + { + "SQL": "select /*+ AGG_TO_COP(), HASH_AGG(), USE_INDEX(t) */ distinct a from ta group by a", + "Best": "IndexReader(Index(ta.a)[[NULL,+inf]]->HashAgg)->HashAgg", + "Warning": "[planner:1815]use_index(test.t) is inapplicable, check whether the table(test.t) exists" + }, + { + "SQL": "select /*+ AGG_TO_COP(), HASH_AGG(), HASH_JOIN(t1), USE_INDEX(t1), USE_INDEX(t2) */ sum(t1.a) from ta t1, ta t2 where t1.a = t2.b group by t1.a", + "Best": "LeftHashJoin{TableReader(Table(ta)->Sel([not(isnull(test.ta.a))]))->TableReader(Table(ta)->Sel([not(isnull(test.ta.b))]))}(test.ta.a,test.ta.b)->Projection->HashAgg", + "Warning": "[planner:1815]Optimizer Hint AGG_TO_COP is inapplicable" + } + ] + }, + { + "Name": "TestGroupConcatOrderby", + "Cases": [ + { + "SQL": "select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test;", + "Plan": [ + "HashAgg 1.00 root funcs:group_concat(Column#6 order by Column#7 desc separator \"++\")->Column#4, funcs:group_concat(Column#8 order by Column#7 desc, Column#9 separator \"--\")->Column#5", + "└─Projection 10000.00 root cast(test.test.name, var_string(20))->Column#6, test.test.name->Column#7, cast(test.test.id, var_string(20))->Column#8, test.test.id->Column#9", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:test keep order:false, stats:pseudo" + ], + "Result": [ + "500++200++30++20++20++10 3--3--1--1--2--1" + ] + }, + { + "SQL": "select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest;", + "Plan": [ + "HashAgg 1.00 root funcs:group_concat(Column#6 order by Column#7 desc separator \"++\")->Column#4, funcs:group_concat(Column#8 order by Column#7 desc, Column#9 separator \"--\")->Column#5", + "└─Projection 10000.00 root cast(test.ptest.name, var_string(20))->Column#6, test.ptest.name->Column#7, cast(test.ptest.id, var_string(20))->Column#8, test.ptest.id->Column#9", + " └─TableReader 10000.00 root partition:all data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:ptest keep order:false, stats:pseudo" + ], + "Result": [ + "500++200++30++20++20++10 3--3--1--1--2--1" + ] + }, + { + "SQL": "select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from test;", + "Plan": [ + "HashAgg 1.00 root funcs:group_concat(distinct Column#5 order by Column#6 desc separator \",\")->Column#4", + "└─Projection 10000.00 root cast(test.test.name, var_string(20))->Column#5, test.test.name->Column#6", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:test keep order:false, stats:pseudo" + ], + "Result": [ + "500,200,30,20,10" + ] + }, + { + "SQL": "select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest;", + "Plan": [ + "HashAgg 1.00 root funcs:group_concat(distinct Column#5 order by 
Column#6 desc separator \",\")->Column#4", + "└─Projection 10000.00 root cast(test.ptest.name, var_string(20))->Column#5, test.ptest.name->Column#6", + " └─TableReader 10000.00 root partition:all data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:ptest keep order:false, stats:pseudo" + ], + "Result": [ + "500,200,30,20,10" + ] + } + ] + }, + { + "Name": "TestInlineProjection", + "Cases": [ + { + "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1, t2 where t1.a = t2.a;", + "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)", + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )" + }, + { + "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;", + "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)", + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )" + }, + { + "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1 left outer join t2 on t1.a = t2.a;", + "Plan": "LeftHashJoin{TableReader(Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)", + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )" + }, + { + "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1 right outer join t2 on t1.a = t2.a;", + "Plan": "RightHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2))}(test.t1.a,test.t2.a)", + "Hints": "hash_join_build(`test`.`t1`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )" + }, + { + "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a in (select t2.a from t2) from t1) x;", + "Plan": "LeftHashJoin{IndexReader(Index(t1.idx_a)[[NULL,+inf]])->IndexReader(Index(t2.idx_a)[[NULL,+inf]])}->Projection", + "Hints": "hash_join(@`sel_2` `test`.`t1`@`sel_2`), use_index(@`sel_2` `test`.`t1` `idx_a`), no_order_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_3` `test`.`t2` `idx_a`), no_order_index(@`sel_3` `test`.`t2` `idx_a`)" + }, + { + "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a not in (select t2.a from t2) from t1) x;", + "Plan": "LeftHashJoin{IndexReader(Index(t1.idx_a)[[NULL,+inf]])->IndexReader(Index(t2.idx_a)[[NULL,+inf]])}->Projection", + "Hints": "hash_join(@`sel_2` `test`.`t1`@`sel_2`), use_index(@`sel_2` `test`.`t1` `idx_a`), no_order_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_3` `test`.`t2` `idx_a`), no_order_index(@`sel_3` `test`.`t2` `idx_a`)" + }, + { + "SQL": "select /*+ INL_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;", + "Plan": "IndexJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)", + "Hints": "inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_a`), no_order_index(@`sel_1` `test`.`t1` `idx_a`), use_index(@`sel_1` `test`.`t2` )" + }, + { + "SQL": "select /*+ INL_HASH_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;", + "Plan": "IndexHashJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)", + "Hints": "inl_hash_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_a`), 
no_order_index(@`sel_1` `test`.`t1` `idx_a`), use_index(@`sel_1` `test`.`t2` )" + }, + { + "SQL": "select /*+ INL_MERGE_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;", + "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)", + "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )" + }, + { + "SQL": "select /*+ MERGE_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;", + "Plan": "MergeInnerJoin{IndexLookUp(Index(t1.idx_a)[[-inf,+inf]], Table(t1))->Projection->IndexLookUp(Index(t2.idx_a)[[-inf,+inf]], Table(t2))->Projection}(test.t1.a,test.t2.a)", + "Hints": "merge_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_a`), order_index(@`sel_1` `test`.`t1` `idx_a`), use_index(@`sel_1` `test`.`t2` `idx_a`), order_index(@`sel_1` `test`.`t2` `idx_a`)" + } + ] + }, + { + "Name": "TestHintFromDiffDatabase", + "Cases": [ + { + "SQL": "select /*+ inl_hash_join(test.t1) */ * from test.t2 join test.t1 on test.t2.a = test.t1.a", + "Plan": "IndexHashJoin{IndexReader(Index(t2.idx_a)[[-inf,+inf]])->IndexReader(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]))}(test.t2.a,test.t1.a)" + } + ] + }, + { + "Name": "TestMPPSinglePartitionType", + "Cases": [ + { + "SQL": "select * from employee where deptid>1", + "Plan": [ + "TableReader 3333.33 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection 3333.33 mpp[tiflash] gt(test.employee.deptid, 1)", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select deptid+5, empid*10 from employee where deptid>1", + "Plan": [ + "TableReader 3333.33 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 3333.33 mpp[tiflash] plus(test.employee.deptid, 5)->Column#5, mul(test.employee.empid, 10)->Column#6", + " └─Selection 3333.33 mpp[tiflash] gt(test.employee.deptid, 1)", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select count(*) from employee group by deptid+1", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] Column#5", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#12, funcs:sum(Column#13)->Column#5", + " └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#12, collate: binary]", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#14, funcs:count(1)->Column#13", + " └─Projection 10000.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#14", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select count(distinct deptid) a from employee", + "Plan": [ + "TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 mpp[tiflash] Column#5", + " └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#7)->Column#5", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct 
test.employee.deptid)->Column#7", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from employee join employee e1 using(deptid)", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12487.50 mpp[tiflash] test.employee.deptid, test.employee.empid, test.employee.salary, test.employee.empid, test.employee.salary", + " └─Projection 12487.50 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, test.employee.empid, test.employee.salary", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, test.employee.deptid)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select count(distinct a) from (select count(distinct deptid) a from employee) x", + "Plan": [ + "TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 mpp[tiflash] Column#6", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct Column#5)->Column#6", + " └─Projection 1.00 mpp[tiflash] Column#5", + " └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#8)->Column#5", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#8", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select count(a) from (select count(distinct deptid) a, count(distinct empid) b from employee) x group by b+1", + "Plan": [ + "TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 mpp[tiflash] Column#7", + " └─HashAgg 1.00 mpp[tiflash] group by:Column#12, funcs:sum(Column#13)->Column#7", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#12, collate: binary]", + " └─HashAgg 1.00 mpp[tiflash] group by:Column#15, funcs:count(Column#14)->Column#13", + " └─Projection 1.00 mpp[tiflash] Column#5->Column#14, plus(Column#6, 1)->Column#15", + " └─Projection 1.00 mpp[tiflash] Column#5, Column#6", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#5, funcs:count(distinct test.employee.empid)->Column#6", + " └─ExchangeReceiver 1.00 
mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, test.employee.empid, ", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select count(a) from (select count(distinct deptid) a, count(distinct empid) b from employee) x group by b", + "Plan": [ + "TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 mpp[tiflash] Column#7", + " └─HashAgg 1.00 mpp[tiflash] group by:Column#6, funcs:count(Column#5)->Column#7", + " └─Projection 1.00 mpp[tiflash] Column#5, Column#6", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#5, funcs:count(distinct test.employee.empid)->Column#6", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, test.employee.empid, ", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from employee join (select count(distinct deptid) a, count(distinct empid) b from employee) e1", + "Plan": [ + "TableReader 10000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 10000.00 mpp[tiflash] CARTESIAN inner join", + " ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 1.00 mpp[tiflash] Column#9, Column#10", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#9, funcs:count(distinct test.employee.empid)->Column#10", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " │ └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, test.employee.empid, ", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from employee e1 join (select count(distinct deptid) a from employee) e2 on e1.deptid = e2.a", + "Plan": [ + "TableReader 1.25 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.25 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 1.25 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#9)]", + " ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 1.00 mpp[tiflash] Column#9", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#10)->Column#9", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#10", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " │ └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " 
└─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from (select count(distinct deptid) a from employee) e1 join employee e2 on e1.a = e2.deptid", + "Plan": [ + "TableReader 1.25 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.25 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 1.25 mpp[tiflash] Column#5, test.employee.empid, test.employee.deptid, test.employee.salary", + " └─HashJoin 1.25 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#5)]", + " ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 1.00 mpp[tiflash] Column#5", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#10)->Column#5", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#10", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " │ └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from (select count(distinct deptid) a from employee) e1 join (select count(distinct deptid) b from employee) e2 on e1.a=e2.b", + "Plan": [ + "TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 1.00 mpp[tiflash] inner join, equal:[eq(Column#5, Column#10)]", + " ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 1.00 mpp[tiflash] Column#5", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#11)->Column#5", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#11", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " │ └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─Projection(Probe) 1.00 mpp[tiflash] Column#10", + " └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#12)->Column#10", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#12", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from employee e1 join employee e2 on 
e1.deptid = e2.deptid", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, test.employee.deptid)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join employee e2 on e1.d = e2.deptid", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] Column#6, Column#5, test.employee.empid, test.employee.deptid, test.employee.salary", + " └─HashJoin 8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#6)]", + " ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ", + " │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5", + " │ └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " │ └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#5, funcs:firstrow(Column#15)->test.employee.deptid", + " │ └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " │ └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#27, funcs:count(Column#25)->Column#14, funcs:firstrow(Column#26)->Column#15", + " │ └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#25, test.employee.deptid->Column#26, plus(test.employee.deptid, 1)->Column#27", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from employee e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.deptid = e2.d", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#10)]", + " ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ", + " │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#10, Column#9", + " │ └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " │ └─Projection 8000.00 mpp[tiflash] Column#9, test.employee.deptid", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#9, funcs:firstrow(Column#15)->test.employee.deptid", + " │ └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " │ 
└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#27, funcs:count(Column#25)->Column#14, funcs:firstrow(Column#26)->Column#15", + " │ └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#25, test.employee.deptid->Column#26, plus(test.employee.deptid, 1)->Column#27", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.d = e2.d", + "Plan": [ + "TableReader 6400.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 6400.00 mpp[tiflash] inner join, equal:[eq(Column#6, Column#12)]", + " ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ", + " │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5", + " │ └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " │ └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#17, funcs:sum(Column#18)->Column#5, funcs:firstrow(Column#19)->test.employee.deptid", + " │ └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " │ └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#17, collate: binary]", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#43, funcs:count(Column#41)->Column#18, funcs:firstrow(Column#42)->Column#19", + " │ └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#41, test.employee.deptid->Column#42, plus(test.employee.deptid, 1)->Column#43", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─Projection(Probe) 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#12, Column#11", + " └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " └─Projection 8000.00 mpp[tiflash] Column#11, test.employee.deptid", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#20, funcs:sum(Column#21)->Column#11, funcs:firstrow(Column#22)->test.employee.deptid", + " └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary]", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#46, funcs:count(Column#44)->Column#21, funcs:firstrow(Column#45)->Column#22", + " └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#44, test.employee.deptid->Column#45, plus(test.employee.deptid, 1)->Column#46", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "set tidb_broadcast_join_threshold_count=1", + "Plan": null + }, + { + "SQL": "set tidb_broadcast_join_threshold_size=1", + "Plan": null + }, + { + "SQL": "select * from (select count(distinct deptid) a from employee) e1 join employee e2 on e1.a = e2.deptid", + "Plan": [ + "Projection 1.25 root Column#5, test.employee.empid, test.employee.deptid, test.employee.salary", + "└─HashJoin 
1.25 root inner join, equal:[eq(test.employee.deptid, Column#5)]", + " ├─TableReader(Build) 1.00 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─Projection 1.00 mpp[tiflash] Column#5", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#12)->Column#5", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#12", + " │ └─ExchangeReceiver 1.00 mpp[tiflash] ", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " │ └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─TableReader(Probe) 9990.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from (select count(distinct deptid) a from employee) e1 join (select count(distinct deptid) b from employee) e2 on e1.a=e2.b", + "Plan": [ + "HashJoin 1.00 root inner join, equal:[eq(Column#5, Column#10)]", + "├─TableReader(Build) 1.00 root MppVersion: 2, data:ExchangeSender", + "│ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + "│ └─Projection 1.00 mpp[tiflash] Column#10", + "│ └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#16)->Column#10", + "│ └─ExchangeReceiver 1.00 mpp[tiflash] ", + "│ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + "│ └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#16", + "│ └─ExchangeReceiver 1.00 mpp[tiflash] ", + "│ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + "│ └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + "│ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + "└─TableReader(Probe) 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 mpp[tiflash] Column#5", + " └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#15)->Column#5", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#15", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from employee e1 join employee e2 on e1.deptid = e2.deptid", + "Plan": [ + "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, test.employee.deptid)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] 
ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join employee e2 on e1.d = e2.deptid", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] Column#6, Column#5, test.employee.empid, test.employee.deptid, test.employee.salary", + " └─Projection 8000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#6, Column#5, Column#26", + " └─HashJoin 8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#6)]", + " ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ", + " │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#6, collate: binary]", + " │ └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5", + " │ └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " │ └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#5, funcs:firstrow(Column#15)->test.employee.deptid", + " │ └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " │ └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#29, funcs:count(Column#27)->Column#14, funcs:firstrow(Column#28)->Column#15", + " │ └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#27, test.employee.deptid->Column#28, plus(test.employee.deptid, 1)->Column#29", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#26, collate: binary]", + " └─Projection 9990.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, cast(test.employee.deptid, bigint(20))->Column#26", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from employee e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.deptid = e2.d", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#10, Column#9", + " └─Projection 8000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#10, Column#9, Column#26", + " └─HashJoin 
8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#10)]", + " ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ", + " │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#10, collate: binary]", + " │ └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#10, Column#9", + " │ └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " │ └─Projection 8000.00 mpp[tiflash] Column#9, test.employee.deptid", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#9, funcs:firstrow(Column#15)->test.employee.deptid", + " │ └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " │ └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#29, funcs:count(Column#27)->Column#14, funcs:firstrow(Column#28)->Column#15", + " │ └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#27, test.employee.deptid->Column#28, plus(test.employee.deptid, 1)->Column#29", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", + " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#26, collate: binary]", + " └─Projection 9990.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, cast(test.employee.deptid, bigint(20))->Column#26", + " └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))", + " └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo" + ] + }, + { + "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.d = e2.d", + "Plan": [ + "TableReader 6400.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashJoin 6400.00 mpp[tiflash] inner join, equal:[eq(Column#6, Column#12)]", + " ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ", + " │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#6, collate: binary]", + " │ └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5", + " │ └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " │ └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#17, funcs:sum(Column#18)->Column#5, funcs:firstrow(Column#19)->test.employee.deptid", + " │ └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " │ └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#17, collate: binary]", + " │ └─HashAgg 8000.00 mpp[tiflash] group by:Column#43, funcs:count(Column#41)->Column#18, funcs:firstrow(Column#42)->Column#19", + " │ └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#41, test.employee.deptid->Column#42, plus(test.employee.deptid, 1)->Column#43", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo", + " └─ExchangeReceiver(Probe) 6400.00 mpp[tiflash] ", + " └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#12, collate: binary]", + " └─Projection 6400.00 mpp[tiflash] 
plus(test.employee.deptid, 1)->Column#12, Column#11", + " └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))", + " └─Projection 8000.00 mpp[tiflash] Column#11, test.employee.deptid", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#20, funcs:sum(Column#21)->Column#11, funcs:firstrow(Column#22)->test.employee.deptid", + " └─ExchangeReceiver 8000.00 mpp[tiflash] ", + " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary]", + " └─HashAgg 8000.00 mpp[tiflash] group by:Column#46, funcs:count(Column#44)->Column#21, funcs:firstrow(Column#45)->Column#22", + " └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#44, test.employee.deptid->Column#45, plus(test.employee.deptid, 1)->Column#46", + " └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestSemiJoinRewriteHints", + "Cases": [ + { + "SQL": "select /*+ SEMI_JOIN_REWRITE() */ * from t", + "Plan": [ + "TableReader 10000.00 root data:TableFullScan", + "└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": "[planner:1815]The SEMI_JOIN_REWRITE hint is not used correctly, maybe it's not in a subquery or the subquery is not EXISTS clause." + }, + { + "SQL": "select * from t where a > (select /*+ SEMI_JOIN_REWRITE() */ min(b) from t t1 where t1.c = t.c)", + "Plan": [ + "HashJoin 7992.00 root inner join, equal:[eq(test.t.c, test.t.c)], other cond:gt(test.t.a, Column#9)", + "├─Selection(Build) 6393.60 root not(isnull(Column#9))", + "│ └─HashAgg 7992.00 root group by:test.t.c, funcs:min(Column#10)->Column#9, funcs:firstrow(test.t.c)->test.t.c", + "│ └─TableReader 7992.00 root data:HashAgg", + "│ └─HashAgg 7992.00 cop[tikv] group by:test.t.c, funcs:min(test.t.b)->Column#10", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t.c))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9980.01 root data:Selection", + " └─Selection 9980.01 cop[tikv] not(isnull(test.t.a)), not(isnull(test.t.c))", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": "[planner:1815]The SEMI_JOIN_REWRITE hint is not used correctly, maybe it's not in a subquery or the subquery is not EXISTS clause." 
+ }, + { + "SQL": "select * from t where exists (select /*+ SEMI_JOIN_REWRITE() */ 1 from t t1 where t1.a=t.a)", + "Plan": [ + "HashJoin 9990.00 root inner join, equal:[eq(test.t.a, test.t.a)]", + "├─HashAgg(Build) 7992.00 root group by:test.t.a, funcs:firstrow(test.t.a)->test.t.a", + "│ └─TableReader 7992.00 root data:HashAgg", + "│ └─HashAgg 7992.00 cop[tikv] group by:test.t.a, ", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": "" + }, + { + "SQL": "select * from t where exists (select /*+ SEMI_JOIN_REWRITE() */ t.b from t t1 where t1.a=t.a)", + "Plan": [ + "HashJoin 9990.00 root inner join, equal:[eq(test.t.a, test.t.a)]", + "├─HashAgg(Build) 7992.00 root group by:test.t.a, funcs:firstrow(test.t.a)->test.t.a", + "│ └─TableReader 7992.00 root data:HashAgg", + "│ └─HashAgg 7992.00 cop[tikv] group by:test.t.a, ", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": "" + }, + { + "SQL": "select exists(select /*+ SEMI_JOIN_REWRITE() */ * from t t1 where t1.a=t.a) from t", + "Plan": [ + "HashJoin 10000.00 root left outer semi join, equal:[eq(test.t.a, test.t.a)]", + "├─TableReader(Build) 10000.00 root data:TableFullScan", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": "[planner:1815]SEMI_JOIN_REWRITE() is inapplicable for LeftOuterSemiJoin." + }, + { + "SQL": "select * from t where exists (select /*+ SEMI_JOIN_REWRITE() */ 1 from t t1 where t1.a > t.a)", + "Plan": [ + "HashJoin 7992.00 root CARTESIAN semi join, other cond:gt(test.t.a, test.t.a)", + "├─TableReader(Build) 9990.00 root data:Selection", + "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9990.00 root data:Selection", + " └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": "[planner:1815]SEMI_JOIN_REWRITE() is inapplicable for SemiJoin with left conditions or other conditions." 
+ } + ] + }, + { + "Name": "TestHJBuildAndProbeHint4DynamicPartitionTable", + "Cases": [ + { + "SQL": "select /*+ hash_join_build(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + "├─TableReader(Build) 9980.01 root partition:all data:Selection", + "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9980.01 root partition:all data:Selection", + " └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Result": [ + "1 1" + ], + "Warning": null + }, + { + "SQL": "select /*+ hash_join_probe(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + "├─TableReader(Build) 9980.01 root partition:all data:Selection", + "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9980.01 root partition:all data:Selection", + " └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Result": [ + "1 1" + ], + "Warning": null + }, + { + "SQL": "select /*+ hash_join_build(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + "├─TableReader(Build) 9980.01 root partition:all data:Selection", + "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9980.01 root partition:all data:Selection", + " └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))", + " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ], + "Result": [ + "1 1" + ], + "Warning": null + }, + { + "SQL": "select /*+ hash_join_probe(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + "├─TableReader(Build) 9980.01 root partition:all data:Selection", + "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))", + "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + "└─TableReader(Probe) 9980.01 root partition:all data:Selection", + " └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))", + " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ], + "Result": [ + "1 1" + ], + "Warning": null + } + ] + }, + { + "Name": "TestHJBuildAndProbeHint4TiFlash", + "Cases": [ + { + "SQL": "select /*+ hash_join_build(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a", + " └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + " ├─ExchangeReceiver(Build) 
10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ hash_join_probe(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a", + " └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ hash_join_build(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a", + " └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ hash_join_probe(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b", + "Plan": [ + "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a", + " └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warning": null + } + ] + }, + { + "Name": "TestCountStarForTiFlash", + "Cases": [ + { + "SQL": "select count(*) from t", + "Plan": [ + "HashAgg 1.00 root funcs:count(Column#12)->Column#10", + "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(test.t.d)->Column#12", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select count(1), count(3.1415), count(0), count(null) from t -- every count but count(null) can be rewritten", + "Plan": [ + "HashAgg 1.00 root funcs:count(Column#18)->Column#10, funcs:count(Column#19)->Column#11, funcs:count(Column#20)->Column#12, funcs:count(Column#21)->Column#13", + "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " 
└─HashAgg 1.00 mpp[tiflash] funcs:count(test.t.d)->Column#18, funcs:count(test.t.d)->Column#19, funcs:count(test.t.d)->Column#20, funcs:count(NULL)->Column#21", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select count(*) from t where a=1", + "Plan": [ + "StreamAgg 1.00 root funcs:count(1)->Column#10", + "└─TableReader 10.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 10.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection 10.00 mpp[tiflash] eq(test.t.a, 1)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select count(*) from t_pick_row_id", + "Plan": [ + "HashAgg 1.00 root funcs:count(Column#5)->Column#3", + "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(test.t_pick_row_id._tidb_rowid)->Column#5", + " └─TableFullScan 10000.00 mpp[tiflash] table:t_pick_row_id keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select t.b, t.c from (select count(*) as c from t) a, t where a.c=t.a -- test recursive", + "Plan": [ + "HashJoin 1.25 root inner join, equal:[eq(test.t.a, Column#10)]", + "├─HashAgg(Build) 1.00 root funcs:count(Column#22)->Column#10", + "│ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + "│ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + "│ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.t.d)->Column#22", + "│ └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo", + "└─TableReader(Probe) 10000.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select * from t outTable where outTable.a > (select count(*) from t inn where inn.a = outTable.b) -- shouldn't be rewritten for correlated sub query", + "Plan": [ + "Projection 10000.00 root test.t.a, test.t.b, test.t.c, test.t.d, test.t.e, test.t.f, test.t.g, test.t.h", + "└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(test.t.a, Column#19)", + " ├─TableReader(Build) 10000.00 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:outTable keep order:false, stats:pseudo", + " └─HashAgg(Probe) 10000.00 root funcs:count(Column#21)->Column#19", + " └─TableReader 10000.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 10000.00 mpp[tiflash] funcs:count(1)->Column#21", + " └─Selection 80000000.00 mpp[tiflash] eq(cast(test.t.a, double BINARY), cast(test.t.b, double BINARY))", + " └─TableFullScan 100000000.00 mpp[tiflash] table:inn pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select count(*) from t t1, t t2 where t1.a=t2.e -- shouldn't be rewritten when join under agg", + "Plan": [ + "HashAgg 1.00 root funcs:count(Column#20)->Column#19", + "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(1)->Column#20", + " └─Projection 12500.00 mpp[tiflash] test.t.a", + " └─HashJoin 12500.00 mpp[tiflash] inner 
join, equal:[eq(test.t.a, test.t.e)]", + " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", + " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo", + " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select count(distinct 1) from t -- shouldn't be rewritten", + "Plan": [ + "TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 mpp[tiflash] Column#10", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct Column#12)->Column#10", + " └─ExchangeReceiver 1.00 mpp[tiflash] ", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", + " └─HashAgg 1.00 mpp[tiflash] group by:1, ", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select count(1), count(a), count(b) from t -- keep count(1)", + "Plan": [ + "HashAgg 1.00 root funcs:count(Column#16)->Column#10, funcs:count(Column#17)->Column#11, funcs:count(Column#18)->Column#12", + "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(1)->Column#16, funcs:count(test.t.a)->Column#17, funcs:count(test.t.b)->Column#18", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select a, count(*) from t group by a -- shouldn't be rewritten", + "Plan": [ + "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", + "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#10", + " └─Projection 8000.00 mpp[tiflash] Column#10, test.t.a", + " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, funcs:count(1)->Column#10, funcs:firstrow(test.t.a)->test.t.a", + " └─ExchangeReceiver 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select sum(a) from t -- sum shouldn't be rewritten", + "Plan": [ + "HashAgg 1.00 root funcs:sum(Column#12)->Column#10", + "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#15)->Column#12", + " └─Projection 10000.00 mpp[tiflash] cast(test.t.a, decimal(10,0) BINARY)->Column#15", + " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warning": null + } + ] + }, + { + "Name": "TestHashAggPushdownToTiFlashCompute", + "Cases": [ + { + "SQL": "select /*+ agg_to_cop() hash_agg() */ avg( distinct tbl_15.col_96 ) as r0 , min( tbl_15.col_92 ) as r1 , sum( distinct tbl_15.col_91 ) as r2 , max( tbl_15.col_92 ) as r3 from tbl_15 where tbl_15.col_94 != '2033-01-09' and tbl_15.col_93 > 7623.679908049186 order by r0,r1,r2,r3 limit 79 ;", + "Plan": [ + "Limit 1.00 root offset:0, count:79", + "└─Sort 1.00 root Column#11, Column#12, Column#13, Column#14", + " └─HashAgg 1.00 root funcs:avg(distinct Column#89)->Column#11, funcs:min(Column#90)->Column#12, funcs:sum(distinct Column#91)->Column#13, 
funcs:max(Column#92)->Column#14", + " └─Projection 7100.44 root cast(test.tbl_15.col_96, decimal(10,0) UNSIGNED BINARY)->Column#89, Column#15->Column#90, cast(test.tbl_15.col_91, decimal(3,0) UNSIGNED BINARY)->Column#91, Column#16->Column#92", + " └─PartitionUnion 7100.44 root ", + " ├─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#18)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#20)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#18, funcs:max(test.tbl_15.col_92)->Column#20", + " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p0 pushed down filter:empty, keep order:false, stats:pseudo", + " ├─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#30)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#32)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#30, funcs:max(test.tbl_15.col_92)->Column#32", + " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p1 pushed down filter:empty, keep order:false, stats:pseudo", + " ├─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#42)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#44)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, 
funcs:min(test.tbl_15.col_92)->Column#42, funcs:max(test.tbl_15.col_92)->Column#44", + " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p2 pushed down filter:empty, keep order:false, stats:pseudo", + " └─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#54)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#56)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", + " └─ExchangeReceiver 1775.11 mpp[tiflash] ", + " └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", + " └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#54, funcs:max(test.tbl_15.col_92)->Column#56", + " └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", + " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p3 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ agg_to_cop() hash_agg() */ count(1) from tbl_15 ;", + "Plan": [ + "HashAgg 1.00 root funcs:count(Column#12)->Column#11", + "└─PartitionUnion 4.00 root ", + " ├─HashAgg 1.00 root funcs:count(Column#13)->Column#12", + " │ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#13", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p0 keep order:false, stats:pseudo", + " ├─HashAgg 1.00 root funcs:count(Column#14)->Column#12", + " │ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#14", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p1 keep order:false, stats:pseudo", + " ├─HashAgg 1.00 root funcs:count(Column#15)->Column#12", + " │ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#15", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p2 keep order:false, stats:pseudo", + " └─HashAgg 1.00 root funcs:count(Column#16)->Column#12", + " └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#16", + " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p3 keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select /*+ agg_to_cop() stream_agg() */ avg( tbl_16.col_100 ) as r0 from tbl_16 where tbl_16.col_100 in ( 10672141 ) or tbl_16.col_104 in ( 'yfEG1t!*b' ,'C1*bqx_qyO' ,'vQ^yUpKHr&j#~' ) group by tbl_16.col_100 order by r0 limit 20 ;", + "Plan": [ + "TopN 20.00 root 
Column#10, offset:0, count:20", + "└─HashAgg 63.95 root group by:test.tbl_16.col_100, funcs:avg(Column#11, Column#12)->Column#10", + " └─PartitionUnion 63.95 root ", + " ├─StreamAgg 31.98 root group by:Column#19, funcs:count(Column#19)->Column#11, funcs:sum(Column#20)->Column#12, funcs:firstrow(Column#21)->test.tbl_16.col_100", + " │ └─Projection 39.97 root test.tbl_16.col_100->Column#19, cast(test.tbl_16.col_100, decimal(8,0) UNSIGNED BINARY)->Column#20, test.tbl_16.col_100->Column#21", + " │ └─Sort 39.97 root test.tbl_16.col_100", + " │ └─TableReader 39.97 root MppVersion: 2, data:ExchangeSender", + " │ └─ExchangeSender 39.97 mpp[tiflash] ExchangeType: PassThrough", + " │ └─Selection 39.97 mpp[tiflash] or(eq(test.tbl_16.col_100, 10672141), in(test.tbl_16.col_104, \"yfEG1t!*b\", \"C1*bqx_qyO\", \"vQ^yUpKHr&j#~\"))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_16, partition:p0 pushed down filter:empty, keep order:false, stats:pseudo", + " └─StreamAgg 31.98 root group by:Column#22, funcs:count(Column#22)->Column#11, funcs:sum(Column#23)->Column#12, funcs:firstrow(Column#24)->test.tbl_16.col_100", + " └─Projection 39.97 root test.tbl_16.col_100->Column#22, cast(test.tbl_16.col_100, decimal(8,0) UNSIGNED BINARY)->Column#23, test.tbl_16.col_100->Column#24", + " └─Sort 39.97 root test.tbl_16.col_100", + " └─TableReader 39.97 root MppVersion: 2, data:ExchangeSender", + " └─ExchangeSender 39.97 mpp[tiflash] ExchangeType: PassThrough", + " └─Selection 39.97 mpp[tiflash] or(eq(test.tbl_16.col_100, 10672141), in(test.tbl_16.col_104, \"yfEG1t!*b\", \"C1*bqx_qyO\", \"vQ^yUpKHr&j#~\"))", + " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_16, partition:p1 pushed down filter:empty, keep order:false, stats:pseudo" + ], + "Warning": null + } + ] + }, + { + "Name": "TestIssues49377Plan", + "Cases": [ + { + "SQL": "select 1,1,1 union all ((select * from employee where dept_id = 1) union all ( select * from employee where dept_id = 1 order by employee_id ) order by 1 );", + "Plan": [ + "Union 21.00 root ", + "├─Projection 1.00 root 1->Column#15, 1->Column#16, 1->Column#17", + "│ └─TableDual 1.00 root rows:1", + "└─Projection 20.00 root cast(Column#12, bigint(11) BINARY)->Column#15, Column#13->Column#16, cast(Column#14, bigint(11) BINARY)->Column#17", + " └─Sort 20.00 root Column#12", + " └─Union 20.00 root ", + " ├─TableReader 10.00 root data:Selection", + " │ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + " │ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + " └─Sort 10.00 root test.employee.employee_id", + " └─TableReader 10.00 root data:Selection", + " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select 1,1,1 union all ((select * from employee where dept_id = 1) union all ( select * from employee where dept_id = 1 order by employee_id ) order by 1 limit 1);", + "Plan": [ + "Union 2.00 root ", + "├─Projection 1.00 root 1->Column#15, 1->Column#16, 1->Column#17", + "│ └─TableDual 1.00 root rows:1", + "└─Projection 1.00 root cast(Column#12, bigint(11) BINARY)->Column#15, Column#13->Column#16, cast(Column#14, bigint(11) BINARY)->Column#17", + " └─TopN 1.00 root Column#12, offset:0, count:1", + " └─Union 2.00 root ", + " ├─TopN 1.00 root test.employee.employee_id, offset:0, count:1", + " │ └─TableReader 1.00 root data:TopN", + " │ └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", + " │ 
└─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + " │ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + " └─TopN 1.00 root test.employee.employee_id, offset:0, count:1", + " └─TableReader 1.00 root data:TopN", + " └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", + " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id) union all ( select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id ) limit 1);", + "Plan": [ + "Union 21.00 root ", + "├─TableReader 10.00 root data:Selection", + "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + "├─Sort 10.00 root test.employee.employee_id", + "│ └─TableReader 10.00 root data:Selection", + "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + "└─Limit 1.00 root offset:0, count:1", + " └─Union 1.00 root ", + " ├─Limit 1.00 root offset:0, count:1", + " │ └─TableReader 1.00 root data:Limit", + " │ └─Limit 1.00 cop[tikv] offset:0, count:1", + " │ └─Selection 1.00 cop[tikv] eq(test.employee.dept_id, 1)", + " │ └─TableFullScan 1000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + " └─TopN 1.00 root test.employee.employee_id, offset:0, count:1", + " └─TableReader 1.00 root data:TopN", + " └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", + " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id) union all ( select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id ) order by 1 limit 1);", + "Plan": [ + "Union 21.00 root ", + "├─TableReader 10.00 root data:Selection", + "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + "├─Sort 10.00 root test.employee.employee_id", + "│ └─TableReader 10.00 root data:Selection", + "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + "└─TopN 1.00 root Column#17, offset:0, count:1", + " └─Union 2.00 root ", + " ├─TopN 1.00 root test.employee.employee_id, offset:0, count:1", + " │ └─TableReader 1.00 root data:TopN", + " │ └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", + " │ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + " │ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", + " └─TopN 1.00 root test.employee.employee_id, offset:0, count:1", + " └─TableReader 1.00 root data:TopN", + " └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", + " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", + " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" + ], + "Warning": null + } + ] + }, + { + "Name": "TestPointgetIndexChoosen", + 
"Cases": [ + { + "SQL": "select * from t where b=1 and c='1';", + "Plan": [ + "Point_Get 1.00 root table:t, index:ubc(b, c) " + ], + "Warning": null + }, + { + "SQL": "select * from t where b=1 and c='1' and d='1';", + "Plan": [ + "Selection 0.00 root eq(test.t.d, \"1\")", + "└─Point_Get 1.00 root table:t, index:ubc(b, c) " + ], + "Warning": null + }, + { + "SQL": "select * from t where b in (1,2,3) and c in ('1');", + "Plan": [ + "Batch_Point_Get 3.00 root table:t, index:ubc(b, c) keep order:false, desc:false" + ], + "Warning": null + } + ] + }, + { + "Name": "TestAlwaysTruePredicateWithSubquery", + "Cases": [ + { + "SQL": "SHOW ERRORS WHERE TRUE = ALL ( SELECT TRUE GROUP BY 1 LIMIT 1 ) IS NULL IS NOT NULL;", + "Plan": null, + "Warning": null + }, + { + "SQL": "explain select * from t WHERE TRUE = ALL ( SELECT TRUE GROUP BY 1 LIMIT 1 ) IS NULL IS NOT NULL;", + "Plan": [ + "HashJoin_14 10000.00 root CARTESIAN inner join", + "├─StreamAgg_19(Build) 1.00 root funcs:count(1)->Column#12", + "│ └─Limit_22 1.00 root offset:0, count:1", + "│ └─HashAgg_23 1.00 root group by:1, funcs:firstrow(1)->Column#13", + "│ └─TableDual_24 1.00 root rows:1", + "└─TableReader_17(Probe) 10000.00 root data:TableFullScan_16", + " └─TableFullScan_16 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain select * from t WHERE TRUE = ALL ( SELECT TRUE from t GROUP BY 1 LIMIT 1 ) is null is not null;", + "Plan": [ + "HashJoin_14 10000.00 root CARTESIAN inner join", + "├─StreamAgg_19(Build) 1.00 root funcs:count(1)->Column#15", + "│ └─Limit_22 1.00 root offset:0, count:1", + "│ └─HashAgg_27 1.00 root group by:Column#17, funcs:firstrow(Column#18)->Column#16", + "│ └─TableReader_28 1.00 root data:HashAgg_23", + "│ └─HashAgg_23 1.00 cop[tikv] group by:1, funcs:firstrow(1)->Column#18", + "│ └─TableFullScan_26 10000.00 cop[tikv] table:t keep order:false, stats:pseudo", + "└─TableReader_17(Probe) 10000.00 root data:TableFullScan_16", + " └─TableFullScan_16 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + } + ] + }, + { + "Name": "TestExplainExpand", + "Cases": [ + { + "SQL": "explain format = 'brief' select count(1) from t group by a, b with rollup; -- 1. simple agg", + "Plan": [ + "HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:count(1)->Column#10", + "└─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' select sum(c), count(1) from t group by a, b with rollup; -- 2. 
non-grouping set col c", + "Plan": [ + "HashAgg 8000.00 root group by:Column#15, Column#16, Column#17, funcs:sum(Column#14)->Column#10, funcs:count(1)->Column#11", + "└─Projection 10000.00 root cast(test.t.c, decimal(10,0) BINARY)->Column#14, Column#7->Column#15, Column#8->Column#16, gid->Column#17", + " └─Expand 10000.00 root level-projection:[test.t.c, ->Column#7, ->Column#8, 0->gid],[test.t.c, Column#7, ->Column#8, 1->gid],[test.t.c, Column#7, Column#8, 3->gid]; schema: [test.t.c,Column#7,Column#8,gid]", + " └─Projection 10000.00 root test.t.c, test.t.a->Column#7, test.t.b->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' select count(a) from t group by a, b with rollup; -- 3. should keep the original col a", + "Plan": [ + "HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:count(test.t.a)->Column#10", + "└─Expand 10000.00 root level-projection:[test.t.a, ->Column#7, ->Column#8, 0->gid],[test.t.a, Column#7, ->Column#8, 1->gid],[test.t.a, Column#7, Column#8, 3->gid]; schema: [test.t.a,Column#7,Column#8,gid]", + " └─Projection 10000.00 root test.t.a, test.t.a->Column#7, test.t.b->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' select grouping(a) from t group by a, b with rollup; -- 4. contain grouping function ref to grouping set column a", + "Plan": [ + "Projection 8000.00 root grouping(gid)->Column#11", + "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(gid)->gid", + " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' select grouping(a,b) from t group by a, b with rollup; -- 5. grouping function contains grouping set column a,c", + "Plan": [ + "Projection 8000.00 root grouping(gid)->Column#11", + "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(gid)->gid", + " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' select a, grouping(b,a) from t group by a,b with rollup; -- 6. resolve normal column a to grouping set column a'", + "Plan": [ + "Projection 8000.00 root Column#7->Column#11, grouping(gid)->Column#12", + "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(gid)->gid", + " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' select a+1, grouping(b) from t group by a+1, b with rollup; -- 7. 
resolve field list a+1 to grouping set column a+1", + "Plan": [ + "Projection 8000.00 root Column#7->Column#11, grouping(gid)->Column#12", + "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(gid)->gid", + " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", + " └─Projection 10000.00 root plus(test.t.a, 1)->Column#7, test.t.b->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT SUM(profit) AS profit FROM sales GROUP BY year+2, year+profit WITH ROLLUP order by year+2; -- 8. order by item year+2 resolve to gby grouping expression", + "Plan": [ + "Projection 8000.00 root Column#10", + "└─Sort 8000.00 root Column#7", + " └─HashAgg 8000.00 root group by:Column#15, Column#16, Column#17, funcs:sum(Column#14)->Column#10, funcs:firstrow(Column#15)->Column#7", + " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#14, Column#7->Column#15, Column#8->Column#16, gid->Column#17", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", + " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT year+2, SUM(profit) AS profit FROM sales GROUP BY year+2, year+profit WITH ROLLUP order by year+2; -- 9. order by item year+2 resolve to select field", + "Plan": [ + "Projection 8000.00 root Column#7->Column#11, Column#10", + "└─Sort 8000.00 root Column#7", + " └─HashAgg 8000.00 root group by:Column#16, Column#17, Column#18, funcs:sum(Column#15)->Column#10, funcs:firstrow(Column#16)->Column#7", + " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", + " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT year+2 as y, SUM(profit) as profit FROM sales GROUP BY year+2, year+profit WITH ROLLUP having y > 2002 order by year+2, profit; -- 10. 
having (year+2) shouldn't be pushed down", + "Plan": [ + "Projection 6400.00 root Column#7, Column#10", + "└─Sort 6400.00 root Column#7, Column#10", + " └─HashAgg 6400.00 root group by:Column#16, Column#17, Column#18, funcs:sum(Column#15)->Column#10, funcs:firstrow(Column#16)->Column#7", + " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18", + " └─Selection 8000.00 root gt(Column#7, 2002)", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", + " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT year+2 as y, SUM(profit) AS profit, grouping(year+2) FROM sales GROUP BY year+2, year+profit WITH ROLLUP having y > 2002 order by year+2, profit; -- 11. grouping function validation", + "Plan": [ + "Sort 6400.00 root Column#7, Column#10", + "└─Projection 6400.00 root Column#7, Column#10, grouping(gid)->Column#11", + " └─HashAgg 6400.00 root group by:Column#19, Column#20, Column#21, funcs:sum(Column#18)->Column#10, funcs:firstrow(Column#19)->Column#7, funcs:firstrow(Column#20)->gid", + " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#18, Column#7->Column#19, gid->Column#20, Column#8->Column#21", + " └─Selection 8000.00 root gt(Column#7, 2002)", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", + " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT year, country, product, SUM(profit) AS profit FROM sales GROUP BY year, country, product with rollup order by grouping(year); -- 12. 
grouping function in order by clause", + "Plan": [ + "Projection 8000.00 root Column#7, Column#8->Column#13, Column#9->Column#14, Column#11", + "└─Projection 8000.00 root Column#11, Column#7, Column#8, Column#9, gid", + " └─Sort 8000.00 root Column#21", + " └─Projection 8000.00 root Column#11, Column#7, Column#8, Column#9, gid, grouping(gid)->Column#21", + " └─HashAgg 8000.00 root group by:Column#17, Column#18, Column#19, Column#20, funcs:sum(Column#16)->Column#11, funcs:firstrow(Column#17)->Column#7, funcs:firstrow(Column#18)->Column#8, funcs:firstrow(Column#19)->Column#9, funcs:firstrow(Column#20)->gid", + " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#16, Column#7->Column#17, Column#8->Column#18, Column#9->Column#19, gid->Column#20", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, ->Column#9, 0->gid],[test.sales.profit, Column#7, ->Column#8, ->Column#9, 1->gid],[test.sales.profit, Column#7, Column#8, ->Column#9, 3->gid],[test.sales.profit, Column#7, Column#8, Column#9, 7->gid]; schema: [test.sales.profit,Column#7,Column#8,Column#9,gid]", + " └─Projection 10000.00 root test.sales.profit, test.sales.year->Column#7, test.sales.country->Column#8, test.sales.product->Column#9", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT country, product, SUM(profit) AS profit FROM sales GROUP BY country, country, product with rollup order by grouping(country); -- 13. 12 under gpos case", + "Plan": [ + "Projection 8000.00 root Column#7, Column#8->Column#13, Column#11", + "└─Projection 8000.00 root Column#11, Column#7, Column#8, gid", + " └─Sort 8000.00 root Column#20", + " └─Projection 8000.00 root Column#11, Column#7, Column#8, gid, grouping(gid)->Column#20", + " └─HashAgg 8000.00 root group by:Column#16, Column#16, Column#17, Column#18, Column#19, funcs:sum(Column#15)->Column#11, funcs:firstrow(Column#16)->Column#7, funcs:firstrow(Column#17)->Column#8, funcs:firstrow(Column#18)->gid", + " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18, gpos->Column#19", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid, 0->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 1->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 2->gpos],[test.sales.profit, Column#7, Column#8, 3->gid, 3->gpos]; schema: [test.sales.profit,Column#7,Column#8,gid,gpos]", + " └─Projection 10000.00 root test.sales.profit, test.sales.country->Column#7, test.sales.product->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT year, country, product, SUM(profit) AS profit FROM sales GROUP BY year, country, product with rollup having grouping(year) > 0 order by grouping(year); -- 14. 
grouping function in having clause", + "Plan": [ + "Projection 6400.00 root Column#7, Column#8->Column#13, Column#9->Column#14, Column#11", + "└─Projection 6400.00 root Column#11, Column#7, Column#8, Column#9, gid", + " └─Sort 6400.00 root Column#21", + " └─Projection 6400.00 root Column#11, Column#7, Column#8, Column#9, gid, grouping(gid)->Column#21", + " └─HashAgg 6400.00 root group by:Column#17, Column#18, Column#19, Column#20, funcs:sum(Column#16)->Column#11, funcs:firstrow(Column#17)->Column#7, funcs:firstrow(Column#18)->Column#8, funcs:firstrow(Column#19)->Column#9, funcs:firstrow(Column#20)->gid", + " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#16, Column#7->Column#17, Column#8->Column#18, Column#9->Column#19, gid->Column#20", + " └─Selection 8000.00 root gt(grouping(gid), 0)", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, ->Column#9, 0->gid],[test.sales.profit, Column#7, ->Column#8, ->Column#9, 1->gid],[test.sales.profit, Column#7, Column#8, ->Column#9, 3->gid],[test.sales.profit, Column#7, Column#8, Column#9, 7->gid]; schema: [test.sales.profit,Column#7,Column#8,Column#9,gid]", + " └─Projection 10000.00 root test.sales.profit, test.sales.year->Column#7, test.sales.country->Column#8, test.sales.product->Column#9", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT country, product, SUM(profit) AS profit FROM sales GROUP BY country, country, product with rollup having grouping(country) > 0 order by grouping(country); -- 15. 14 under gpos case", + "Plan": [ + "Projection 6400.00 root Column#7, Column#8->Column#13, Column#11", + "└─Projection 6400.00 root Column#11, Column#7, Column#8, gid", + " └─Sort 6400.00 root Column#20", + " └─Projection 6400.00 root Column#11, Column#7, Column#8, gid, grouping(gid)->Column#20", + " └─HashAgg 6400.00 root group by:Column#16, Column#16, Column#17, Column#18, Column#19, funcs:sum(Column#15)->Column#11, funcs:firstrow(Column#16)->Column#7, funcs:firstrow(Column#17)->Column#8, funcs:firstrow(Column#18)->gid", + " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18, gpos->Column#19", + " └─Selection 8000.00 root gt(grouping(gid), 0)", + " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid, 0->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 1->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 2->gpos],[test.sales.profit, Column#7, Column#8, 3->gid, 3->gpos]; schema: [test.sales.profit,Column#7,Column#8,gid,gpos]", + " └─Projection 10000.00 root test.sales.profit, test.sales.country->Column#7, test.sales.product->Column#8", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + }, + { + "SQL": "explain format = 'brief' SELECT year, country, product, grouping(year, country, product) from sales group by year, country, product with rollup having grouping(year, country, product) <> 0; -- 16. 
grouping function recreating fix", + "Plan": [ + "Projection 6400.00 root Column#7->Column#12, Column#8->Column#13, Column#9->Column#14, grouping(gid)->Column#15", + "└─HashAgg 6400.00 root group by:Column#7, Column#8, Column#9, gid, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(Column#8)->Column#8, funcs:firstrow(Column#9)->Column#9, funcs:firstrow(gid)->gid", + " └─Selection 8000.00 root ne(grouping(gid), 0)", + " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, ->Column#9, 0->gid],[Column#7, ->Column#8, ->Column#9, 1->gid],[Column#7, Column#8, ->Column#9, 3->gid],[Column#7, Column#8, Column#9, 7->gid]; schema: [Column#7,Column#8,Column#9,gid]", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" + ], + "Warning": null + } + ] + } +] diff --git a/pkg/planner/core/find_best_task.go b/pkg/planner/core/find_best_task.go new file mode 100644 index 0000000000000..c098a8320b796 --- /dev/null +++ b/pkg/planner/core/find_best_task.go @@ -0,0 +1,3016 @@ +// Copyright 2017 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "cmp" + "fmt" + "math" + "slices" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/planner/cardinality" + "github.com/pingcap/tidb/pkg/planner/core/base" + "github.com/pingcap/tidb/pkg/planner/core/cost" + "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" + "github.com/pingcap/tidb/pkg/planner/property" + "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" + "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" + "github.com/pingcap/tidb/pkg/planner/util/utilfuncp" + "github.com/pingcap/tidb/pkg/statistics" + "github.com/pingcap/tidb/pkg/types" + tidbutil "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/chunk" + "github.com/pingcap/tidb/pkg/util/collate" + h "github.com/pingcap/tidb/pkg/util/hint" + "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/ranger" + "github.com/pingcap/tidb/pkg/util/tracing" + "go.uber.org/zap" +) + +// PlanCounterDisabled is the default value of PlanCounterTp, indicating that optimizer needn't force a plan. +var PlanCounterDisabled base.PlanCounterTp = -1 + +// GetPropByOrderByItems will check if this sort property can be pushed or not. In order to simplify the problem, we only +// consider the case that all expression are columns. 
+func GetPropByOrderByItems(items []*util.ByItems) (*property.PhysicalProperty, bool) {
+	propItems := make([]property.SortItem, 0, len(items))
+	for _, item := range items {
+		col, ok := item.Expr.(*expression.Column)
+		if !ok {
+			return nil, false
+		}
+		propItems = append(propItems, property.SortItem{Col: col, Desc: item.Desc})
+	}
+	return &property.PhysicalProperty{SortItems: propItems}, true
+}
+
+// GetPropByOrderByItemsContainScalarFunc will check if this sort property can be pushed or not. In order to simplify the
+// problem, we only consider the case that all expressions are columns or some special scalar functions.
+func GetPropByOrderByItemsContainScalarFunc(items []*util.ByItems) (*property.PhysicalProperty, bool, bool) {
+	propItems := make([]property.SortItem, 0, len(items))
+	onlyColumn := true
+	for _, item := range items {
+		switch expr := item.Expr.(type) {
+		case *expression.Column:
+			propItems = append(propItems, property.SortItem{Col: expr, Desc: item.Desc})
+		case *expression.ScalarFunction:
+			col, desc := expr.GetSingleColumn(item.Desc)
+			if col == nil {
+				return nil, false, false
+			}
+			propItems = append(propItems, property.SortItem{Col: col, Desc: desc})
+			onlyColumn = false
+		default:
+			return nil, false, false
+		}
+	}
+	return &property.PhysicalProperty{SortItems: propItems}, true, onlyColumn
+}
+
+func findBestTask4LogicalTableDual(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, opt *optimizetrace.PhysicalOptimizeOp) (base.Task, int64, error) {
+	p := lp.(*logicalop.LogicalTableDual)
+	// If the required property is not empty and the row count > 1,
+	// we cannot ensure this required property.
+	// But if the row count is 0 or 1, we don't need to care about the property.
+	if (!prop.IsSortItemEmpty() && p.RowCount > 1) || planCounter.Empty() {
+		return base.InvalidTask, 0, nil
+	}
+	dual := PhysicalTableDual{
+		RowCount: p.RowCount,
+	}.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset())
+	dual.SetSchema(p.Schema())
+	planCounter.Dec(1)
+	utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, dual, prop)
+	rt := &RootTask{}
+	rt.SetPlan(dual)
+	rt.SetEmpty(p.RowCount == 0)
+	return rt, 1, nil
+}
+
+func findBestTask4LogicalShow(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, int64, error) {
+	p := lp.(*logicalop.LogicalShow)
+	if !prop.IsSortItemEmpty() || planCounter.Empty() {
+		return base.InvalidTask, 0, nil
+	}
+	pShow := PhysicalShow{ShowContents: p.ShowContents, Extractor: p.Extractor}.Init(p.SCtx())
+	pShow.SetSchema(p.Schema())
+	planCounter.Dec(1)
+	rt := &RootTask{}
+	rt.SetPlan(pShow)
+	return rt, 1, nil
+}
+
+func findBestTask4LogicalShowDDLJobs(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, int64, error) {
+	p := lp.(*logicalop.LogicalShowDDLJobs)
+	if !prop.IsSortItemEmpty() || planCounter.Empty() {
+		return base.InvalidTask, 0, nil
+	}
+	pShow := PhysicalShowDDLJobs{JobNumber: p.JobNumber}.Init(p.SCtx())
+	pShow.SetSchema(p.Schema())
+	planCounter.Dec(1)
+	rt := &RootTask{}
+	rt.SetPlan(pShow)
+	return rt, 1, nil
+}
+
+// rebuildChildTasks rebuilds the childTasks to make the clock_th (i.e., the planCounter-th) combination.
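+// To make the clock decomposition concrete, here is an editorial walk-through (illustrative numbers, not from
+// the original source): suppose childCnts = [2, 3], so multAll = 6 combinations in total, and planCounter = 5.
+// For child 0, multAll becomes 3 and curClock = (5-1)/3 + 1 = 2, so its 2nd best plan is picked, and the counter
+// is reduced to (5-1)%3 + 1 = 2; for child 1, multAll becomes 1 and curClock = (2-1)/1 + 1 = 2, so its 2nd best
+// plan is picked as well. Combination #5 thus maps to the (2nd, 2nd) child-plan pair, exactly like reading the
+// digits of a mixed-radix number.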
+func rebuildChildTasks(p *logicalop.BaseLogicalPlan, childTasks *[]base.Task, pp base.PhysicalPlan, childCnts []int64, planCounter int64, ts uint64, opt *optimizetrace.PhysicalOptimizeOp) error {
+	// The taskMap of children nodes should be rolled back first.
+	for _, child := range p.Children() {
+		child.RollBackTaskMap(ts)
+	}
+
+	multAll := int64(1)
+	var curClock base.PlanCounterTp
+	for _, x := range childCnts {
+		multAll *= x
+	}
+	*childTasks = (*childTasks)[:0]
+	for j, child := range p.Children() {
+		multAll /= childCnts[j]
+		curClock = base.PlanCounterTp((planCounter-1)/multAll + 1)
+		childTask, _, err := child.FindBestTask(pp.GetChildReqProps(j), &curClock, opt)
+		planCounter = (planCounter-1)%multAll + 1
+		if err != nil {
+			return err
+		}
+		if curClock != 0 {
+			return errors.Errorf("PlanCounterTp planCounter is not handled")
+		}
+		if childTask != nil && childTask.Invalid() {
+			return errors.Errorf("The current plan is invalid, please skip this plan")
+		}
+		*childTasks = append(*childTasks, childTask)
+	}
+	return nil
+}
+
+func enumeratePhysicalPlans4Task(
+	p *logicalop.BaseLogicalPlan,
+	physicalPlans []base.PhysicalPlan,
+	prop *property.PhysicalProperty,
+	addEnforcer bool,
+	planCounter *base.PlanCounterTp,
+	opt *optimizetrace.PhysicalOptimizeOp,
+) (base.Task, int64, error) {
+	var bestTask base.Task = base.InvalidTask
+	var curCntPlan, cntPlan int64
+	var err error
+	childTasks := make([]base.Task, 0, p.ChildLen())
+	childCnts := make([]int64, p.ChildLen())
+	cntPlan = 0
+	iteration := iteratePhysicalPlan4BaseLogical
+	if _, ok := p.Self().(*logicalop.LogicalSequence); ok {
+		iteration = iterateChildPlan4LogicalSequence
+	}
+
+	for _, pp := range physicalPlans {
+		timeStampNow := p.GetLogicalTS4TaskMap()
+		savedPlanID := p.SCtx().GetSessionVars().PlanID.Load()
+
+		childTasks, curCntPlan, childCnts, err = iteration(p, pp, childTasks, childCnts, prop, opt)
+		if err != nil {
+			return nil, 0, err
+		}
+
+		// This check makes sure that there is no invalid child task.
+		if len(childTasks) != p.ChildLen() {
+			continue
+		}
+
+		// If the target plan can be found in this physicalPlan(pp), rebuild childTasks to build the corresponding combination.
+		if planCounter.IsForce() && int64(*planCounter) <= curCntPlan {
+			p.SCtx().GetSessionVars().PlanID.Store(savedPlanID)
+			curCntPlan = int64(*planCounter)
+			err := rebuildChildTasks(p, &childTasks, pp, childCnts, int64(*planCounter), timeStampNow, opt)
+			if err != nil {
+				return nil, 0, err
+			}
+		}
+
+		// Combine the best child tasks with the parent physical plan.
+		curTask := pp.Attach2Task(childTasks...)
+		if curTask.Invalid() {
+			continue
+		}
+
+		// The optimal task may not satisfy the required property, so it should be converted to a root task here.
+		if _, ok := curTask.(*RootTask); !ok && prop.TaskTp == property.RootTaskType {
+			curTask = curTask.ConvertToRootTask(p.SCtx())
+		}
+
+		// Enforce the curTask property.
+		if addEnforcer {
+			curTask = enforceProperty(prop, curTask, p.Plan.SCtx())
+		}
+
+		// Optimize by the shuffle executor to run in a parallel manner.
+		if _, isMpp := curTask.(*MppTask); !isMpp && prop.IsSortItemEmpty() {
+			// Currently, we do not regard a shuffled plan as a new plan.
+			curTask = optimizeByShuffle(curTask, p.Plan.SCtx())
+		}
+
+		cntPlan += curCntPlan
+		planCounter.Dec(curCntPlan)
+
+		if planCounter.Empty() {
+			bestTask = curTask
+			break
+		}
+		utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop)
+		// Get the most efficient one.
+		if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil {
+			return nil, 0, err
+		} else if curIsBetter {
+			bestTask = curTask
+		}
+	}
+	return bestTask, cntPlan, nil
+}
+
+// iteratePhysicalPlan4BaseLogical is used to iterate the physical plan and get all child tasks.
+func iteratePhysicalPlan4BaseLogical(
+	p *logicalop.BaseLogicalPlan,
+	selfPhysicalPlan base.PhysicalPlan,
+	childTasks []base.Task,
+	childCnts []int64,
+	_ *property.PhysicalProperty,
+	opt *optimizetrace.PhysicalOptimizeOp,
+) ([]base.Task, int64, []int64, error) {
+	// Find the best child tasks first.
+	childTasks = childTasks[:0]
+	// The curCntPlan records the number of possible plans for pp.
+	curCntPlan := int64(1)
+	for j, child := range p.Children() {
+		childProp := selfPhysicalPlan.GetChildReqProps(j)
+		childTask, cnt, err := child.FindBestTask(childProp, &PlanCounterDisabled, opt)
+		childCnts[j] = cnt
+		if err != nil {
+			return nil, 0, childCnts, err
+		}
+		curCntPlan = curCntPlan * cnt
+		if childTask != nil && childTask.Invalid() {
+			return nil, 0, childCnts, nil
+		}
+		childTasks = append(childTasks, childTask)
+	}
+
+	// This check makes sure that there is no invalid child task.
+	if len(childTasks) != p.ChildLen() {
+		return nil, 0, childCnts, nil
+	}
+	return childTasks, curCntPlan, childCnts, nil
+}
+
+// iterateChildPlan4LogicalSequence does the special part for LogicalSequence. We need to iterate its children one by one to check whether the former child is a valid plan, and then go to the next one.
+func iterateChildPlan4LogicalSequence(
+	p *logicalop.BaseLogicalPlan,
+	selfPhysicalPlan base.PhysicalPlan,
+	childTasks []base.Task,
+	childCnts []int64,
+	prop *property.PhysicalProperty,
+	opt *optimizetrace.PhysicalOptimizeOp,
+) ([]base.Task, int64, []int64, error) {
+	// Find the best child tasks first.
+	childTasks = childTasks[:0]
+	// The curCntPlan records the number of possible plans for pp.
+	curCntPlan := int64(1)
+	lastIdx := p.ChildLen() - 1
+	for j := 0; j < lastIdx; j++ {
+		child := p.Children()[j]
+		childProp := selfPhysicalPlan.GetChildReqProps(j)
+		childTask, cnt, err := child.FindBestTask(childProp, &PlanCounterDisabled, opt)
+		childCnts[j] = cnt
+		if err != nil {
+			return nil, 0, nil, err
+		}
+		curCntPlan = curCntPlan * cnt
+		if childTask != nil && childTask.Invalid() {
+			return nil, 0, nil, nil
+		}
+		_, isMpp := childTask.(*MppTask)
+		if !isMpp && prop.IsFlashProp() {
+			break
+		}
+		childTasks = append(childTasks, childTask)
+	}
+	// This check makes sure that there is no invalid child task.
+	if len(childTasks) != p.ChildLen()-1 {
+		return nil, 0, nil, nil
+	}
+
+	lastChildProp := selfPhysicalPlan.GetChildReqProps(lastIdx).CloneEssentialFields()
+	if lastChildProp.IsFlashProp() {
+		lastChildProp.CTEProducerStatus = property.AllCTECanMpp
+	}
+	lastChildTask, cnt, err := p.Children()[lastIdx].FindBestTask(lastChildProp, &PlanCounterDisabled, opt)
+	childCnts[lastIdx] = cnt
+	if err != nil {
+		return nil, 0, nil, err
+	}
+	curCntPlan = curCntPlan * cnt
+	if lastChildTask != nil && lastChildTask.Invalid() {
+		return nil, 0, nil, nil
+	}
+
+	if _, ok := lastChildTask.(*MppTask); !ok && lastChildProp.CTEProducerStatus == property.AllCTECanMpp {
+		return nil, 0, nil, nil
+	}
+
+	childTasks = append(childTasks, lastChildTask)
+	return childTasks, curCntPlan, childCnts, nil
+}
+
+// compareTaskCost compares the costs of curTask and bestTask, and returns whether curTask's cost is smaller than bestTask's.
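+// For example (editorial note): a curTask costing 10.0 beats a bestTask costing 12.5, so true is returned;
+// an invalid curTask always loses, and an invalid bestTask always loses to a valid curTask, mirroring the
+// curInvalid/bestInvalid checks in the body.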
+func compareTaskCost(curTask, bestTask base.Task, op *optimizetrace.PhysicalOptimizeOp) (curIsBetter bool, err error) {
+	curCost, curInvalid, err := utilfuncp.GetTaskPlanCost(curTask, op)
+	if err != nil {
+		return false, err
+	}
+	bestCost, bestInvalid, err := utilfuncp.GetTaskPlanCost(bestTask, op)
+	if err != nil {
+		return false, err
+	}
+	if curInvalid {
+		return false, nil
+	}
+	if bestInvalid {
+		return true, nil
+	}
+	return curCost < bestCost, nil
+}
+
+// getTaskPlanCost returns the cost of this task.
+// The new cost interface will be used if EnableNewCostInterface is true.
+// The second returned value indicates whether this task is valid.
+func getTaskPlanCost(t base.Task, pop *optimizetrace.PhysicalOptimizeOp) (float64, bool, error) {
+	if t.Invalid() {
+		return math.MaxFloat64, true, nil
+	}
+
+	// use the new cost interface
+	var (
+		taskType         property.TaskType
+		indexPartialCost float64
+	)
+	switch t.(type) {
+	case *RootTask:
+		taskType = property.RootTaskType
+	case *CopTask: // no need to know whether the task is single-read or double-read, so both CopSingleReadTaskType and CopDoubleReadTaskType are OK
+		cop := t.(*CopTask)
+		if cop.indexPlan != nil && cop.tablePlan != nil { // handle IndexLookup specially
+			taskType = property.CopMultiReadTaskType
+			// keep compatible with the old cost interface, for CopMultiReadTask, the cost is idxCost + tblCost.
+			if !cop.indexPlanFinished { // only consider index cost in this case
+				idxCost, err := getPlanCost(cop.indexPlan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
+				return idxCost, false, err
+			}
+			// consider both sides
+			idxCost, err := getPlanCost(cop.indexPlan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
+			if err != nil {
+				return 0, false, err
+			}
+			tblCost, err := getPlanCost(cop.tablePlan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
+			if err != nil {
+				return 0, false, err
+			}
+			return idxCost + tblCost, false, nil
+		}
+
+		taskType = property.CopSingleReadTaskType
+
+		// TiFlash can run cop tasks as well, so check whether this cop task will run on TiKV or TiFlash.
+		if cop.tablePlan != nil {
+			leafNode := cop.tablePlan
+			for len(leafNode.Children()) > 0 {
+				leafNode = leafNode.Children()[0]
+			}
+			if tblScan, isScan := leafNode.(*PhysicalTableScan); isScan && tblScan.StoreType == kv.TiFlash {
+				taskType = property.MppTaskType
+			}
+		}
+
+		// For the detailed reason, refer to the comment in function `convertToIndexMergeScan`:
+		// for a cop task with {indexPlan=nil, tablePlan=xxx, idxMergePartPlans=[x,x,x], indexPlanFinished=true}, we should
+		// add the partial index plan costs into the final cost, because the code below that uses t.Plan() only calculates
+		// the cost of the table plan.
+		if cop.indexPlanFinished && len(cop.idxMergePartPlans) != 0 {
+			for _, partialScan := range cop.idxMergePartPlans {
+				partialCost, err := getPlanCost(partialScan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
+				if err != nil {
+					return 0, false, err
+				}
+				indexPartialCost += partialCost
+			}
+		}
+	case *MppTask:
+		taskType = property.MppTaskType
+	default:
+		return 0, false, errors.New("unknown task type")
+	}
+	if t.Plan() == nil {
+		// It's a very special case for index merge.
+		// t.plan() == nil in the index merge COP case; in other words, it means indexPlanFinished is false.
+ cost := 0.0 + copTsk := t.(*CopTask) + for _, partialScan := range copTsk.idxMergePartPlans { + partialCost, err := getPlanCost(partialScan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) + if err != nil { + return 0, false, err + } + cost += partialCost + } + return cost, false, nil + } + cost, err := getPlanCost(t.Plan(), taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) + return cost + indexPartialCost, false, err +} + +func appendCandidate4PhysicalOptimizeOp(pop *optimizetrace.PhysicalOptimizeOp, lp base.LogicalPlan, pp base.PhysicalPlan, prop *property.PhysicalProperty) { + if pop == nil || pop.GetTracer() == nil || pp == nil { + return + } + candidate := &tracing.CandidatePlanTrace{ + PlanTrace: &tracing.PlanTrace{TP: pp.TP(), ID: pp.ID(), + ExplainInfo: pp.ExplainInfo(), ProperType: prop.String()}, + MappingLogicalPlan: tracing.CodecPlanName(lp.TP(), lp.ID())} + pop.GetTracer().AppendCandidate(candidate) + + // for PhysicalIndexMergeJoin/PhysicalIndexHashJoin/PhysicalIndexJoin, it will use innerTask as a child instead of calling findBestTask, + // and innerTask.plan() will be appended to planTree in appendChildCandidate using empty MappingLogicalPlan field, so it won't mapping with the logic plan, + // that will cause no physical plan when the logic plan got selected. + // the fix to add innerTask.plan() to planTree and mapping correct logic plan + index := -1 + var plan base.PhysicalPlan + switch join := pp.(type) { + case *PhysicalIndexMergeJoin: + index = join.InnerChildIdx + plan = join.innerPlan + case *PhysicalIndexHashJoin: + index = join.InnerChildIdx + plan = join.innerPlan + case *PhysicalIndexJoin: + index = join.InnerChildIdx + plan = join.innerPlan + } + if index != -1 { + child := lp.(*logicalop.BaseLogicalPlan).Children()[index] + candidate := &tracing.CandidatePlanTrace{ + PlanTrace: &tracing.PlanTrace{TP: plan.TP(), ID: plan.ID(), + ExplainInfo: plan.ExplainInfo(), ProperType: prop.String()}, + MappingLogicalPlan: tracing.CodecPlanName(child.TP(), child.ID())} + pop.GetTracer().AppendCandidate(candidate) + } + pp.AppendChildCandidate(pop) +} + +func appendPlanCostDetail4PhysicalOptimizeOp(pop *optimizetrace.PhysicalOptimizeOp, detail *tracing.PhysicalPlanCostDetail) { + if pop == nil || pop.GetTracer() == nil { + return + } + pop.GetTracer().PhysicalPlanCostDetails[fmt.Sprintf("%v_%v", detail.GetPlanType(), detail.GetPlanID())] = detail +} + +// findBestTask is key workflow that drive logic plan tree to generate optimal physical ones. +// The logic inside it is mainly about physical plan numeration and task encapsulation, it should +// be defined in core pkg, and be called by logic plan in their logic interface implementation. +func findBestTask(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, + opt *optimizetrace.PhysicalOptimizeOp) (bestTask base.Task, cntPlan int64, err error) { + p := lp.GetBaseLogicalPlan().(*logicalop.BaseLogicalPlan) + // If p is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself, + // and set inner child prop nil, so here we do nothing. + if prop == nil { + return nil, 1, nil + } + // Look up the task with this prop in the task map. + // It's used to reduce double counting. 
+ bestTask = p.GetTask(prop) + if bestTask != nil { + planCounter.Dec(1) + return bestTask, 1, nil + } + + canAddEnforcer := prop.CanAddEnforcer + + if prop.TaskTp != property.RootTaskType && !prop.IsFlashProp() { + // Currently all plan cannot totally push down to TiKV. + p.StoreTask(prop, base.InvalidTask) + return base.InvalidTask, 0, nil + } + + cntPlan = 0 + // prop should be read only because its cached hashcode might be not consistent + // when it is changed. So we clone a new one for the temporary changes. + newProp := prop.CloneEssentialFields() + var plansFitsProp, plansNeedEnforce []base.PhysicalPlan + var hintWorksWithProp bool + // Maybe the plan can satisfy the required property, + // so we try to get the task without the enforced sort first. + plansFitsProp, hintWorksWithProp, err = p.Self().ExhaustPhysicalPlans(newProp) + if err != nil { + return nil, 0, err + } + if !hintWorksWithProp && !newProp.IsSortItemEmpty() { + // If there is a hint in the plan and the hint cannot satisfy the property, + // we enforce this property and try to generate the PhysicalPlan again to + // make sure the hint can work. + canAddEnforcer = true + } + + if canAddEnforcer { + // Then, we use the empty property to get physicalPlans and + // try to get the task with an enforced sort. + newProp.SortItems = []property.SortItem{} + newProp.SortItemsForPartition = []property.SortItem{} + newProp.ExpectedCnt = math.MaxFloat64 + newProp.MPPPartitionCols = nil + newProp.MPPPartitionTp = property.AnyType + var hintCanWork bool + plansNeedEnforce, hintCanWork, err = p.Self().ExhaustPhysicalPlans(newProp) + if err != nil { + return nil, 0, err + } + if hintCanWork && !hintWorksWithProp { + // If the hint can work with the empty property, but cannot work with + // the required property, we give up `plansFitProp` to make sure the hint + // can work. + plansFitsProp = nil + } + if !hintCanWork && !hintWorksWithProp && !prop.CanAddEnforcer { + // If the original property is not enforced and hint cannot + // work anyway, we give up `plansNeedEnforce` for efficiency, + plansNeedEnforce = nil + } + newProp = prop + } + + var cnt int64 + var curTask base.Task + if bestTask, cnt, err = enumeratePhysicalPlans4Task(p, plansFitsProp, newProp, false, planCounter, opt); err != nil { + return nil, 0, err + } + cntPlan += cnt + if planCounter.Empty() { + goto END + } + + curTask, cnt, err = enumeratePhysicalPlans4Task(p, plansNeedEnforce, newProp, true, planCounter, opt) + if err != nil { + return nil, 0, err + } + cntPlan += cnt + if planCounter.Empty() { + bestTask = curTask + goto END + } + utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop) + if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil { + return nil, 0, err + } else if curIsBetter { + bestTask = curTask + } + +END: + p.StoreTask(prop, bestTask) + return bestTask, cntPlan, nil +} + +func findBestTask4LogicalMemTable(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, opt *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) { + p := lp.(*logicalop.LogicalMemTable) + if prop.MPPPartitionTp != property.AnyType { + return base.InvalidTask, 0, nil + } + + // If prop.CanAddEnforcer is true, the prop.SortItems need to be set nil for p.findBestTask. + // Before function return, reset it for enforcing task prop. 
+ oldProp := prop.CloneEssentialFields() + if prop.CanAddEnforcer { + // First, get the bestTask without enforced prop + prop.CanAddEnforcer = false + cnt := int64(0) + t, cnt, err = p.FindBestTask(prop, planCounter, opt) + if err != nil { + return nil, 0, err + } + prop.CanAddEnforcer = true + if t != base.InvalidTask { + cntPlan = cnt + return + } + // Next, get the bestTask with enforced prop + prop.SortItems = []property.SortItem{} + } + defer func() { + if err != nil { + return + } + if prop.CanAddEnforcer { + *prop = *oldProp + t = enforceProperty(prop, t, p.Plan.SCtx()) + prop.CanAddEnforcer = true + } + }() + + if !prop.IsSortItemEmpty() || planCounter.Empty() { + return base.InvalidTask, 0, nil + } + memTable := PhysicalMemTable{ + DBName: p.DBName, + Table: p.TableInfo, + Columns: p.Columns, + Extractor: p.Extractor, + QueryTimeRange: p.QueryTimeRange, + }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset()) + memTable.SetSchema(p.Schema()) + planCounter.Dec(1) + utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, memTable, prop) + rt := &RootTask{} + rt.SetPlan(memTable) + return rt, 1, nil +} + +// tryToGetDualTask will check if the push down predicate has false constant. If so, it will return table dual. +func tryToGetDualTask(ds *DataSource) (base.Task, error) { + for _, cond := range ds.PushedDownConds { + if con, ok := cond.(*expression.Constant); ok && con.DeferredExpr == nil && con.ParamMarker == nil { + result, _, err := expression.EvalBool(ds.SCtx().GetExprCtx().GetEvalCtx(), []expression.Expression{cond}, chunk.Row{}) + if err != nil { + return nil, err + } + if !result { + dual := PhysicalTableDual{}.Init(ds.SCtx(), ds.StatsInfo(), ds.QueryBlockOffset()) + dual.SetSchema(ds.Schema()) + rt := &RootTask{} + rt.SetPlan(dual) + return rt, nil + } + } + } + return nil, nil +} + +// candidatePath is used to maintain required info for skyline pruning. +type candidatePath struct { + path *util.AccessPath + accessCondsColMap util.Col2Len // accessCondsColMap maps Column.UniqueID to column length for the columns in AccessConds. + indexCondsColMap util.Col2Len // indexCondsColMap maps Column.UniqueID to column length for the columns in AccessConds and indexFilters. + isMatchProp bool +} + +func compareBool(l, r bool) int { + if l == r { + return 0 + } + if !l { + return -1 + } + return 1 +} + +func compareIndexBack(lhs, rhs *candidatePath) (int, bool) { + result := compareBool(lhs.path.IsSingleScan, rhs.path.IsSingleScan) + if result == 0 && !lhs.path.IsSingleScan { + // if both lhs and rhs need to access table after IndexScan, we utilize the set of columns that occurred in AccessConds and IndexFilters + // to compare how many table rows will be accessed. + return util.CompareCol2Len(lhs.indexCondsColMap, rhs.indexCondsColMap) + } + return result, true +} + +func compareGlobalIndex(lhs, rhs *candidatePath) int { + if lhs.path.IsTablePath() || rhs.path.IsTablePath() || + len(lhs.path.PartialIndexPaths) != 0 || len(rhs.path.PartialIndexPaths) != 0 { + return 0 + } + return compareBool(lhs.path.Index.Global, rhs.path.Index.Global) +} + +// compareCandidates is the core of skyline pruning, which is used to decide which candidate path is better. +// The return value is 1 if lhs is better, -1 if rhs is better, 0 if they are equivalent or not comparable. 
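+// As a worked illustration (editorial, not from the original source): if lhs matches the required sort property
+// while rhs does not (matchResult = 1), and the two paths tie on access-column coverage, double-scan requirement,
+// and global-index status, then the factor sum is positive and 1 is returned. If instead lhs wins on access-column
+// coverage while rhs wins on single-scan, the paths are incomparable under skyline pruning and 0 is returned,
+// keeping both candidates alive.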
+func compareCandidates(sctx base.PlanContext, prop *property.PhysicalProperty, lhs, rhs *candidatePath) int {
+	// Due to #50125, full scan on MVIndex has been disabled, so an MVIndex path might lead to a 'can't find a proper plan' error at the end.
+	// Avoid letting an MVIndex path exclude all other paths and lead to a 'can't find a proper plan' error; see #49438 for an example.
+	if isMVIndexPath(lhs.path) || isMVIndexPath(rhs.path) {
+		return 0
+	}
+
+	// This rule is empirical but not always correct.
+	// If x's range row count is significantly lower than y's, for example, 1000 times, we think x is better.
+	if lhs.path.CountAfterAccess > 100 && rhs.path.CountAfterAccess > 100 && // to prevent some extreme cases, e.g. 0.01 : 10
+		len(lhs.path.PartialIndexPaths) == 0 && len(rhs.path.PartialIndexPaths) == 0 && // not IndexMerge since its row count estimation is not accurate enough
+		prop.ExpectedCnt == math.MaxFloat64 { // Limit may affect access row count
+		threshold := float64(fixcontrol.GetIntWithDefault(sctx.GetSessionVars().OptimizerFixControl, fixcontrol.Fix45132, 1000))
+		if threshold > 0 { // set it to 0 to disable this rule
+			if lhs.path.CountAfterAccess/rhs.path.CountAfterAccess > threshold {
+				return -1
+			}
+			if rhs.path.CountAfterAccess/lhs.path.CountAfterAccess > threshold {
+				return 1
+			}
+		}
+	}
+
+	// Below compares the two candidate paths on four dimensions:
+	// (1): the set of columns that occurred in the access condition,
+	// (2): whether it requires a double scan,
+	// (3): whether or not it matches the physical property,
+	// (4): whether it is a global index path.
+	// If `x` is not worse than `y` on all factors,
+	// and there exists one factor on which `x` is better than `y`, then `x` is better than `y`.
+	accessResult, comparable1 := util.CompareCol2Len(lhs.accessCondsColMap, rhs.accessCondsColMap)
+	if !comparable1 {
+		return 0
+	}
+	scanResult, comparable2 := compareIndexBack(lhs, rhs)
+	if !comparable2 {
+		return 0
+	}
+	matchResult, globalResult := compareBool(lhs.isMatchProp, rhs.isMatchProp), compareGlobalIndex(lhs, rhs)
+	sum := accessResult + scanResult + matchResult + globalResult
+	if accessResult >= 0 && scanResult >= 0 && matchResult >= 0 && globalResult >= 0 && sum > 0 {
+		return 1
+	}
+	if accessResult <= 0 && scanResult <= 0 && matchResult <= 0 && globalResult <= 0 && sum < 0 {
+		return -1
+	}
+	return 0
+}
+
+func isMatchProp(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) bool {
+	var isMatchProp bool
+	if path.IsIntHandlePath {
+		pkCol := ds.getPKIsHandleCol()
+		if len(prop.SortItems) == 1 && pkCol != nil {
+			isMatchProp = prop.SortItems[0].Col.EqualColumn(pkCol)
+			if path.StoreType == kv.TiFlash {
+				isMatchProp = isMatchProp && !prop.SortItems[0].Desc
+			}
+		}
+		return isMatchProp
+	}
+	all, _ := prop.AllSameOrder()
+	// When the prop is empty or `all` is false, it is better for `isMatchProp` to be `false` because
+	// the index scan then need not keep order.
+
+	// Basically, if `prop.SortItems` is the prefix of `path.IdxCols`, then `isMatchProp` is true. However, we need to consider
+	// the situations when some columns of `path.IdxCols` are evaluated as constant. For example:
+	// ```
+	// create table t(a int, b int, c int, d int, index idx_a_b_c(a, b, c), index idx_d_c_b_a(d, c, b, a));
+	// select * from t where a = 1 order by b, c;
+	// select * from t where b = 1 order by a, c;
+	// select * from t where d = 1 and b = 2 order by c, a;
+	// select * from t where d = 1 and b = 2 order by c, b, a;
+	// ```
+	// In the first two `SELECT` statements, `idx_a_b_c` matches the sort order. In the last two `SELECT` statements, `idx_d_c_b_a`
+	// matches the sort order. Hence, we use `path.ConstCols` to deal with the above situations.
+	if !prop.IsSortItemEmpty() && all && len(path.IdxCols) >= len(prop.SortItems) {
+		isMatchProp = true
+		i := 0
+		for _, sortItem := range prop.SortItems {
+			found := false
+			for ; i < len(path.IdxCols); i++ {
+				if path.IdxColLens[i] == types.UnspecifiedLength && sortItem.Col.EqualColumn(path.IdxCols[i]) {
+					found = true
+					i++
+					break
+				}
+				if path.ConstCols == nil || i >= len(path.ConstCols) || !path.ConstCols[i] {
+					break
+				}
+			}
+			if !found {
+				isMatchProp = false
+				break
+			}
+		}
+	}
+	return isMatchProp
+}
+
+// matchPropForIndexMergeAlternatives matches the prop against the alternatives inside PartialAlternativeIndexPaths, and
+// chooses one matched alternative as the determined index merge partial path for each dimension of
+// PartialAlternativeIndexPaths. Finally, after we have collected all the decided index merge partial paths, we output a
+// concrete index merge path with the field PartialIndexPaths filled in.
+//
+// As we mentioned before, after deriveStats is done, the normal index OR path will be generated as below:
+//
+//	`create table t (a int, b int, c int, key a(a), key b(b), key ac(a, c), key bc(b, c))`
+//	`explain format='verbose' select * from t where a=1 or b=1 order by c`
+//
+// In this case, the normal index merge OR path should be:
+// for a=1, it has two partial alternative paths: [a, ac]
+// for b=1, it has two partial alternative paths: [b, bc]
+// and the index merge path:
+//
+//	indexMergePath: {
+//	    PartialIndexPaths: empty                          // 1D array here, currently not decided yet.
+//	    PartialAlternativeIndexPaths: [[a, ac], [b, bc]]  // 2D array here, each for one DNF item's choices.
+//	}
+//
+// Let's say we have a prop requirement like sort by [c] here; we will choose the better one [ac] (because it can keep
+// order) for the first batch [a, ac] from PartialAlternativeIndexPaths, and choose the better one [bc] (because it can
+// keep order too) for the second batch [b, bc] from PartialAlternativeIndexPaths. Finally, we output a concrete index
+// merge path as
+//
+//	indexMergePath: {
+//	    PartialIndexPaths: [ac, bc]                      // just collected since they match the prop.
+//	    ...
+//	}
+//
+// What if the prop is empty? Then the choice from [a, ac] and [b, bc] is quite random, decided just by their
+// countAfterAccess. That's why we use a slices.SortStableFunc(matchIdxes, func(a, b int){}) inside there. After sorting,
+// matchIdxes is in ascending order of the matched paths' countAfterAccess, so choosing the first one is straightforward.
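+//
+// (Editorial example: with an empty prop and alternatives [a, ac] for a=1, if ac's estimated countAfterAccess is
+// 80 rows while a's is 120, the sorted matchIdxes put ac first, so ac is chosen for this DNF item.)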
+//
+// There is another case, shown below, where just picking the first one after matchIdxes is ordered is not always right:
+// special logic for alternative paths:
+//
+//	index merge:
+//	   matched paths-1: {pk, index1}
+//	   matched paths-2: {pk}
+//
+// If we choose the first one as we said above, say pk here in the first matched paths, then matched paths-2 has no
+// choice (given the avoid-all-same-index logic inside) but pk; this will make every partial path use the same single
+// index and fail. So we need to sort the matchIdxes again according to the length of their matched paths, which means:
+//
+//	index merge:
+//	   matched paths-2: {pk}
+//	   matched paths-1: {pk, index1}
+//
+// and let matched paths-2 be the first to make its determination, choosing pk here; then it is matched paths-1's turn.
+// Since pk is already occupied, the avoid-all-same-index logic inside will pick index1 here, so the work can be done.
+//
+// At last, we rewrite the real countAfterAccess according to determinedIndexPartialPaths; this part is moved from
+// deriveStats to here.
+func matchPropForIndexMergeAlternatives(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) (*util.AccessPath, bool) {
+	// target:
+	// 1: in the index merge case, try to match every alternative partial path to the order property as far as
+	// possible, and generate a property-matched index merge path if any.
+	// 2: if the prop is empty (meaning no sort requirement), we will generate a random index partial combination
+	// path from all alternatives so that an index merge path can still come out.
+
+	// The execution part doesn't support the merge operation for the intersection case yet.
+	if path.IndexMergeIsIntersection {
+		return nil, false
+	}
+
+	noSortItem := prop.IsSortItemEmpty()
+	allSame, _ := prop.AllSameOrder()
+	if !allSame {
+		return nil, false
+	}
+	// step1: match the property from all the index partial alternative paths.
+	determinedIndexPartialPaths := make([]*util.AccessPath, 0, len(path.PartialAlternativeIndexPaths))
+	usedIndexMap := make(map[int64]struct{}, 1)
+	type idxWrapper struct {
+		// matchIdx records those alternative paths from one alternative-path set that match the prop.
+		// like we said above, for a=1, it has two partial alternative paths: [a, ac]
+		// if we meet an empty property here, matchIdx from [a, ac] for a=1 will be both, i.e., [0,1]
+		// if we meet a sort[c] property here, matchIdx from [a, ac] for a=1 will be [1]
+		matchIdx []int
+		// pathIdx is the original position offset indicating where the current matchIdx is
+		// computed from. eg: [[a, ac], [b, bc]] for sort[c] property:
+		// idxWrapper{[ac], 0}, 0 is the offset in the first dimension of PartialAlternativeIndexPaths
+		// idxWrapper{[bc], 1}, 1 is the offset in the first dimension of PartialAlternativeIndexPaths
+		pathIdx int
+	}
+	allMatchIdxes := make([]idxWrapper, 0, len(path.PartialAlternativeIndexPaths))
+	// special logic for alternative paths:
+	//   index merge:
+	//      path1: {pk, index1}
+	//      path2: {pk}
+	//   if we choose pk in the first path, then path2 has no choice but pk; this will result in the all-single-index failure.
+	//   so we should collect all prop-matching paths, stored as matchIdxes here.
+	for pathIdx, oneItemAlternatives := range path.PartialAlternativeIndexPaths {
+		matchIdxes := make([]int, 0, 1)
+		for i, oneIndexAlternativePath := range oneItemAlternatives {
+			// if there are sort items and this path doesn't match the prop, continue.
+			if !noSortItem && !isMatchProp(ds, oneIndexAlternativePath, prop) {
+				continue
+			}
+			// two possibilities here:
+			// 1. no sort item is required.
+			// 2. the path matches the sort items.
+			matchIdxes = append(matchIdxes, i)
+		}
+		if len(matchIdxes) == 0 {
+			// if none of the index alternatives of one CNF item can match the sort property,
+			// the entire index merge union path can be ignored for this sort property; return false.
+			return nil, false
+		}
+		if len(matchIdxes) > 1 {
+			// if matchIdxes has more than one element, sort the matched alternative paths by their CountAfterAccess.
+			tmpOneItemAlternatives := oneItemAlternatives
+			slices.SortStableFunc(matchIdxes, func(a, b int) int {
+				lhsCountAfter := tmpOneItemAlternatives[a].CountAfterAccess
+				if len(tmpOneItemAlternatives[a].IndexFilters) > 0 {
+					lhsCountAfter = tmpOneItemAlternatives[a].CountAfterIndex
+				}
+				rhsCountAfter := tmpOneItemAlternatives[b].CountAfterAccess
+				if len(tmpOneItemAlternatives[b].IndexFilters) > 0 {
+					rhsCountAfter = tmpOneItemAlternatives[b].CountAfterIndex
+				}
+				res := cmp.Compare(lhsCountAfter, rhsCountAfter)
+				if res != 0 {
+					return res
+				}
+				// If the CountAfterAccess is the same, a global index path should come first.
+				var lIsGlobalIndex, rIsGlobalIndex int
+				if !tmpOneItemAlternatives[a].IsTablePath() && tmpOneItemAlternatives[a].Index.Global {
+					lIsGlobalIndex = 1
+				}
+				if !tmpOneItemAlternatives[b].IsTablePath() && tmpOneItemAlternatives[b].Index.Global {
+					rIsGlobalIndex = 1
+				}
+				return -cmp.Compare(lIsGlobalIndex, rIsGlobalIndex)
+			})
+		}
+		allMatchIdxes = append(allMatchIdxes, idxWrapper{matchIdxes, pathIdx})
+	}
+	// sort allMatchIdxes by the length of their matchIdx.
+	// index merge:                index merge:
+	//   path1: {pk, index1}  ==>    path2: {pk}
+	//   path2: {pk}                 path1: {pk, index1}
+	// here path2's choice is fixed to pk, so let it choose first and leave the choice of index1 to path1.
+	slices.SortStableFunc(allMatchIdxes, func(a, b idxWrapper) int {
+		lhsLen := len(a.matchIdx)
+		rhsLen := len(b.matchIdx)
+		return cmp.Compare(lhsLen, rhsLen)
+	})
+	for _, matchIdxes := range allMatchIdxes {
+		// since allMatchIdxes is ordered by the length of matchIdx,
+		// we should use matchIdxes.pathIdx to locate where it comes from.
+		alternatives := path.PartialAlternativeIndexPaths[matchIdxes.pathIdx]
+		found := false
+		// pick the most suitable index partial alternative from all the matched alternative paths in ascending
+		// order of CountAfterAccess; this way, a distinct index is preferred.
+		for _, oneIdx := range matchIdxes.matchIdx {
+			var indexID int64
+			if alternatives[oneIdx].IsTablePath() {
+				indexID = -1
+			} else {
+				indexID = alternatives[oneIdx].Index.ID
+			}
+			if _, ok := usedIndexMap[indexID]; !ok {
+				// try to avoid the case where all index partial paths use a single index.
+				determinedIndexPartialPaths = append(determinedIndexPartialPaths, alternatives[oneIdx].Clone())
+				usedIndexMap[indexID] = struct{}{}
+				found = true
+				break
+			}
+		}
+		if !found {
+			// just pick the index with the same name (using the first one is ok), since some other partial path
+			// may still pick a distinctive index path later.
+			determinedIndexPartialPaths = append(determinedIndexPartialPaths, alternatives[matchIdxes.matchIdx[0]].Clone())
+			// usedIndexMap[alternatives[matchIdxes.matchIdx[0]].Index.ID] = struct{}{} must already be marked.
+		}
+	}
+	if len(usedIndexMap) == 1 {
+		// if all partial paths use the same index, the index merge is meaningless; fail over.
+		return nil, false
+	}
+	// step2: generate a new **concrete** index merge path.
+	indexMergePath := &util.AccessPath{
+		PartialIndexPaths:        determinedIndexPartialPaths,
+		IndexMergeIsIntersection: false,
+		// inherit the determined table filters that can't be pushed down.
+		TableFilters: path.TableFilters,
+	}
+	// path.KeepIndexMergeORSourceFilter records whether some part of the CNF item already couldn't be pushed down to TiKV.
+	shouldKeepCurrentFilter := path.KeepIndexMergeORSourceFilter
+	pushDownCtx := util.GetPushDownCtx(ds.SCtx())
+	for _, path := range determinedIndexPartialPaths {
+		// If any partial path contains table filters, we need to keep the whole DNF filter in the Selection.
+		if len(path.TableFilters) > 0 {
+			if !expression.CanExprsPushDown(pushDownCtx, path.TableFilters, kv.TiKV) {
+				// if these table filters can't be pushed down, all of them should be kept on the table side,
+				// so clean the lookup side here.
+				path.TableFilters = nil
+			}
+			shouldKeepCurrentFilter = true
+		}
+		// If any partial path's index filter cannot be pushed to TiKV, we should keep the whole DNF filter.
+		if len(path.IndexFilters) != 0 && !expression.CanExprsPushDown(pushDownCtx, path.IndexFilters, kv.TiKV) {
+			shouldKeepCurrentFilter = true
+			// Clear IndexFilter, the whole filter will be put in indexMergePath.TableFilters.
+			path.IndexFilters = nil
+		}
+	}
+	// Keep this filter as a part of the table filters for safety if it has any parameter.
+	if expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), []expression.Expression{path.IndexMergeORSourceFilter}) {
+		shouldKeepCurrentFilter = true
+	}
+	if shouldKeepCurrentFilter {
+		// add the CNF expression back as a table filter.
+		indexMergePath.TableFilters = append(indexMergePath.TableFilters, path.IndexMergeORSourceFilter)
+	}
+
+	// step3: after the index merge path is determined, compute the countAfterAccess as usual.
+	accessConds := make([]expression.Expression, 0, len(determinedIndexPartialPaths))
+	for _, p := range determinedIndexPartialPaths {
+		indexCondsForP := p.AccessConds[:]
+		indexCondsForP = append(indexCondsForP, p.IndexFilters...)
+		if len(indexCondsForP) > 0 {
+			accessConds = append(accessConds, expression.ComposeCNFCondition(ds.SCtx().GetExprCtx(), indexCondsForP...))
+		}
+	}
+	accessDNF := expression.ComposeDNFCondition(ds.SCtx().GetExprCtx(), accessConds...)
+	sel, _, err := cardinality.Selectivity(ds.SCtx(), ds.TableStats.HistColl, []expression.Expression{accessDNF}, nil)
+	if err != nil {
+		logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err))
+		sel = cost.SelectionFactor
+	}
+	indexMergePath.CountAfterAccess = sel * ds.TableStats.RowCount
+	if noSortItem {
+		// Since there is no sort property, the index merge case is generated by combining the alternatives with
+		// the lowest countAfterAccess, so the returned matchProperty should be false.
+		return indexMergePath, false
+	}
+	return indexMergePath, true
+}
+
+func isMatchPropForIndexMerge(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) bool {
+	// Execution part doesn't support the merge operation for intersection case yet.
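+	// For example (illustrative): for `select * from t where a > 1 and b > 1 order by c` with intersection
+	// partial paths on idx(a, c) and idx(b, c), the intersection executor matches rows by handle rather than
+	// by c, so the output order carries no guarantee and the prop cannot be matched.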
+ if path.IndexMergeIsIntersection { + return false + } + allSame, _ := prop.AllSameOrder() + if !allSame { + return false + } + for _, partialPath := range path.PartialIndexPaths { + if !isMatchProp(ds, partialPath, prop) { + return false + } + } + return true +} + +func getTableCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath { + candidate := &candidatePath{path: path} + candidate.isMatchProp = isMatchProp(ds, path, prop) + candidate.accessCondsColMap = util.ExtractCol2Len(ds.SCtx().GetExprCtx().GetEvalCtx(), path.AccessConds, nil, nil) + return candidate +} + +func getIndexCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath { + candidate := &candidatePath{path: path} + candidate.isMatchProp = isMatchProp(ds, path, prop) + candidate.accessCondsColMap = util.ExtractCol2Len(ds.SCtx().GetExprCtx().GetEvalCtx(), path.AccessConds, path.IdxCols, path.IdxColLens) + candidate.indexCondsColMap = util.ExtractCol2Len(ds.SCtx().GetExprCtx().GetEvalCtx(), append(path.AccessConds, path.IndexFilters...), path.FullIdxCols, path.FullIdxColLens) + return candidate +} + +func convergeIndexMergeCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath { + // since the all index path alternative paths is collected and undetermined, and we should determine a possible and concrete path for this prop. + possiblePath, match := matchPropForIndexMergeAlternatives(ds, path, prop) + if possiblePath == nil { + return nil + } + candidate := &candidatePath{path: possiblePath, isMatchProp: match} + return candidate +} + +func getIndexMergeCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath { + candidate := &candidatePath{path: path} + candidate.isMatchProp = isMatchPropForIndexMerge(ds, path, prop) + return candidate +} + +// skylinePruning prunes access paths according to different factors. An access path can be pruned only if +// there exists a path that is not worse than it at all factors and there is at least one better factor. +func skylinePruning(ds *DataSource, prop *property.PhysicalProperty) []*candidatePath { + candidates := make([]*candidatePath, 0, 4) + for _, path := range ds.PossibleAccessPaths { + // We should check whether the possible access path is valid first. + if path.StoreType != kv.TiFlash && prop.IsFlashProp() { + continue + } + if len(path.PartialAlternativeIndexPaths) > 0 { + // OR normal index merge path, try to determine every index partial path for this property. + candidate := convergeIndexMergeCandidate(ds, path, prop) + if candidate != nil { + candidates = append(candidates, candidate) + } + continue + } + if path.PartialIndexPaths != nil { + candidates = append(candidates, getIndexMergeCandidate(ds, path, prop)) + continue + } + // if we already know the range of the scan is empty, just return a TableDual + if len(path.Ranges) == 0 { + return []*candidatePath{{path: path}} + } + var currentCandidate *candidatePath + if path.IsTablePath() { + currentCandidate = getTableCandidate(ds, path, prop) + } else { + if !(len(path.AccessConds) > 0 || !prop.IsSortItemEmpty() || path.Forced || path.IsSingleScan) { + continue + } + // We will use index to generate physical plan if any of the following conditions is satisfied: + // 1. This path's access cond is not nil. + // 2. We have a non-empty prop to match. + // 3. This index is forced to choose. + // 4. The needed columns are all covered by index columns(and handleCol). 
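+			// For example (illustrative): for `select b from t where b > 1`, an index idx(b) has a non-empty
+			// access condition (1) and covers all the needed columns (4), so it stays a candidate even
+			// without a hint or a sort requirement.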
+ currentCandidate = getIndexCandidate(ds, path, prop) + } + pruned := false + for i := len(candidates) - 1; i >= 0; i-- { + if candidates[i].path.StoreType == kv.TiFlash { + continue + } + result := compareCandidates(ds.SCtx(), prop, candidates[i], currentCandidate) + if result == 1 { + pruned = true + // We can break here because the current candidate cannot prune others anymore. + break + } else if result == -1 { + candidates = append(candidates[:i], candidates[i+1:]...) + } + } + if !pruned { + candidates = append(candidates, currentCandidate) + } + } + + preferRange := ds.SCtx().GetSessionVars().GetAllowPreferRangeScan() && (ds.TableStats.HistColl.Pseudo || ds.TableStats.RowCount < 1) + // If we've forced an index merge - we want to keep these plans + preferMerge := len(ds.IndexMergeHints) > 0 || fixcontrol.GetBoolWithDefault( + ds.SCtx().GetSessionVars().GetOptimizerFixControlMap(), + fixcontrol.Fix52869, + false, + ) + if preferRange && len(candidates) > 1 { + // If a candidate path is TiFlash-path or forced-path or MV index, we just keep them. For other candidate paths, if there exists + // any range scan path, we remove full scan paths and keep range scan paths. + preferredPaths := make([]*candidatePath, 0, len(candidates)) + var hasRangeScanPath bool + for _, c := range candidates { + if c.path.Forced || c.path.StoreType == kv.TiFlash || (c.path.Index != nil && c.path.Index.MVIndex) { + preferredPaths = append(preferredPaths, c) + continue + } + var unsignedIntHandle bool + if c.path.IsIntHandlePath && ds.TableInfo.PKIsHandle { + if pkColInfo := ds.TableInfo.GetPkColInfo(); pkColInfo != nil { + unsignedIntHandle = mysql.HasUnsignedFlag(pkColInfo.GetFlag()) + } + } + if !ranger.HasFullRange(c.path.Ranges, unsignedIntHandle) { + // Preference plans with equals/IN predicates or where there is more filtering in the index than against the table + equalPlan := c.path.EqCondCount > 0 || c.path.EqOrInCondCount > 0 + indexFilters := len(c.path.TableFilters) < len(c.path.IndexFilters) + if preferMerge || (((equalPlan || indexFilters) && prop.IsSortItemEmpty()) || c.isMatchProp) { + preferredPaths = append(preferredPaths, c) + hasRangeScanPath = true + } + } + } + if hasRangeScanPath { + return preferredPaths + } + } + + return candidates +} + +func getPruningInfo(ds *DataSource, candidates []*candidatePath, prop *property.PhysicalProperty) string { + if len(candidates) == len(ds.PossibleAccessPaths) { + return "" + } + if len(candidates) == 1 && len(candidates[0].path.Ranges) == 0 { + // For TableDual, we don't need to output pruning info. 
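+		// Otherwise, the note emitted later looks like (illustrative):
+		//   [t,idx_a,IndexMerge{idx_b,idx_c}] remain after pruning paths for t given Prop{SortItems: [], TaskTp: rootTask}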
+ return "" + } + names := make([]string, 0, len(candidates)) + var tableName string + if ds.TableAsName.O == "" { + tableName = ds.TableInfo.Name.O + } else { + tableName = ds.TableAsName.O + } + getSimplePathName := func(path *util.AccessPath) string { + if path.IsTablePath() { + if path.StoreType == kv.TiFlash { + return tableName + "(tiflash)" + } + return tableName + } + return path.Index.Name.O + } + for _, cand := range candidates { + if cand.path.PartialIndexPaths != nil { + partialNames := make([]string, 0, len(cand.path.PartialIndexPaths)) + for _, partialPath := range cand.path.PartialIndexPaths { + partialNames = append(partialNames, getSimplePathName(partialPath)) + } + names = append(names, fmt.Sprintf("IndexMerge{%s}", strings.Join(partialNames, ","))) + } else { + names = append(names, getSimplePathName(cand.path)) + } + } + items := make([]string, 0, len(prop.SortItems)) + for _, item := range prop.SortItems { + items = append(items, item.String()) + } + return fmt.Sprintf("[%s] remain after pruning paths for %s given Prop{SortItems: [%s], TaskTp: %s}", + strings.Join(names, ","), tableName, strings.Join(items, " "), prop.TaskTp) +} + +func isPointGetConvertableSchema(ds *DataSource) bool { + for _, col := range ds.Columns { + if col.Name.L == model.ExtraHandleName.L { + continue + } + + // Only handle tables that all columns are public. + if col.State != model.StatePublic { + return false + } + } + return true +} + +// exploreEnforcedPlan determines whether to explore enforced plans for this DataSource if it has already found an unenforced plan. +// See #46177 for more information. +func exploreEnforcedPlan(ds *DataSource) bool { + // default value is false to keep it compatible with previous versions. + return fixcontrol.GetBoolWithDefault(ds.SCtx().GetSessionVars().GetOptimizerFixControlMap(), fixcontrol.Fix46177, false) +} + +func findBestTask4DS(ds *DataSource, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, opt *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) { + // If ds is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself, + // and set inner child prop nil, so here we do nothing. + if prop == nil { + planCounter.Dec(1) + return nil, 1, nil + } + if ds.IsForUpdateRead && ds.SCtx().GetSessionVars().TxnCtx.IsExplicit { + hasPointGetPath := false + for _, path := range ds.PossibleAccessPaths { + if isPointGetPath(ds, path) { + hasPointGetPath = true + break + } + } + tblName := ds.TableInfo.Name + ds.PossibleAccessPaths, err = filterPathByIsolationRead(ds.SCtx(), ds.PossibleAccessPaths, tblName, ds.DBName) + if err != nil { + return nil, 1, err + } + if hasPointGetPath { + newPaths := make([]*util.AccessPath, 0) + for _, path := range ds.PossibleAccessPaths { + // if the path is the point get range path with for update lock, we should forbid tiflash as it's store path (#39543) + if path.StoreType != kv.TiFlash { + newPaths = append(newPaths, path) + } + } + ds.PossibleAccessPaths = newPaths + } + } + t = ds.GetTask(prop) + if t != nil { + cntPlan = 1 + planCounter.Dec(1) + return + } + var cnt int64 + var unenforcedTask base.Task + // If prop.CanAddEnforcer is true, the prop.SortItems need to be set nil for ds.findBestTask. + // Before function return, reset it for enforcing task prop and storing map. 
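+	// For example (illustrative): for `select * from t order by a`, the unenforced plan reads idx(a) with
+	// keep-order, while the enforced plan does an unordered scan plus a Sort enforcer on top; both are costed
+	// below and the cheaper one wins.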
+ oldProp := prop.CloneEssentialFields() + if prop.CanAddEnforcer { + // First, get the bestTask without enforced prop + prop.CanAddEnforcer = false + unenforcedTask, cnt, err = ds.FindBestTask(prop, planCounter, opt) + if err != nil { + return nil, 0, err + } + if !unenforcedTask.Invalid() && !exploreEnforcedPlan(ds) { + ds.StoreTask(prop, unenforcedTask) + return unenforcedTask, cnt, nil + } + + // Then, explore the bestTask with enforced prop + prop.CanAddEnforcer = true + cntPlan += cnt + prop.SortItems = []property.SortItem{} + prop.MPPPartitionTp = property.AnyType + } else if prop.MPPPartitionTp != property.AnyType { + return base.InvalidTask, 0, nil + } + defer func() { + if err != nil { + return + } + if prop.CanAddEnforcer { + *prop = *oldProp + t = enforceProperty(prop, t, ds.Plan.SCtx()) + prop.CanAddEnforcer = true + } + + if unenforcedTask != nil && !unenforcedTask.Invalid() { + curIsBest, cerr := compareTaskCost(unenforcedTask, t, opt) + if cerr != nil { + err = cerr + return + } + if curIsBest { + t = unenforcedTask + } + } + + ds.StoreTask(prop, t) + err = validateTableSamplePlan(ds, t, err) + }() + + t, err = tryToGetDualTask(ds) + if err != nil || t != nil { + planCounter.Dec(1) + if t != nil { + appendCandidate(ds, t, prop, opt) + } + return t, 1, err + } + + t = base.InvalidTask + candidates := skylinePruning(ds, prop) + pruningInfo := getPruningInfo(ds, candidates, prop) + defer func() { + if err == nil && t != nil && !t.Invalid() && pruningInfo != "" { + warnErr := errors.NewNoStackError(pruningInfo) + if ds.SCtx().GetSessionVars().StmtCtx.InVerboseExplain { + ds.SCtx().GetSessionVars().StmtCtx.AppendNote(warnErr) + } else { + ds.SCtx().GetSessionVars().StmtCtx.AppendExtraNote(warnErr) + } + } + }() + + cntPlan = 0 + for _, candidate := range candidates { + path := candidate.path + if path.PartialIndexPaths != nil { + // prefer tiflash, while current table path is tikv, skip it. + if ds.PreferStoreType&h.PreferTiFlash != 0 && path.StoreType == kv.TiKV { + continue + } + idxMergeTask, err := convertToIndexMergeScan(ds, prop, candidate, opt) + if err != nil { + return nil, 0, err + } + if !idxMergeTask.Invalid() { + cntPlan++ + planCounter.Dec(1) + } + appendCandidate(ds, idxMergeTask, prop, opt) + + curIsBetter, err := compareTaskCost(idxMergeTask, t, opt) + if err != nil { + return nil, 0, err + } + if curIsBetter || planCounter.Empty() { + t = idxMergeTask + } + if planCounter.Empty() { + return t, cntPlan, nil + } + continue + } + // if we already know the range of the scan is empty, just return a TableDual + if len(path.Ranges) == 0 { + // We should uncache the tableDual plan. + if expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), path.AccessConds) { + ds.SCtx().GetSessionVars().StmtCtx.SetSkipPlanCache("get a TableDual plan") + } + dual := PhysicalTableDual{}.Init(ds.SCtx(), ds.StatsInfo(), ds.QueryBlockOffset()) + dual.SetSchema(ds.Schema()) + cntPlan++ + planCounter.Dec(1) + t := &RootTask{} + t.SetPlan(dual) + appendCandidate(ds, t, prop, opt) + return t, cntPlan, nil + } + + canConvertPointGet := len(path.Ranges) > 0 && path.StoreType == kv.TiKV && isPointGetConvertableSchema(ds) + + if canConvertPointGet && path.Index != nil && path.Index.MVIndex { + canConvertPointGet = false // cannot use PointGet upon MVIndex + } + + if canConvertPointGet && !path.IsIntHandlePath { + // We simply do not build [batch] point get for prefix indexes. This can be optimized. 
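+			// For example (illustrative): with unique key uk(a, b), `where a = 1 and b = 2` yields a point
+			// range covering both index columns and may become a (Batch)PointGet, while `where a = 1` covers
+			// only a prefix of the index and cannot.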
+			canConvertPointGet = path.Index.Unique && !path.Index.HasPrefixIndex()
+			// If any range cannot cover all columns of the index, we cannot build [batch] point get.
+			idxColsLen := len(path.Index.Columns)
+			for _, ran := range path.Ranges {
+				if len(ran.LowVal) != idxColsLen {
+					canConvertPointGet = false
+					break
+				}
+			}
+		}
+		if canConvertPointGet && ds.table.Meta().GetPartitionInfo() != nil {
+			// A partition table with dynamic pruning doesn't support BatchPointGet (due to sorting?).
+			// Please make sure `where _tidb_rowid in (xx, xx)` is handled correctly when deleting this if statement.
+			if canConvertPointGet && len(path.Ranges) > 1 && ds.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+				canConvertPointGet = false
+			}
+			if canConvertPointGet && len(path.Ranges) > 1 {
+				// TODO: This is now implemented, but to decrease
+				// the impact of supporting plan cache for partitioning,
+				// this is not yet enabled.
+				// TODO: just remove this if block and update/add tests...
+				// We can only build batch point get for hash partitions on a simple column now. This is
+				// decided by the current implementation of `BatchPointGetExec::initialize()`, specifically,
+				// the `getPhysID()` function. Once we optimize that part, we can come back and enable
+				// BatchPointGet plan for more cases.
+				hashPartColName := getHashOrKeyPartitionColumnName(ds.SCtx(), ds.table.Meta())
+				if hashPartColName == nil {
+					canConvertPointGet = false
+				}
+			}
+			// A partition table can't use `_tidb_rowid` to generate a PointGet plan unless one partition is explicitly specified.
+			if canConvertPointGet && path.IsIntHandlePath && !ds.table.Meta().PKIsHandle && len(ds.PartitionNames) != 1 {
+				canConvertPointGet = false
+			}
+			if canConvertPointGet {
+				if path != nil && path.Index != nil && path.Index.Global {
+					// Don't convert to point get during DDL.
+					// TODO: Revisit truncate partition and global index.
+					if len(ds.TableInfo.GetPartitionInfo().DroppingDefinitions) > 0 ||
+						len(ds.TableInfo.GetPartitionInfo().AddingDefinitions) > 0 {
+						canConvertPointGet = false
+					}
+				}
+			}
+		}
+		if canConvertPointGet {
+			allRangeIsPoint := true
+			tc := ds.SCtx().GetSessionVars().StmtCtx.TypeCtx()
+			for _, ran := range path.Ranges {
+				if !ran.IsPointNonNullable(tc) {
+					// unique indexes can have duplicated NULL rows, so we cannot use PointGet if there is any NULL
+					allRangeIsPoint = false
+					break
+				}
+			}
+			if allRangeIsPoint {
+				var pointGetTask base.Task
+				if len(path.Ranges) == 1 {
+					pointGetTask = convertToPointGet(ds, prop, candidate)
+				} else {
+					pointGetTask = convertToBatchPointGet(ds, prop, candidate)
+				}
+
+				// Batch/PointGet plans may be over-optimized, like `a>=1(?) and a<=1(?)` --> `a=1` --> PointGet(a=1).
+				// For safety, prevent these plans from the plan cache here.
+				if !pointGetTask.Invalid() && expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), candidate.path.AccessConds) && !isSafePointGetPath4PlanCache(ds.SCtx(), candidate.path) {
+					ds.SCtx().GetSessionVars().StmtCtx.SetSkipPlanCache("Batch/PointGet plans may be over-optimized")
+				}
+
+				appendCandidate(ds, pointGetTask, prop, opt)
+				if !pointGetTask.Invalid() {
+					cntPlan++
+					planCounter.Dec(1)
+				}
+				curIsBetter, cerr := compareTaskCost(pointGetTask, t, opt)
+				if cerr != nil {
+					return nil, 0, cerr
+				}
+				if curIsBetter || planCounter.Empty() {
+					t = pointGetTask
+					if planCounter.Empty() {
+						return
+					}
+					continue
+				}
+			}
+		}
+		if path.IsTablePath() {
+			// prefer tiflash: if the current table path is tikv, skip it.
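+			// For example (illustrative): `select /*+ read_from_storage(tiflash[t]) */ ... from t` sets
+			// PreferTiFlash, so the TiKV table path is skipped here and the TiFlash path can be chosen.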
+ if ds.PreferStoreType&h.PreferTiFlash != 0 && path.StoreType == kv.TiKV { + continue + } + // prefer tikv, while current table path is tiflash, skip it. + if ds.PreferStoreType&h.PreferTiKV != 0 && path.StoreType == kv.TiFlash { + continue + } + var tblTask base.Task + if ds.SampleInfo != nil { + tblTask, err = convertToSampleTable(ds, prop, candidate, opt) + } else { + tblTask, err = convertToTableScan(ds, prop, candidate, opt) + } + if err != nil { + return nil, 0, err + } + if !tblTask.Invalid() { + cntPlan++ + planCounter.Dec(1) + } + appendCandidate(ds, tblTask, prop, opt) + curIsBetter, err := compareTaskCost(tblTask, t, opt) + if err != nil { + return nil, 0, err + } + if curIsBetter || planCounter.Empty() { + t = tblTask + } + if planCounter.Empty() { + return t, cntPlan, nil + } + continue + } + // TiFlash storage do not support index scan. + if ds.PreferStoreType&h.PreferTiFlash != 0 { + continue + } + // TableSample do not support index scan. + if ds.SampleInfo != nil { + continue + } + idxTask, err := convertToIndexScan(ds, prop, candidate, opt) + if err != nil { + return nil, 0, err + } + if !idxTask.Invalid() { + cntPlan++ + planCounter.Dec(1) + } + appendCandidate(ds, idxTask, prop, opt) + curIsBetter, err := compareTaskCost(idxTask, t, opt) + if err != nil { + return nil, 0, err + } + if curIsBetter || planCounter.Empty() { + t = idxTask + } + if planCounter.Empty() { + return t, cntPlan, nil + } + } + + return +} + +// convertToIndexMergeScan builds the index merge scan for intersection or union cases. +func convertToIndexMergeScan(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (task base.Task, err error) { + if prop.IsFlashProp() || prop.TaskTp == property.CopSingleReadTaskType { + return base.InvalidTask, nil + } + // lift the limitation of that double read can not build index merge **COP** task with intersection. + // that means we can output a cop task here without encapsulating it as root task, for the convenience of attaching limit to its table side. + + if !prop.IsSortItemEmpty() && !candidate.isMatchProp { + return base.InvalidTask, nil + } + // while for now, we still can not push the sort prop to the intersection index plan side, temporarily banned here. + if !prop.IsSortItemEmpty() && candidate.path.IndexMergeIsIntersection { + return base.InvalidTask, nil + } + failpoint.Inject("forceIndexMergeKeepOrder", func(_ failpoint.Value) { + if len(candidate.path.PartialIndexPaths) > 0 && !candidate.path.IndexMergeIsIntersection { + if prop.IsSortItemEmpty() { + failpoint.Return(base.InvalidTask, nil) + } + } + }) + path := candidate.path + scans := make([]base.PhysicalPlan, 0, len(path.PartialIndexPaths)) + cop := &CopTask{ + indexPlanFinished: false, + tblColHists: ds.TblColHists, + } + cop.physPlanPartInfo = &PhysPlanPartInfo{ + PruningConds: pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds), + PartitionNames: ds.PartitionNames, + Columns: ds.TblCols, + ColumnNames: ds.OutputNames(), + } + // Add sort items for index scan for merge-sort operation between partitions. 
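+	// For example (illustrative): for a requirement like `order by a desc`, each partition's partial scan
+	// emits rows ordered on a desc, and the ByItems built below let the reader merge-sort the per-partition
+	// streams into one ordered stream.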
+ byItems := make([]*util.ByItems, 0, len(prop.SortItems)) + for _, si := range prop.SortItems { + byItems = append(byItems, &util.ByItems{ + Expr: si.Col, + Desc: si.Desc, + }) + } + globalRemainingFilters := make([]expression.Expression, 0, 3) + for _, partPath := range path.PartialIndexPaths { + var scan base.PhysicalPlan + if partPath.IsTablePath() { + scan = convertToPartialTableScan(ds, prop, partPath, candidate.isMatchProp, byItems) + } else { + var remainingFilters []expression.Expression + scan, remainingFilters, err = convertToPartialIndexScan(ds, cop.physPlanPartInfo, prop, partPath, candidate.isMatchProp, byItems) + if err != nil { + return base.InvalidTask, err + } + if prop.TaskTp != property.RootTaskType && len(remainingFilters) > 0 { + return base.InvalidTask, nil + } + globalRemainingFilters = append(globalRemainingFilters, remainingFilters...) + } + scans = append(scans, scan) + } + totalRowCount := path.CountAfterAccess + if prop.ExpectedCnt < ds.StatsInfo().RowCount { + totalRowCount *= prop.ExpectedCnt / ds.StatsInfo().RowCount + } + ts, remainingFilters2, moreColumn, err := buildIndexMergeTableScan(ds, path.TableFilters, totalRowCount, candidate.isMatchProp) + if err != nil { + return base.InvalidTask, err + } + if prop.TaskTp != property.RootTaskType && len(remainingFilters2) > 0 { + return base.InvalidTask, nil + } + globalRemainingFilters = append(globalRemainingFilters, remainingFilters2...) + cop.keepOrder = candidate.isMatchProp + cop.tablePlan = ts + cop.idxMergePartPlans = scans + cop.idxMergeIsIntersection = path.IndexMergeIsIntersection + cop.idxMergeAccessMVIndex = path.IndexMergeAccessMVIndex + if moreColumn { + cop.needExtraProj = true + cop.originSchema = ds.Schema() + } + if len(globalRemainingFilters) != 0 { + cop.rootTaskConds = globalRemainingFilters + } + // after we lift the limitation of intersection and cop-type task in the code in this + // function above, we could set its index plan finished as true once we found its table + // plan is pure table scan below. + // And this will cause cost underestimation when we estimate the cost of the entire cop + // task plan in function `getTaskPlanCost`. + if prop.TaskTp == property.RootTaskType { + cop.indexPlanFinished = true + task = cop.ConvertToRootTask(ds.SCtx()) + } else { + _, pureTableScan := ts.(*PhysicalTableScan) + if !pureTableScan { + cop.indexPlanFinished = true + } + task = cop + } + return task, nil +} + +func convertToPartialIndexScan(ds *DataSource, physPlanPartInfo *PhysPlanPartInfo, prop *property.PhysicalProperty, path *util.AccessPath, matchProp bool, byItems []*util.ByItems) (base.PhysicalPlan, []expression.Expression, error) { + is := getOriginalPhysicalIndexScan(ds, prop, path, matchProp, false) + // TODO: Consider using isIndexCoveringColumns() to avoid another TableRead + indexConds := path.IndexFilters + if matchProp { + if is.Table.GetPartitionInfo() != nil && !is.Index.Global && is.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { + is.Columns, is.schema, _ = AddExtraPhysTblIDColumn(is.SCtx(), is.Columns, is.schema) + } + // Add sort items for index scan for merge-sort operation between partitions. + is.ByItems = byItems + } + + // Add a `Selection` for `IndexScan` with global index. + // It should pushdown to TiKV, DataSource schema doesn't contain partition id column. 
+	indexConds, err := is.addSelectionConditionForGlobalIndex(ds, physPlanPartInfo, indexConds)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if len(indexConds) > 0 {
+		pushedFilters, remainingFilter := extractFiltersForIndexMerge(util.GetPushDownCtx(ds.SCtx()), indexConds)
+		var selectivity float64
+		if path.CountAfterAccess > 0 {
+			selectivity = path.CountAfterIndex / path.CountAfterAccess
+		}
+		rowCount := is.StatsInfo().RowCount * selectivity
+		stats := &property.StatsInfo{RowCount: rowCount}
+		stats.StatsVersion = ds.StatisticTable.Version
+		if ds.StatisticTable.Pseudo {
+			stats.StatsVersion = statistics.PseudoVersion
+		}
+		indexPlan := PhysicalSelection{Conditions: pushedFilters}.Init(is.SCtx(), stats, ds.QueryBlockOffset())
+		indexPlan.SetChildren(is)
+		return indexPlan, remainingFilter, nil
+	}
+	return is, nil, nil
+}
+
+func checkColinSchema(cols []*expression.Column, schema *expression.Schema) bool {
+	for _, col := range cols {
+		if schema.ColumnIndex(col) == -1 {
+			return false
+		}
+	}
+	return true
+}
+
+func convertToPartialTableScan(ds *DataSource, prop *property.PhysicalProperty, path *util.AccessPath, matchProp bool, byItems []*util.ByItems) (tablePlan base.PhysicalPlan) {
+	ts, rowCount := getOriginalPhysicalTableScan(ds, prop, path, matchProp)
+	overwritePartialTableScanSchema(ds, ts)
+	// remove ineffective filter conditions after overwriting the physical scan schema
+	newFilterConds := make([]expression.Expression, 0, len(path.TableFilters))
+	for _, cond := range ts.filterCondition {
+		cols := expression.ExtractColumns(cond)
+		if checkColinSchema(cols, ts.schema) {
+			newFilterConds = append(newFilterConds, cond)
+		}
+	}
+	ts.filterCondition = newFilterConds
+	if matchProp {
+		if ts.Table.GetPartitionInfo() != nil && ts.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
+			ts.Columns, ts.schema, _ = AddExtraPhysTblIDColumn(ts.SCtx(), ts.Columns, ts.schema)
+		}
+		ts.ByItems = byItems
+	}
+	if len(ts.filterCondition) > 0 {
+		selectivity, _, err := cardinality.Selectivity(ds.SCtx(), ds.TableStats.HistColl, ts.filterCondition, nil)
+		if err != nil {
+			logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
+			selectivity = cost.SelectionFactor
+		}
+		tablePlan = PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.SCtx(), ts.StatsInfo().ScaleByExpectCnt(selectivity*rowCount), ds.QueryBlockOffset())
+		tablePlan.SetChildren(ts)
+		return tablePlan
+	}
+	tablePlan = ts
+	return tablePlan
+}
+
+// overwritePartialTableScanSchema changes the schema of the partial table scan to the handle columns.
+func overwritePartialTableScanSchema(ds *DataSource, ts *PhysicalTableScan) {
+	handleCols := ds.HandleCols
+	if handleCols == nil {
+		handleCols = util.NewIntHandleCols(ds.newExtraHandleSchemaCol())
+	}
+	hdColNum := handleCols.NumCols()
+	exprCols := make([]*expression.Column, 0, hdColNum)
+	infoCols := make([]*model.ColumnInfo, 0, hdColNum)
+	for i := 0; i < hdColNum; i++ {
+		col := handleCols.GetCol(i)
+		exprCols = append(exprCols, col)
+		if c := model.FindColumnInfoByID(ds.TableInfo.Columns, col.ID); c != nil {
+			infoCols = append(infoCols, c)
+		} else {
+			infoCols = append(infoCols, col.ToInfo())
+		}
+	}
+	ts.schema = expression.NewSchema(exprCols...)
+	ts.Columns = infoCols
+}
+
+// setIndexMergeTableScanHandleCols sets the handle columns of the table scan.
+func setIndexMergeTableScanHandleCols(ds *DataSource, ts *PhysicalTableScan) (err error) { + handleCols := ds.HandleCols + if handleCols == nil { + handleCols = util.NewIntHandleCols(ds.newExtraHandleSchemaCol()) + } + hdColNum := handleCols.NumCols() + exprCols := make([]*expression.Column, 0, hdColNum) + for i := 0; i < hdColNum; i++ { + col := handleCols.GetCol(i) + exprCols = append(exprCols, col) + } + ts.HandleCols, err = handleCols.ResolveIndices(expression.NewSchema(exprCols...)) + return +} + +// buildIndexMergeTableScan() returns Selection that will be pushed to TiKV. +// Filters that cannot be pushed to TiKV are also returned, and an extra Selection above IndexMergeReader will be constructed later. +func buildIndexMergeTableScan(ds *DataSource, tableFilters []expression.Expression, + totalRowCount float64, matchProp bool) (base.PhysicalPlan, []expression.Expression, bool, error) { + ts := PhysicalTableScan{ + Table: ds.TableInfo, + Columns: slices.Clone(ds.Columns), + TableAsName: ds.TableAsName, + DBName: ds.DBName, + isPartition: ds.PartitionDefIdx != nil, + physicalTableID: ds.PhysicalTableID, + HandleCols: ds.HandleCols, + tblCols: ds.TblCols, + tblColHists: ds.TblColHists, + }.Init(ds.SCtx(), ds.QueryBlockOffset()) + ts.SetSchema(ds.Schema().Clone()) + err := setIndexMergeTableScanHandleCols(ds, ts) + if err != nil { + return nil, nil, false, err + } + ts.SetStats(ds.TableStats.ScaleByExpectCnt(totalRowCount)) + usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false) + if usedStats != nil && usedStats.GetUsedInfo(ts.physicalTableID) != nil { + ts.usedStatsInfo = usedStats.GetUsedInfo(ts.physicalTableID) + } + if ds.StatisticTable.Pseudo { + ts.StatsInfo().StatsVersion = statistics.PseudoVersion + } + var currentTopPlan base.PhysicalPlan = ts + if len(tableFilters) > 0 { + pushedFilters, remainingFilters := extractFiltersForIndexMerge(util.GetPushDownCtx(ds.SCtx()), tableFilters) + pushedFilters1, remainingFilters1 := SplitSelCondsWithVirtualColumn(pushedFilters) + pushedFilters = pushedFilters1 + remainingFilters = append(remainingFilters, remainingFilters1...) + if len(pushedFilters) != 0 { + selectivity, _, err := cardinality.Selectivity(ds.SCtx(), ds.TableStats.HistColl, pushedFilters, nil) + if err != nil { + logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) + selectivity = cost.SelectionFactor + } + sel := PhysicalSelection{Conditions: pushedFilters}.Init(ts.SCtx(), ts.StatsInfo().ScaleByExpectCnt(selectivity*totalRowCount), ts.QueryBlockOffset()) + sel.SetChildren(ts) + currentTopPlan = sel + } + if len(remainingFilters) > 0 { + return currentTopPlan, remainingFilters, false, nil + } + } + // If we don't need to use ordered scan, we don't need do the following codes for adding new columns. + if !matchProp { + return currentTopPlan, nil, false, nil + } + + // Add the row handle into the schema. 
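+	// The handle is what lets rows fetched by the different partial plans be matched back to the same table
+	// row, so make sure it is present in the schema: an int primary key, a common (clustered) handle, or the
+	// extra _tidb_rowid column, as handled below.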
+ columnAdded := false + if ts.Table.PKIsHandle { + pk := ts.Table.GetPkColInfo() + pkCol := expression.ColInfo2Col(ts.tblCols, pk) + if !ts.schema.Contains(pkCol) { + ts.schema.Append(pkCol) + ts.Columns = append(ts.Columns, pk) + columnAdded = true + } + } else if ts.Table.IsCommonHandle { + idxInfo := ts.Table.GetPrimaryKey() + for _, idxCol := range idxInfo.Columns { + col := ts.tblCols[idxCol.Offset] + if !ts.schema.Contains(col) { + columnAdded = true + ts.schema.Append(col) + ts.Columns = append(ts.Columns, col.ToInfo()) + } + } + } else if !ts.schema.Contains(ts.HandleCols.GetCol(0)) { + ts.schema.Append(ts.HandleCols.GetCol(0)) + ts.Columns = append(ts.Columns, model.NewExtraHandleColInfo()) + columnAdded = true + } + + // For the global index of the partitioned table, we also need the PhysicalTblID to identify the rows from each partition. + if ts.Table.GetPartitionInfo() != nil && ts.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { + var newColAdded bool + ts.Columns, ts.schema, newColAdded = AddExtraPhysTblIDColumn(ts.SCtx(), ts.Columns, ts.schema) + columnAdded = columnAdded || newColAdded + } + return currentTopPlan, nil, columnAdded, nil +} + +// extractFiltersForIndexMerge returns: +// `pushed`: exprs that can be pushed to TiKV. +// `remaining`: exprs that can NOT be pushed to TiKV but can be pushed to other storage engines. +// Why do we need this func? +// IndexMerge only works on TiKV, so we need to find all exprs that cannot be pushed to TiKV, and add a new Selection above IndexMergeReader. +// +// But the new Selection should exclude the exprs that can NOT be pushed to ALL the storage engines. +// Because these exprs have already been put in another Selection(check rule_predicate_push_down). +func extractFiltersForIndexMerge(ctx expression.PushDownContext, filters []expression.Expression) (pushed []expression.Expression, remaining []expression.Expression) { + for _, expr := range filters { + if expression.CanExprsPushDown(ctx, []expression.Expression{expr}, kv.TiKV) { + pushed = append(pushed, expr) + continue + } + if expression.CanExprsPushDown(ctx, []expression.Expression{expr}, kv.UnSpecified) { + remaining = append(remaining, expr) + } + } + return +} + +func isIndexColsCoveringCol(sctx expression.EvalContext, col *expression.Column, indexCols []*expression.Column, idxColLens []int, ignoreLen bool) bool { + for i, indexCol := range indexCols { + if indexCol == nil || !col.EqualByExprAndID(sctx, indexCol) { + continue + } + if ignoreLen || idxColLens[i] == types.UnspecifiedLength || idxColLens[i] == col.RetType.GetFlen() { + return true + } + } + return false +} + +func indexCoveringColumn(ds *DataSource, column *expression.Column, indexColumns []*expression.Column, idxColLens []int, ignoreLen bool) bool { + if ds.TableInfo.PKIsHandle && mysql.HasPriKeyFlag(column.RetType.GetFlag()) { + return true + } + if column.ID == model.ExtraHandleID || column.ID == model.ExtraPhysTblID { + return true + } + evalCtx := ds.SCtx().GetExprCtx().GetEvalCtx() + coveredByPlainIndex := isIndexColsCoveringCol(evalCtx, column, indexColumns, idxColLens, ignoreLen) + coveredByClusteredIndex := isIndexColsCoveringCol(evalCtx, column, ds.CommonHandleCols, ds.CommonHandleLens, ignoreLen) + if !coveredByPlainIndex && !coveredByClusteredIndex { + return false + } + isClusteredNewCollationIdx := collate.NewCollationEnabled() && + column.GetType(evalCtx).EvalType() == types.ETString && + !mysql.HasBinaryFlag(column.GetType(evalCtx).GetFlag()) + if !coveredByPlainIndex && 
coveredByClusteredIndex && isClusteredNewCollationIdx && ds.table.Meta().CommonHandleVersion == 0 { + return false + } + return true +} + +func isIndexCoveringColumns(ds *DataSource, columns, indexColumns []*expression.Column, idxColLens []int) bool { + for _, col := range columns { + if !indexCoveringColumn(ds, col, indexColumns, idxColLens, false) { + return false + } + } + return true +} + +func isIndexCoveringCondition(ds *DataSource, condition expression.Expression, indexColumns []*expression.Column, idxColLens []int) bool { + switch v := condition.(type) { + case *expression.Column: + return indexCoveringColumn(ds, v, indexColumns, idxColLens, false) + case *expression.ScalarFunction: + // Even if the index only contains prefix `col`, the index can cover `col is null`. + if v.FuncName.L == ast.IsNull { + if col, ok := v.GetArgs()[0].(*expression.Column); ok { + return indexCoveringColumn(ds, col, indexColumns, idxColLens, true) + } + } + for _, arg := range v.GetArgs() { + if !isIndexCoveringCondition(ds, arg, indexColumns, idxColLens) { + return false + } + } + return true + } + return true +} + +func isSingleScan(ds *DataSource, indexColumns []*expression.Column, idxColLens []int) bool { + if !ds.SCtx().GetSessionVars().OptPrefixIndexSingleScan || ds.ColsRequiringFullLen == nil { + // ds.ColsRequiringFullLen is set at (*DataSource).PruneColumns. In some cases we don't reach (*DataSource).PruneColumns + // and ds.ColsRequiringFullLen is nil, so we fall back to ds.isIndexCoveringColumns(ds.schema.Columns, indexColumns, idxColLens). + return isIndexCoveringColumns(ds, ds.Schema().Columns, indexColumns, idxColLens) + } + if !isIndexCoveringColumns(ds, ds.ColsRequiringFullLen, indexColumns, idxColLens) { + return false + } + for _, cond := range ds.AllConds { + if !isIndexCoveringCondition(ds, cond, indexColumns, idxColLens) { + return false + } + } + return true +} + +// If there is a table reader which needs to keep order, we should append a pk to table scan. +func (ts *PhysicalTableScan) appendExtraHandleCol(ds *DataSource) (*expression.Column, bool) { + handleCols := ds.HandleCols + if handleCols != nil { + return handleCols.GetCol(0), false + } + handleCol := ds.newExtraHandleSchemaCol() + ts.schema.Append(handleCol) + ts.Columns = append(ts.Columns, model.NewExtraHandleColInfo()) + return handleCol, true +} + +// convertToIndexScan converts the DataSource to index scan with idx. +func convertToIndexScan(ds *DataSource, prop *property.PhysicalProperty, + candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (task base.Task, err error) { + if candidate.path.Index.MVIndex { + // MVIndex is special since different index rows may return the same _row_id and this can break some assumptions of IndexReader. + // Currently only support using IndexMerge to access MVIndex instead of IndexReader. + // TODO: make IndexReader support accessing MVIndex directly. + return base.InvalidTask, nil + } + if !candidate.path.IsSingleScan { + // If it's parent requires single read task, return max cost. + if prop.TaskTp == property.CopSingleReadTaskType { + return base.InvalidTask, nil + } + } else if prop.TaskTp == property.CopMultiReadTaskType { + // If it's parent requires double read task, return max cost. + return base.InvalidTask, nil + } + if !prop.IsSortItemEmpty() && !candidate.isMatchProp { + return base.InvalidTask, nil + } + // If we need to keep order for the index scan, we should forbid the non-keep-order index scan when we try to generate the path. 
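+	// More precisely (clarifying note): ForceKeepOrder is set when an ORDER_INDEX hint names this index; if
+	// the prop has no sort requirement, this forced keep-order path is skipped here, since it only makes
+	// sense for the order-requiring prop. Symmetrically, ForceNoKeepOrder (NO_ORDER_INDEX hint) invalidates
+	// the path below when the prop does require order.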
+	if prop.IsSortItemEmpty() && candidate.path.ForceKeepOrder {
+		return base.InvalidTask, nil
+	}
+	// If the prop requires order but the path is forced not to keep order, this path cannot satisfy it.
+	if !prop.IsSortItemEmpty() && candidate.path.ForceNoKeepOrder {
+		return base.InvalidTask, nil
+	}
+	path := candidate.path
+	is := getOriginalPhysicalIndexScan(ds, prop, path, candidate.isMatchProp, candidate.path.IsSingleScan)
+	cop := &CopTask{
+		indexPlan:   is,
+		tblColHists: ds.TblColHists,
+		tblCols:     ds.TblCols,
+		expectCnt:   uint64(prop.ExpectedCnt),
+	}
+	cop.physPlanPartInfo = &PhysPlanPartInfo{
+		PruningConds:   pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds),
+		PartitionNames: ds.PartitionNames,
+		Columns:        ds.TblCols,
+		ColumnNames:    ds.OutputNames(),
+	}
+	if !candidate.path.IsSingleScan {
+		// In this way, it's the double read case.
+		ts := PhysicalTableScan{
+			Columns:         util.CloneColInfos(ds.Columns),
+			Table:           is.Table,
+			TableAsName:     ds.TableAsName,
+			DBName:          ds.DBName,
+			isPartition:     ds.PartitionDefIdx != nil,
+			physicalTableID: ds.PhysicalTableID,
+			tblCols:         ds.TblCols,
+			tblColHists:     ds.TblColHists,
+		}.Init(ds.SCtx(), is.QueryBlockOffset())
+		ts.SetSchema(ds.Schema().Clone())
+		// We set `StatsVersion` here and fill other fields in `(*copTask).finishIndexPlan`. Since `copTask.indexPlan` may
+		// change before calling `(*copTask).finishIndexPlan`, we don't know the stats information of `ts` currently and on
+		// the other hand, it may be hard to identify `StatsVersion` of `ts` in `(*copTask).finishIndexPlan`.
+		ts.SetStats(&property.StatsInfo{StatsVersion: ds.TableStats.StatsVersion})
+		usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false)
+		if usedStats != nil && usedStats.GetUsedInfo(ts.physicalTableID) != nil {
+			ts.usedStatsInfo = usedStats.GetUsedInfo(ts.physicalTableID)
+		}
+		cop.tablePlan = ts
+	}
+	task = cop
+	if cop.tablePlan != nil && ds.TableInfo.IsCommonHandle {
+		cop.commonHandleCols = ds.CommonHandleCols
+		commonHandle := ds.HandleCols.(*util.CommonHandleCols)
+		for _, col := range commonHandle.GetColumns() {
+			if ds.Schema().ColumnIndex(col) == -1 {
+				ts := cop.tablePlan.(*PhysicalTableScan)
+				ts.Schema().Append(col)
+				ts.Columns = append(ts.Columns, col.ToInfo())
+				cop.needExtraProj = true
+			}
+		}
+	}
+	if candidate.isMatchProp {
+		cop.keepOrder = true
+		if cop.tablePlan != nil && !ds.TableInfo.IsCommonHandle {
+			col, isNew := cop.tablePlan.(*PhysicalTableScan).appendExtraHandleCol(ds)
+			cop.extraHandleCol = col
+			cop.needExtraProj = cop.needExtraProj || isNew
+		}
+
+		if ds.TableInfo.GetPartitionInfo() != nil {
+			// Add sort items for index scan for merge-sort operation between partitions, only required for local index.
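+			// A global index is a single ordered structure across all partitions, so its scan is already
+			// ordered; local indexes are per-partition, hence the per-partition ByItems below for merge-sort.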
+ if !is.Index.Global { + byItems := make([]*util.ByItems, 0, len(prop.SortItems)) + for _, si := range prop.SortItems { + byItems = append(byItems, &util.ByItems{ + Expr: si.Col, + Desc: si.Desc, + }) + } + cop.indexPlan.(*PhysicalIndexScan).ByItems = byItems + } + if cop.tablePlan != nil && ds.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { + if !is.Index.Global { + is.Columns, is.schema, _ = AddExtraPhysTblIDColumn(is.SCtx(), is.Columns, is.Schema()) + } + var succ bool + // global index for tableScan with keepOrder also need PhysicalTblID + ts := cop.tablePlan.(*PhysicalTableScan) + ts.Columns, ts.schema, succ = AddExtraPhysTblIDColumn(ts.SCtx(), ts.Columns, ts.Schema()) + cop.needExtraProj = cop.needExtraProj || succ + } + } + } + if cop.needExtraProj { + cop.originSchema = ds.Schema() + } + // prop.IsSortItemEmpty() would always return true when coming to here, + // so we can just use prop.ExpectedCnt as parameter of addPushedDownSelection. + finalStats := ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt) + if err = is.addPushedDownSelection(cop, ds, path, finalStats); err != nil { + return base.InvalidTask, err + } + if prop.TaskTp == property.RootTaskType { + task = task.ConvertToRootTask(ds.SCtx()) + } else if _, ok := task.(*RootTask); ok { + return base.InvalidTask, nil + } + return task, nil +} + +func (is *PhysicalIndexScan) getScanRowSize() float64 { + idx := is.Index + scanCols := make([]*expression.Column, 0, len(idx.Columns)+1) + // If `initSchema` has already appended the handle column in schema, just use schema columns, otherwise, add extra handle column. + if len(idx.Columns) == len(is.schema.Columns) { + scanCols = append(scanCols, is.schema.Columns...) + handleCol := is.pkIsHandleCol + if handleCol != nil { + scanCols = append(scanCols, handleCol) + } + } else { + scanCols = is.schema.Columns + } + return cardinality.GetIndexAvgRowSize(is.SCtx(), is.tblColHists, scanCols, is.Index.Unique) +} + +// initSchema is used to set the schema of PhysicalIndexScan. Before calling this, +// make sure the following field of PhysicalIndexScan are initialized: +// +// PhysicalIndexScan.Table *model.TableInfo +// PhysicalIndexScan.Index *model.IndexInfo +// PhysicalIndexScan.Index.Columns []*IndexColumn +// PhysicalIndexScan.IdxCols []*expression.Column +// PhysicalIndexScan.Columns []*model.ColumnInfo +func (is *PhysicalIndexScan) initSchema(idxExprCols []*expression.Column, isDoubleRead bool) { + indexCols := make([]*expression.Column, len(is.IdxCols), len(is.Index.Columns)+1) + copy(indexCols, is.IdxCols) + + for i := len(is.IdxCols); i < len(is.Index.Columns); i++ { + if idxExprCols[i] != nil { + indexCols = append(indexCols, idxExprCols[i]) + } else { + // TODO: try to reuse the col generated when building the DataSource. 
+ indexCols = append(indexCols, &expression.Column{ + ID: is.Table.Columns[is.Index.Columns[i].Offset].ID, + RetType: &is.Table.Columns[is.Index.Columns[i].Offset].FieldType, + UniqueID: is.SCtx().GetSessionVars().AllocPlanColumnID(), + }) + } + } + is.NeedCommonHandle = is.Table.IsCommonHandle + + if is.NeedCommonHandle { + for i := len(is.Index.Columns); i < len(idxExprCols); i++ { + indexCols = append(indexCols, idxExprCols[i]) + } + } + setHandle := len(indexCols) > len(is.Index.Columns) + if !setHandle { + for i, col := range is.Columns { + if (mysql.HasPriKeyFlag(col.GetFlag()) && is.Table.PKIsHandle) || col.ID == model.ExtraHandleID { + indexCols = append(indexCols, is.dataSourceSchema.Columns[i]) + setHandle = true + break + } + } + } + + var extraPhysTblCol *expression.Column + // If `dataSouceSchema` contains `model.ExtraPhysTblID`, we should add it into `indexScan.schema` + for _, col := range is.dataSourceSchema.Columns { + if col.ID == model.ExtraPhysTblID { + extraPhysTblCol = col.Clone().(*expression.Column) + break + } + } + + if isDoubleRead || is.Index.Global { + // If it's double read case, the first index must return handle. So we should add extra handle column + // if there isn't a handle column. + if !setHandle { + if !is.Table.IsCommonHandle { + indexCols = append(indexCols, &expression.Column{ + RetType: types.NewFieldType(mysql.TypeLonglong), + ID: model.ExtraHandleID, + UniqueID: is.SCtx().GetSessionVars().AllocPlanColumnID(), + OrigName: model.ExtraHandleName.O, + }) + } + } + // If it's global index, handle and PhysTblID columns has to be added, so that needed pids can be filtered. + if is.Index.Global && extraPhysTblCol == nil { + indexCols = append(indexCols, &expression.Column{ + RetType: types.NewFieldType(mysql.TypeLonglong), + ID: model.ExtraPhysTblID, + UniqueID: is.SCtx().GetSessionVars().AllocPlanColumnID(), + OrigName: model.ExtraPhysTblIDName.O, + }) + } + } + + if extraPhysTblCol != nil { + indexCols = append(indexCols, extraPhysTblCol) + } + + is.SetSchema(expression.NewSchema(indexCols...)) +} + +func (is *PhysicalIndexScan) addSelectionConditionForGlobalIndex(p *DataSource, physPlanPartInfo *PhysPlanPartInfo, conditions []expression.Expression) ([]expression.Expression, error) { + if !is.Index.Global { + return conditions, nil + } + args := make([]expression.Expression, 0, len(p.PartitionNames)+1) + for _, col := range is.schema.Columns { + if col.ID == model.ExtraPhysTblID { + args = append(args, col.Clone()) + break + } + } + + if len(args) != 1 { + return nil, errors.Errorf("Can't find column %s in schema %s", model.ExtraPhysTblIDName.O, is.schema) + } + + // For SQL like 'select x from t partition(p0, p1) use index(idx)', + // we will add a `Selection` like `in(t._tidb_pid, p0, p1)` into the plan. + // For truncate/drop partitions, we should only return indexes where partitions still in public state. + idxArr, err := PartitionPruning(p.SCtx(), p.table.GetPartitionedTable(), + physPlanPartInfo.PruningConds, + physPlanPartInfo.PartitionNames, + physPlanPartInfo.Columns, + physPlanPartInfo.ColumnNames) + if err != nil { + return nil, err + } + needNot := false + pInfo := p.TableInfo.GetPartitionInfo() + if len(idxArr) == 1 && idxArr[0] == FullRange { + // Only filter adding and dropping partitions. 
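+		// For example (illustrative): with no partition pruned (FullRange) but partitions being added or
+		// dropped by DDL, the Selection becomes `not(in(t._tidb_pid, <adding/dropping ids>))`; otherwise it
+		// is `in(t._tidb_pid, <pruned partition ids>)`, as built below.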
+ if len(pInfo.AddingDefinitions) == 0 && len(pInfo.DroppingDefinitions) == 0 { + return conditions, nil + } + needNot = true + for _, p := range pInfo.AddingDefinitions { + args = append(args, expression.NewInt64Const(p.ID)) + } + for _, p := range pInfo.DroppingDefinitions { + args = append(args, expression.NewInt64Const(p.ID)) + } + } else if len(idxArr) == 0 { + // add an invalid pid as param for `IN` function + args = append(args, expression.NewInt64Const(-1)) + } else { + // `PartitionPruning`` func does not return adding and dropping partitions + for _, idx := range idxArr { + args = append(args, expression.NewInt64Const(pInfo.Definitions[idx].ID)) + } + } + condition, err := expression.NewFunction(p.SCtx().GetExprCtx(), ast.In, types.NewFieldType(mysql.TypeLonglong), args...) + if err != nil { + return nil, err + } + if needNot { + condition, err = expression.NewFunction(p.SCtx().GetExprCtx(), ast.UnaryNot, types.NewFieldType(mysql.TypeLonglong), condition) + if err != nil { + return nil, err + } + } + return append(conditions, condition), nil +} + +func (is *PhysicalIndexScan) addPushedDownSelection(copTask *CopTask, p *DataSource, path *util.AccessPath, finalStats *property.StatsInfo) error { + // Add filter condition to table plan now. + indexConds, tableConds := path.IndexFilters, path.TableFilters + tableConds, copTask.rootTaskConds = SplitSelCondsWithVirtualColumn(tableConds) + + var newRootConds []expression.Expression + pctx := util.GetPushDownCtx(is.SCtx()) + indexConds, newRootConds = expression.PushDownExprs(pctx, indexConds, kv.TiKV) + copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...) + + tableConds, newRootConds = expression.PushDownExprs(pctx, tableConds, kv.TiKV) + copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...) + + // Add a `Selection` for `IndexScan` with global index. + // It should pushdown to TiKV, DataSource schema doesn't contain partition id column. + indexConds, err := is.addSelectionConditionForGlobalIndex(p, copTask.physPlanPartInfo, indexConds) + if err != nil { + return err + } + + if indexConds != nil { + var selectivity float64 + if path.CountAfterAccess > 0 { + selectivity = path.CountAfterIndex / path.CountAfterAccess + } + count := is.StatsInfo().RowCount * selectivity + stats := p.TableStats.ScaleByExpectCnt(count) + indexSel := PhysicalSelection{Conditions: indexConds}.Init(is.SCtx(), stats, is.QueryBlockOffset()) + indexSel.SetChildren(is) + copTask.indexPlan = indexSel + } + if len(tableConds) > 0 { + copTask.finishIndexPlan() + tableSel := PhysicalSelection{Conditions: tableConds}.Init(is.SCtx(), finalStats, is.QueryBlockOffset()) + if len(copTask.rootTaskConds) != 0 { + selectivity, _, err := cardinality.Selectivity(is.SCtx(), copTask.tblColHists, tableConds, nil) + if err != nil { + logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) + selectivity = cost.SelectionFactor + } + tableSel.SetStats(copTask.Plan().StatsInfo().Scale(selectivity)) + } + tableSel.SetChildren(copTask.tablePlan) + copTask.tablePlan = tableSel + } + return nil +} + +// NeedExtraOutputCol is designed for check whether need an extra column for +// pid or physical table id when build indexReq. 
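+// For example (illustrative): a global index on a partitioned table must return the pid so that the executor
+// can route each handle lookup to the right partition; likewise, an index scan with ByItems under dynamic
+// prune needs the physical table id to merge-sort the per-partition rows.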
+func (is *PhysicalIndexScan) NeedExtraOutputCol() bool { + if is.Table.Partition == nil { + return false + } + // has global index, should return pid + if is.Index.Global { + return true + } + // has embedded limit, should return physical table id + if len(is.ByItems) != 0 && is.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { + return true + } + return false +} + +// SplitSelCondsWithVirtualColumn filter the select conditions which contain virtual column +func SplitSelCondsWithVirtualColumn(conds []expression.Expression) (withoutVirt []expression.Expression, withVirt []expression.Expression) { + for i := range conds { + if expression.ContainVirtualColumn(conds[i : i+1]) { + withVirt = append(withVirt, conds[i]) + } else { + withoutVirt = append(withoutVirt, conds[i]) + } + } + return withoutVirt, withVirt +} + +func matchIndicesProp(sctx base.PlanContext, idxCols []*expression.Column, colLens []int, propItems []property.SortItem) bool { + if len(idxCols) < len(propItems) { + return false + } + for i, item := range propItems { + if colLens[i] != types.UnspecifiedLength || !item.Col.EqualByExprAndID(sctx.GetExprCtx().GetEvalCtx(), idxCols[i]) { + return false + } + } + return true +} + +func splitIndexFilterConditions(ds *DataSource, conditions []expression.Expression, indexColumns []*expression.Column, + idxColLens []int) (indexConds, tableConds []expression.Expression) { + var indexConditions, tableConditions []expression.Expression + for _, cond := range conditions { + var covered bool + if ds.SCtx().GetSessionVars().OptPrefixIndexSingleScan { + covered = isIndexCoveringCondition(ds, cond, indexColumns, idxColLens) + } else { + covered = isIndexCoveringColumns(ds, expression.ExtractColumns(cond), indexColumns, idxColLens) + } + if covered { + indexConditions = append(indexConditions, cond) + } else { + tableConditions = append(tableConditions, cond) + } + } + return indexConditions, tableConditions +} + +// GetPhysicalScan4LogicalTableScan returns PhysicalTableScan for the LogicalTableScan. +func GetPhysicalScan4LogicalTableScan(s *LogicalTableScan, schema *expression.Schema, stats *property.StatsInfo) *PhysicalTableScan { + ds := s.Source + ts := PhysicalTableScan{ + Table: ds.TableInfo, + Columns: ds.Columns, + TableAsName: ds.TableAsName, + DBName: ds.DBName, + isPartition: ds.PartitionDefIdx != nil, + physicalTableID: ds.PhysicalTableID, + Ranges: s.Ranges, + AccessCondition: s.AccessConds, + tblCols: ds.TblCols, + tblColHists: ds.TblColHists, + }.Init(s.SCtx(), s.QueryBlockOffset()) + ts.SetStats(stats) + ts.SetSchema(schema.Clone()) + return ts +} + +// GetPhysicalIndexScan4LogicalIndexScan returns PhysicalIndexScan for the logical IndexScan. +func GetPhysicalIndexScan4LogicalIndexScan(s *LogicalIndexScan, _ *expression.Schema, stats *property.StatsInfo) *PhysicalIndexScan { + ds := s.Source + is := PhysicalIndexScan{ + Table: ds.TableInfo, + TableAsName: ds.TableAsName, + DBName: ds.DBName, + Columns: s.Columns, + Index: s.Index, + IdxCols: s.IdxCols, + IdxColLens: s.IdxColLens, + AccessCondition: s.AccessConds, + Ranges: s.Ranges, + dataSourceSchema: ds.Schema(), + isPartition: ds.PartitionDefIdx != nil, + physicalTableID: ds.PhysicalTableID, + tblColHists: ds.TblColHists, + pkIsHandleCol: ds.getPKIsHandleCol(), + }.Init(ds.SCtx(), ds.QueryBlockOffset()) + is.SetStats(stats) + is.initSchema(s.FullIdxCols, s.IsDoubleRead) + return is +} + +// isPointGetPath indicates whether the conditions are point-get-able. 
+// e.g. create table t(a int, b int, c int unique, primary key(a, b))
+// select * from t where a = 1 and b = 1 and c = 1;
+// The datasource can be accessed by primary key (a, b) or by unique key c; both are point-get-able.
+func isPointGetPath(ds *DataSource, path *util.AccessPath) bool {
+	if len(path.Ranges) < 1 {
+		return false
+	}
+	if !path.IsIntHandlePath {
+		if path.Index == nil {
+			return false
+		}
+		if !path.Index.Unique || path.Index.HasPrefixIndex() {
+			return false
+		}
+		idxColsLen := len(path.Index.Columns)
+		for _, ran := range path.Ranges {
+			if len(ran.LowVal) != idxColsLen {
+				return false
+			}
+		}
+	}
+	tc := ds.SCtx().GetSessionVars().StmtCtx.TypeCtx()
+	for _, ran := range path.Ranges {
+		if !ran.IsPointNonNullable(tc) {
+			return false
+		}
+	}
+	return true
+}
+
+// convertToTableScan converts the DataSource to a table scan.
+func convertToTableScan(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, error) {
+	// It will be handled in convertToIndexScan.
+	if prop.TaskTp == property.CopMultiReadTaskType {
+		return base.InvalidTask, nil
+	}
+	if !prop.IsSortItemEmpty() && !candidate.isMatchProp {
+		return base.InvalidTask, nil
+	}
+	// If the path is forced to keep order but no order is actually required, forbid it:
+	// the scan would be generated without keep-order.
+	if prop.IsSortItemEmpty() && candidate.path.ForceKeepOrder {
+		return base.InvalidTask, nil
+	}
+	// Likewise, if the path is forced to not keep order but an order is required, forbid it.
+	if !prop.IsSortItemEmpty() && candidate.path.ForceNoKeepOrder {
+		return base.InvalidTask, nil
+	}
+	ts, _ := getOriginalPhysicalTableScan(ds, prop, candidate.path, candidate.isMatchProp)
+	if ts.KeepOrder && ts.StoreType == kv.TiFlash && (ts.Desc || ds.SCtx().GetSessionVars().TiFlashFastScan) {
+		// TiFlash fast mode (https://github.com/pingcap/tidb/pull/35851) does not keep order in TableScan.
+		return base.InvalidTask, nil
+	}
+
+	// In disaggregated TiFlash mode, only MPP is allowed; cop and batchCop are deprecated.
+	// So if prop.TaskTp is RootTaskType, we have to use an mppTask and then convert it to a rootTask.
+	isTiFlashPath := ts.StoreType == kv.TiFlash
+	canMppConvertToRoot := prop.TaskTp == property.RootTaskType && ds.SCtx().GetSessionVars().IsMPPAllowed() && isTiFlashPath
+	canMppConvertToRootForDisaggregatedTiFlash := config.GetGlobalConfig().DisaggregatedTiFlash && canMppConvertToRoot
+	canMppConvertToRootForWhenTiFlashCopIsBanned := ds.SCtx().GetSessionVars().IsTiFlashCopBanned() && canMppConvertToRoot
+	if prop.TaskTp == property.MppTaskType || canMppConvertToRootForDisaggregatedTiFlash || canMppConvertToRootForWhenTiFlashCopIsBanned {
+		if ts.KeepOrder {
+			return base.InvalidTask, nil
+		}
+		if prop.MPPPartitionTp != property.AnyType {
+			return base.InvalidTask, nil
+		}
+		// ********************************** future deprecated start **************************/
+		var hasVirtualColumn bool
+		for _, col := range ts.schema.Columns {
+			if col.VirtualExpr != nil {
+				ds.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because column `" + col.OrigName + "` is a virtual column which is not supported now.")
+				hasVirtualColumn = true
+				break
+			}
+		}
+		// In general, since MPP now supports the Gather operator to fill virtual columns, we should fully lift the restrictions here.
+		// We leave them here because of cases like:
+		// parent-----+
+		//            V  (when the parent requires a root task type here, we need to convert the mpp task to a root task)
+		//    projection [mpp task] [a]
+		//      table-scan [mpp task] [a(virtual col as: b+1), b]
+		// In the process of converting the mpp task to a root task, the encapsulating table reader uses the schema of its
+		// first child, [a], as its own schema, so when we resolve indices later, the virtual column 'a' can no longer be resolved.
+		//
+		if hasVirtualColumn && !canMppConvertToRootForDisaggregatedTiFlash && !canMppConvertToRootForWhenTiFlashCopIsBanned {
+			return base.InvalidTask, nil
+		}
+		// ********************************** future deprecated end **************************/
+		mppTask := &MppTask{
+			p:           ts,
+			partTp:      property.AnyType,
+			tblColHists: ds.TblColHists,
+		}
+		ts.PlanPartInfo = &PhysPlanPartInfo{
+			PruningConds:   pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds),
+			PartitionNames: ds.PartitionNames,
+			Columns:        ds.TblCols,
+			ColumnNames:    ds.OutputNames(),
+		}
+		mppTask = ts.addPushedDownSelectionToMppTask(mppTask, ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt))
+		var task base.Task = mppTask
+		if !mppTask.Invalid() {
+			if prop.TaskTp == property.MppTaskType && len(mppTask.rootTaskConds) > 0 {
+				// If some filters cannot be pushed down to TiFlash, we have to make sure they are executed in TiDB.
+				// That requires returning a rootTask, but prop requires an mppTask, so the requirement cannot be met.
+				task = base.InvalidTask
+			} else if prop.TaskTp == property.RootTaskType {
+				// When we get here, one of the canMppConvertToRoot* flags above is true.
+				// This handles situations where an mppTask cannot be generated for some operators.
+				// For example, when the build side of a HashJoin is a Projection that cannot be pushed down
+				// to TiFlash (because TiFlash doesn't support some expressions in Proj), the HashJoin cannot
+				// be pushed down either, but we still want the TableScan to run on TiFlash.
+				task = mppTask
+				task = task.ConvertToRootTask(ds.SCtx())
+			}
+		}
+		return task, nil
+	}
+	if isTiFlashPath && (config.GetGlobalConfig().DisaggregatedTiFlash || ds.SCtx().GetSessionVars().IsTiFlashCopBanned()) {
+		// prop.TaskTp is cop-related, so just return base.InvalidTask.
+		return base.InvalidTask, nil
+	}
+	copTask := &CopTask{
+		tablePlan:         ts,
+		indexPlanFinished: true,
+		tblColHists:       ds.TblColHists,
+	}
+	copTask.physPlanPartInfo = &PhysPlanPartInfo{
+		PruningConds:   pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds),
+		PartitionNames: ds.PartitionNames,
+		Columns:        ds.TblCols,
+		ColumnNames:    ds.OutputNames(),
+	}
+	ts.PlanPartInfo = copTask.physPlanPartInfo
+	var task base.Task = copTask
+	if candidate.isMatchProp {
+		copTask.keepOrder = true
+		if ds.TableInfo.GetPartitionInfo() != nil {
+			// TableScan on a partition table on TiFlash can't keep order.
+			if ts.StoreType == kv.TiFlash {
+				return base.InvalidTask, nil
+			}
+			// Add sort items for the table scan for the merge-sort operation between partitions.
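+			// Each partition produces its own ordered stream; these ByItems let the
+			// table reader merge-sort the per-partition streams to keep the required order.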
+ byItems := make([]*util.ByItems, 0, len(prop.SortItems)) + for _, si := range prop.SortItems { + byItems = append(byItems, &util.ByItems{ + Expr: si.Col, + Desc: si.Desc, + }) + } + ts.ByItems = byItems + } + } + ts.addPushedDownSelection(copTask, ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt)) + if prop.IsFlashProp() && len(copTask.rootTaskConds) != 0 { + return base.InvalidTask, nil + } + if prop.TaskTp == property.RootTaskType { + task = task.ConvertToRootTask(ds.SCtx()) + } else if _, ok := task.(*RootTask); ok { + return base.InvalidTask, nil + } + return task, nil +} + +func convertToSampleTable(ds *DataSource, prop *property.PhysicalProperty, + candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, error) { + if prop.TaskTp == property.CopMultiReadTaskType { + return base.InvalidTask, nil + } + if !prop.IsSortItemEmpty() && !candidate.isMatchProp { + return base.InvalidTask, nil + } + if candidate.isMatchProp { + // Disable keep order property for sample table path. + return base.InvalidTask, nil + } + p := PhysicalTableSample{ + TableSampleInfo: ds.SampleInfo, + TableInfo: ds.table, + PhysicalTableID: ds.PhysicalTableID, + Desc: candidate.isMatchProp && prop.SortItems[0].Desc, + }.Init(ds.SCtx(), ds.QueryBlockOffset()) + p.schema = ds.Schema() + rt := &RootTask{} + rt.SetPlan(p) + return rt, nil +} + +func convertToPointGet(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath) base.Task { + if !prop.IsSortItemEmpty() && !candidate.isMatchProp { + return base.InvalidTask + } + if prop.TaskTp == property.CopMultiReadTaskType && candidate.path.IsSingleScan || + prop.TaskTp == property.CopSingleReadTaskType && !candidate.path.IsSingleScan { + return base.InvalidTask + } + + if tidbutil.IsMemDB(ds.DBName.L) { + return base.InvalidTask + } + + accessCnt := math.Min(candidate.path.CountAfterAccess, float64(1)) + pointGetPlan := PointGetPlan{ + ctx: ds.SCtx(), + AccessConditions: candidate.path.AccessConds, + schema: ds.Schema().Clone(), + dbName: ds.DBName.L, + TblInfo: ds.TableInfo, + outputNames: ds.OutputNames(), + LockWaitTime: ds.SCtx().GetSessionVars().LockWaitTimeout, + Columns: ds.Columns, + }.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.QueryBlockOffset()) + if ds.PartitionDefIdx != nil { + pointGetPlan.PartitionIdx = ds.PartitionDefIdx + } + pointGetPlan.PartitionNames = ds.PartitionNames + rTsk := &RootTask{} + rTsk.SetPlan(pointGetPlan) + if candidate.path.IsIntHandlePath { + pointGetPlan.Handle = kv.IntHandle(candidate.path.Ranges[0].LowVal[0].GetInt64()) + pointGetPlan.UnsignedHandle = mysql.HasUnsignedFlag(ds.HandleCols.GetCol(0).RetType.GetFlag()) + pointGetPlan.accessCols = ds.TblCols + found := false + for i := range ds.Columns { + if ds.Columns[i].ID == ds.HandleCols.GetCol(0).ID { + pointGetPlan.HandleColOffset = ds.Columns[i].Offset + found = true + break + } + } + if !found { + return base.InvalidTask + } + // Add filter condition to table plan now. 
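+		// For an int-handle point get, any remaining table filters are evaluated
+		// in a Selection placed on top of the PointGet plan.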
+ if len(candidate.path.TableFilters) > 0 { + sel := PhysicalSelection{ + Conditions: candidate.path.TableFilters, + }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) + sel.SetChildren(pointGetPlan) + rTsk.SetPlan(sel) + } + } else { + pointGetPlan.IndexInfo = candidate.path.Index + pointGetPlan.IdxCols = candidate.path.IdxCols + pointGetPlan.IdxColLens = candidate.path.IdxColLens + pointGetPlan.IndexValues = candidate.path.Ranges[0].LowVal + if candidate.path.IsSingleScan { + pointGetPlan.accessCols = candidate.path.IdxCols + } else { + pointGetPlan.accessCols = ds.TblCols + } + // Add index condition to table plan now. + if len(candidate.path.IndexFilters)+len(candidate.path.TableFilters) > 0 { + sel := PhysicalSelection{ + Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...), + }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) + sel.SetChildren(pointGetPlan) + rTsk.SetPlan(sel) + } + } + + return rTsk +} + +func convertToBatchPointGet(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath) base.Task { + if !prop.IsSortItemEmpty() && !candidate.isMatchProp { + return base.InvalidTask + } + if prop.TaskTp == property.CopMultiReadTaskType && candidate.path.IsSingleScan || + prop.TaskTp == property.CopSingleReadTaskType && !candidate.path.IsSingleScan { + return base.InvalidTask + } + + accessCnt := math.Min(candidate.path.CountAfterAccess, float64(len(candidate.path.Ranges))) + batchPointGetPlan := &BatchPointGetPlan{ + ctx: ds.SCtx(), + dbName: ds.DBName.L, + AccessConditions: candidate.path.AccessConds, + TblInfo: ds.TableInfo, + KeepOrder: !prop.IsSortItemEmpty(), + Columns: ds.Columns, + PartitionNames: ds.PartitionNames, + } + if ds.PartitionDefIdx != nil { + batchPointGetPlan.SinglePartition = true + batchPointGetPlan.PartitionIdxs = []int{*ds.PartitionDefIdx} + } + if batchPointGetPlan.KeepOrder { + batchPointGetPlan.Desc = prop.SortItems[0].Desc + } + rTsk := &RootTask{} + if candidate.path.IsIntHandlePath { + for _, ran := range candidate.path.Ranges { + batchPointGetPlan.Handles = append(batchPointGetPlan.Handles, kv.IntHandle(ran.LowVal[0].GetInt64())) + } + batchPointGetPlan.accessCols = ds.TblCols + found := false + for i := range ds.Columns { + if ds.Columns[i].ID == ds.HandleCols.GetCol(0).ID { + batchPointGetPlan.HandleColOffset = ds.Columns[i].Offset + found = true + break + } + } + if !found { + return base.InvalidTask + } + + // Add filter condition to table plan now. 
+ if len(candidate.path.TableFilters) > 0 { + batchPointGetPlan.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.Schema().Clone(), ds.OutputNames(), ds.QueryBlockOffset()) + sel := PhysicalSelection{ + Conditions: candidate.path.TableFilters, + }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) + sel.SetChildren(batchPointGetPlan) + rTsk.SetPlan(sel) + } + } else { + batchPointGetPlan.IndexInfo = candidate.path.Index + batchPointGetPlan.IdxCols = candidate.path.IdxCols + batchPointGetPlan.IdxColLens = candidate.path.IdxColLens + for _, ran := range candidate.path.Ranges { + batchPointGetPlan.IndexValues = append(batchPointGetPlan.IndexValues, ran.LowVal) + } + if !prop.IsSortItemEmpty() { + batchPointGetPlan.KeepOrder = true + batchPointGetPlan.Desc = prop.SortItems[0].Desc + } + if candidate.path.IsSingleScan { + batchPointGetPlan.accessCols = candidate.path.IdxCols + } else { + batchPointGetPlan.accessCols = ds.TblCols + } + // Add index condition to table plan now. + if len(candidate.path.IndexFilters)+len(candidate.path.TableFilters) > 0 { + batchPointGetPlan.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.Schema().Clone(), ds.OutputNames(), ds.QueryBlockOffset()) + sel := PhysicalSelection{ + Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...), + }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) + sel.SetChildren(batchPointGetPlan) + rTsk.SetPlan(sel) + } + } + if rTsk.GetPlan() == nil { + tmpP := batchPointGetPlan.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.Schema().Clone(), ds.OutputNames(), ds.QueryBlockOffset()) + rTsk.SetPlan(tmpP) + } + + return rTsk +} + +func (ts *PhysicalTableScan) addPushedDownSelectionToMppTask(mpp *MppTask, stats *property.StatsInfo) *MppTask { + filterCondition, rootTaskConds := SplitSelCondsWithVirtualColumn(ts.filterCondition) + var newRootConds []expression.Expression + filterCondition, newRootConds = expression.PushDownExprs(util.GetPushDownCtx(ts.SCtx()), filterCondition, ts.StoreType) + mpp.rootTaskConds = append(rootTaskConds, newRootConds...) + + ts.filterCondition = filterCondition + // Add filter condition to table plan now. + if len(ts.filterCondition) > 0 { + sel := PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.SCtx(), stats, ts.QueryBlockOffset()) + sel.SetChildren(ts) + mpp.p = sel + } + return mpp +} + +func (ts *PhysicalTableScan) addPushedDownSelection(copTask *CopTask, stats *property.StatsInfo) { + ts.filterCondition, copTask.rootTaskConds = SplitSelCondsWithVirtualColumn(ts.filterCondition) + var newRootConds []expression.Expression + ts.filterCondition, newRootConds = expression.PushDownExprs(util.GetPushDownCtx(ts.SCtx()), ts.filterCondition, ts.StoreType) + copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...) + + // Add filter condition to table plan now. 
+	if len(ts.filterCondition) > 0 {
+		sel := PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.SCtx(), stats, ts.QueryBlockOffset())
+		if len(copTask.rootTaskConds) != 0 {
+			selectivity, _, err := cardinality.Selectivity(ts.SCtx(), copTask.tblColHists, ts.filterCondition, nil)
+			if err != nil {
+				logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
+				selectivity = cost.SelectionFactor
+			}
+			sel.SetStats(ts.StatsInfo().Scale(selectivity))
+		}
+		sel.SetChildren(ts)
+		copTask.tablePlan = sel
+	}
+}
+
+func (ts *PhysicalTableScan) getScanRowSize() float64 {
+	if ts.StoreType == kv.TiKV {
+		return cardinality.GetTableAvgRowSize(ts.SCtx(), ts.tblColHists, ts.tblCols, ts.StoreType, true)
+	}
+	// If `ts.HandleCols` is nil, the schema of the table scan doesn't have a handle column.
+	// Column pruning guarantees this invariant.
+	return cardinality.GetTableAvgRowSize(ts.SCtx(), ts.tblColHists, ts.Schema().Columns, ts.StoreType, ts.HandleCols != nil)
+}
+
+func getOriginalPhysicalTableScan(ds *DataSource, prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool) (*PhysicalTableScan, float64) {
+	ts := PhysicalTableScan{
+		Table:           ds.TableInfo,
+		Columns:         slices.Clone(ds.Columns),
+		TableAsName:     ds.TableAsName,
+		DBName:          ds.DBName,
+		isPartition:     ds.PartitionDefIdx != nil,
+		physicalTableID: ds.PhysicalTableID,
+		Ranges:          path.Ranges,
+		AccessCondition: path.AccessConds,
+		StoreType:       path.StoreType,
+		HandleCols:      ds.HandleCols,
+		tblCols:         ds.TblCols,
+		tblColHists:     ds.TblColHists,
+		constColsByCond: path.ConstCols,
+		prop:            prop,
+		filterCondition: slices.Clone(path.TableFilters),
+	}.Init(ds.SCtx(), ds.QueryBlockOffset())
+	ts.SetSchema(ds.Schema().Clone())
+	rowCount := path.CountAfterAccess
+	if prop.ExpectedCnt < ds.StatsInfo().RowCount {
+		rowCount = cardinality.AdjustRowCountForTableScanByLimit(ds.SCtx(),
+			ds.StatsInfo(), ds.TableStats, ds.StatisticTable,
+			path, prop.ExpectedCnt, isMatchProp && prop.SortItems[0].Desc)
+	}
+	// We need the NDV of columns since it may be used in the cost estimation of joins. Precisely speaking,
+	// we should track the NDV of each histogram bucket and sum up the NDV of the buckets we actually need
+	// to scan, but this would only improve the NDV accuracy for one column; for the other columns, we would
+	// still need to assume values are uniformly distributed. For simplicity, we use the uniform assumption
+	// for all columns now, as we do in `deriveStatsByFilter`.
+	ts.SetStats(ds.TableStats.ScaleByExpectCnt(rowCount))
+	usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false)
+	if usedStats != nil && usedStats.GetUsedInfo(ts.physicalTableID) != nil {
+		ts.usedStatsInfo = usedStats.GetUsedInfo(ts.physicalTableID)
+	}
+	if isMatchProp {
+		ts.Desc = prop.SortItems[0].Desc
+		ts.KeepOrder = true
+	}
+	return ts, rowCount
+}
+
+func getOriginalPhysicalIndexScan(ds *DataSource, prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool, isSingleScan bool) *PhysicalIndexScan {
+	idx := path.Index
+	is := PhysicalIndexScan{
+		Table:            ds.TableInfo,
+		TableAsName:      ds.TableAsName,
+		DBName:           ds.DBName,
+		Columns:          util.CloneColInfos(ds.Columns),
+		Index:            idx,
+		IdxCols:          path.IdxCols,
+		IdxColLens:       path.IdxColLens,
+		AccessCondition:  path.AccessConds,
+		Ranges:           path.Ranges,
+		dataSourceSchema: ds.Schema(),
+		isPartition:      ds.PartitionDefIdx != nil,
+		physicalTableID:  ds.PhysicalTableID,
+		tblColHists:      ds.TblColHists,
+		pkIsHandleCol:    ds.getPKIsHandleCol(),
+		constColsByCond:  path.ConstCols,
+		prop:             prop,
+	}.Init(ds.SCtx(), ds.QueryBlockOffset())
+	rowCount := path.CountAfterAccess
+	is.initSchema(append(path.FullIdxCols, ds.CommonHandleCols...), !isSingleScan)
+
+	// If (1) there exists an index whose selectivity is smaller than the threshold,
+	// and (2) there is a Selection on the IndexScan, we don't use the ExpectedCnt to
+	// adjust the estimated row count of the IndexScan.
+	ignoreExpectedCnt := ds.AccessPathMinSelectivity < ds.SCtx().GetSessionVars().OptOrderingIdxSelThresh &&
+		len(path.IndexFilters)+len(path.TableFilters) > 0
+
+	if (isMatchProp || prop.IsSortItemEmpty()) && prop.ExpectedCnt < ds.StatsInfo().RowCount && !ignoreExpectedCnt {
+		rowCount = cardinality.AdjustRowCountForIndexScanByLimit(ds.SCtx(),
+			ds.StatsInfo(), ds.TableStats, ds.StatisticTable,
+			path, prop.ExpectedCnt, isMatchProp && prop.SortItems[0].Desc)
+	}
+	// ScaleByExpectCnt only allows scaling the row count to a value smaller than the table's total row count.
+	// But for an MV index, it's possible that the IndexRangeScan row count is larger than the table's total row count.
+	// See Case 2 in CalcTotalSelectivityForMVIdxPath for an example.
+	if idx.MVIndex && rowCount > ds.TableStats.RowCount {
+		is.SetStats(ds.TableStats.Scale(rowCount / ds.TableStats.RowCount))
+	} else {
+		is.SetStats(ds.TableStats.ScaleByExpectCnt(rowCount))
+	}
+	usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false)
+	if usedStats != nil && usedStats.GetUsedInfo(is.physicalTableID) != nil {
+		is.usedStatsInfo = usedStats.GetUsedInfo(is.physicalTableID)
+	}
+	if isMatchProp {
+		is.Desc = prop.SortItems[0].Desc
+		is.KeepOrder = true
+	}
+	return is
+}
+
+func findBestTask4LogicalCTE(lp base.LogicalPlan, prop *property.PhysicalProperty, counter *base.PlanCounterTp, pop *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) {
+	p := lp.(*logicalop.LogicalCTE)
+	if p.ChildLen() > 0 {
+		return p.BaseLogicalPlan.FindBestTask(prop, counter, pop)
+	}
+	if !prop.IsSortItemEmpty() && !prop.CanAddEnforcer {
+		return base.InvalidTask, 1, nil
+	}
+	// The physical plan has already been built when deriving stats.
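+	// So here we only need to wrap the pre-built seed/recursive plans into a PhysicalCTE.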
+	pcte := PhysicalCTE{SeedPlan: p.Cte.SeedPartPhysicalPlan, RecurPlan: p.Cte.RecursivePartPhysicalPlan, CTE: p.Cte, cteAsName: p.CteAsName, cteName: p.CteName}.Init(p.SCtx(), p.StatsInfo())
+	pcte.SetSchema(p.Schema())
+	if prop.IsFlashProp() && prop.CTEProducerStatus == property.AllCTECanMpp {
+		pcte.readerReceiver = PhysicalExchangeReceiver{IsCTEReader: true}.Init(p.SCtx(), p.StatsInfo())
+		if prop.MPPPartitionTp != property.AnyType {
+			return base.InvalidTask, 1, nil
+		}
+		t = &MppTask{
+			p:           pcte,
+			partTp:      prop.MPPPartitionTp,
+			hashCols:    prop.MPPPartitionCols,
+			tblColHists: p.StatsInfo().HistColl,
+		}
+	} else {
+		rt := &RootTask{}
+		rt.SetPlan(pcte)
+		rt.SetEmpty(false)
+		t = rt
+	}
+	if prop.CanAddEnforcer {
+		t = enforceProperty(prop, t, p.Plan.SCtx())
+	}
+	return t, 1, nil
+}
+
+func findBestTask4LogicalCTETable(lp base.LogicalPlan, prop *property.PhysicalProperty, _ *base.PlanCounterTp, _ *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) {
+	p := lp.(*logicalop.LogicalCTETable)
+	if !prop.IsSortItemEmpty() {
+		return base.InvalidTask, 0, nil
+	}
+
+	pcteTable := PhysicalCTETable{IDForStorage: p.IDForStorage}.Init(p.SCtx(), p.StatsInfo())
+	pcteTable.SetSchema(p.Schema())
+	rt := &RootTask{}
+	rt.SetPlan(pcteTable)
+	t = rt
+	return t, 1, nil
+}
+
+func appendCandidate(lp base.LogicalPlan, task base.Task, prop *property.PhysicalProperty, opt *optimizetrace.PhysicalOptimizeOp) {
+	if task == nil || task.Invalid() {
+		return
+	}
+	utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, lp, task.Plan(), prop)
+}
+
+// pushDownNot converts a condition like 'not (a != 1)' to 'a = 1'. When we build ranges from conditions, a condition
+// like 'not (a != 1)' would not be handled, so we convert it to 'a = 1', which can be handled when building ranges.
+func pushDownNot(ctx expression.BuildContext, conds []expression.Expression) []expression.Expression {
+	for i, cond := range conds {
+		conds[i] = expression.PushDownNot(ctx, cond)
+	}
+	return conds
+}
+
+func validateTableSamplePlan(ds *DataSource, t base.Task, err error) error {
+	if err != nil {
+		return err
+	}
+	if ds.SampleInfo != nil && !t.Invalid() {
+		if _, ok := t.Plan().(*PhysicalTableSample); !ok {
+			return expression.ErrInvalidTableSample.GenWithStackByArgs("plan not supported")
+		}
+	}
+	return nil
+}

From 7b56ddfaf989380411d17c54b8aa515f0de6110a Mon Sep 17 00:00:00 2001
From: arenatlx <314806019@qq.com>
Date: Mon, 11 Nov 2024 15:59:57 +0800
Subject: [PATCH 2/2] .
Signed-off-by: arenatlx <314806019@qq.com> --- executor/tiflashtest/BUILD.bazel | 6 +- executor/tiflashtest/tiflash_test.go | 389 +- .../testdata/plan_suite_out.json | 4011 ----------------- pkg/planner/core/find_best_task.go | 3016 ------------- .../casetest/testdata/plan_suite_out.json | 6 +- planner/core/find_best_task.go | 4 + 6 files changed, 10 insertions(+), 7422 deletions(-) delete mode 100644 pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json delete mode 100644 pkg/planner/core/find_best_task.go diff --git a/executor/tiflashtest/BUILD.bazel b/executor/tiflashtest/BUILD.bazel index d8445cbf09996..0af1c5394cfa7 100644 --- a/executor/tiflashtest/BUILD.bazel +++ b/executor/tiflashtest/BUILD.bazel @@ -9,11 +9,7 @@ go_test( ], flaky = True, race = "on", -<<<<<<< HEAD:executor/tiflashtest/BUILD.bazel - shard_count = 38, -======= - shard_count = 45, ->>>>>>> 8df006280e9 (planner: make converge index merge path feel the prefer tiflash hint (#56227)):pkg/executor/test/tiflashtest/BUILD.bazel + shard_count = 39, deps = [ "//config", "//domain", diff --git a/executor/tiflashtest/tiflash_test.go b/executor/tiflashtest/tiflash_test.go index 103348ad99c91..9c6b9084223a3 100644 --- a/executor/tiflashtest/tiflash_test.go +++ b/executor/tiflashtest/tiflash_test.go @@ -1738,390 +1738,6 @@ func TestMppStoreCntWithErrors(t *testing.T) { require.Nil(t, failpoint.Disable(mppStoreCountSetLastUpdateTimeP2)) require.Nil(t, failpoint.Disable(mppStoreCountPDError)) } -<<<<<<< HEAD:executor/tiflashtest/tiflash_test.go -======= - -func TestMPP47766(t *testing.T) { - store := testkit.CreateMockStore(t, withMockTiFlash(1)) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_allow_mpp=1") - tk.MustExec("set @@session.tidb_enforce_mpp=1") - tk.MustExec("set @@session.tidb_allow_tiflash_cop=off") - - tk.MustExec("CREATE TABLE `traces` (" + - " `test_time` timestamp NOT NULL," + - " `test_time_gen` date GENERATED ALWAYS AS (date(`test_time`)) VIRTUAL," + - " KEY `traces_date_idx` (`test_time_gen`)" + - ")") - tk.MustExec("alter table `traces` set tiflash replica 1") - tb := external.GetTableByName(t, tk, "test", "traces") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - tk.MustQuery("explain select date(test_time), count(1) as test_date from `traces` group by 1").Check(testkit.Rows( - "Projection_4 8000.00 root test.traces.test_time_gen->Column#5, Column#4", - "└─HashAgg_8 8000.00 root group by:test.traces.test_time_gen, funcs:count(1)->Column#4, funcs:firstrow(test.traces.test_time_gen)->test.traces.test_time_gen", - " └─TableReader_20 10000.00 root MppVersion: 2, data:ExchangeSender_19", - " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─TableFullScan_18 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) - tk.MustQuery("explain select /*+ read_from_storage(tiflash[traces]) */ date(test_time) as test_date, count(1) from `traces` group by 1").Check(testkit.Rows( - "TableReader_31 8000.00 root MppVersion: 2, data:ExchangeSender_30", - "└─ExchangeSender_30 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection_5 8000.00 mpp[tiflash] date(test.traces.test_time)->Column#5, Column#4", - " └─Projection_26 8000.00 mpp[tiflash] Column#4, test.traces.test_time", - " └─HashAgg_27 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#4, funcs:firstrow(Column#15)->test.traces.test_time", - " 
└─ExchangeReceiver_29 8000.00 mpp[tiflash] ", - " └─ExchangeSender_28 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]", - " └─HashAgg_25 8000.00 mpp[tiflash] group by:Column#17, funcs:count(1)->Column#14, funcs:firstrow(Column#16)->Column#15", - " └─Projection_32 10000.00 mpp[tiflash] test.traces.test_time->Column#16, date(test.traces.test_time)->Column#17", - " └─TableFullScan_15 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) -} - -func TestUnionScan(t *testing.T) { - store := testkit.CreateMockStore(t, withMockTiFlash(2)) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_allow_mpp=1") - tk.MustExec("set @@session.tidb_enforce_mpp=1") - tk.MustExec("set @@session.tidb_allow_tiflash_cop=off") - - for x := 0; x < 2; x++ { - tk.MustExec("drop table if exists t") - if x == 0 { - // Test cache table. - tk.MustExec("create table t(a int not null primary key, b int not null)") - tk.MustExec("alter table t set tiflash replica 1") - tb := external.GetTableByName(t, tk, "test", "t") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - tk.MustExec("alter table t cache") - } else { - // Test dirty transaction. - tk.MustExec("create table t(a int not null primary key, b int not null) partition by hash(a) partitions 2") - tk.MustExec("alter table t set tiflash replica 1") - tb := external.GetTableByName(t, tk, "test", "t") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - } - - insertStr := "insert into t values(0, 0)" - for i := 1; i < 10; i++ { - insertStr += fmt.Sprintf(",(%d, %d)", i, i) - } - tk.MustExec(insertStr) - - if x != 0 { - // Test dirty transaction. - tk.MustExec("begin") - } - - // Test Basic. - sql := "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("10")) - - // Test Delete. - tk.MustExec("delete from t where a = 0") - - sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("9")) - - sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ a, b from t order by 1" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("1 1", "2 2", "3 3", "4 4", "5 5", "6 6", "7 7", "8 8", "9 9")) - - // Test Insert. - tk.MustExec("insert into t values(100, 100)") - - sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("10")) - - sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ a, b from t order by 1, 2" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("1 1", "2 2", "3 3", "4 4", "5 5", "6 6", "7 7", "8 8", "9 9", "100 100")) - - // Test Update - tk.MustExec("update t set b = 200 where a = 100") - - sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("10")) - - sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ a, b from t order by 1, 2" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("1 1", "2 2", "3 3", "4 4", "5 5", "6 6", "7 7", "8 8", "9 9", "100 200")) - - if x != 0 { - // Test dirty transaction. 
- tk.MustExec("commit") - } - - sql = "select /*+ READ_FROM_STORAGE(tiflash[t]) */ count(1) from t" - checkMPPInExplain(t, tk, "explain "+sql) - tk.MustQuery(sql).Check(testkit.Rows("10")) - - if x == 0 { - tk.MustExec("alter table t nocache") - } - } -} - -func checkMPPInExplain(t *testing.T, tk *testkit.TestKit, sql string) { - rows := tk.MustQuery(sql).Rows() - resBuff := bytes.NewBufferString("") - for _, row := range rows { - fmt.Fprintf(resBuff, "%s\n", row) - } - res := resBuff.String() - require.Contains(t, res, "mpp[tiflash]") -} - -func TestMPPRecovery(t *testing.T) { - store := testkit.CreateMockStore(t, withMockTiFlash(2)) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("create table t(a int, b int)") - tk.MustExec("alter table t set tiflash replica 1") - tb := external.GetTableByName(t, tk, "test", "t") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - - checkStrs := []string{"0 0"} - insertStr := "insert into t values(0, 0)" - for i := 1; i < 1500; i++ { - insertStr += fmt.Sprintf(",(%d, %d)", i, i) - checkStrs = append(checkStrs, fmt.Sprintf("%d %d", i, i)) - } - tk.MustExec(insertStr) - tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") - sql := "select * from t order by 1, 2" - const packagePath = "github.com/pingcap/tidb/pkg/executor/internal/mpp/" - - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_mock_enable", "return()")) - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_ignore_recovery_err", "return()")) - // Test different chunk size. And force one mpp err. - { - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(1)")) - - tk.MustExec("set @@tidb_max_chunk_size = default") - tk.MustQuery(sql).Check(testkit.Rows(checkStrs...)) - tk.MustExec("set @@tidb_max_chunk_size = 32") - tk.MustQuery(sql).Check(testkit.Rows(checkStrs...)) - - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times")) - } - - // Test exceeds max recovery times. Default max times is 3. - { - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(5)")) - - tk.MustExec("set @@tidb_max_chunk_size = 32") - err := tk.QueryToErr(sql) - strings.Contains(err.Error(), "mock mpp err") - - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times")) - } - - { - // When AllowFallbackToTiKV, mpp err recovery should be disabled. - // So event we inject mock err multiple times, the query should be ok. - tk.MustExec("set @@tidb_allow_fallback_to_tikv = \"tiflash\"") - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(5)")) - - tk.MustExec("set @@tidb_max_chunk_size = 32") - tk.MustQuery(sql).Check(testkit.Rows(checkStrs...)) - - tk.MustExec("set @@tidb_allow_fallback_to_tikv = default") - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times")) - } - - // Test hold logic. Default hold 4 * MaxChunkSize rows. 
- { - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_max_err_times", "return(0)")) - - tk.MustExec("set @@tidb_max_chunk_size = 32") - expectedHoldSize := 2 - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_hold_size", fmt.Sprintf("1*return(%d)", expectedHoldSize))) - tk.MustQuery(sql).Check(testkit.Rows(checkStrs...)) - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_hold_size")) - - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_max_err_times")) - } - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_ignore_recovery_err")) - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_mock_enable")) - - { - // We have 2 mock tiflash, but the table is small, so only 1 tiflash node is in computation. - require.NoError(t, failpoint.Enable(packagePath+"mpp_recovery_test_check_node_cnt", "return(1)")) - - tk.MustExec("set @@tidb_max_chunk_size = 32") - tk.MustQuery(sql).Check(testkit.Rows(checkStrs...)) - - require.NoError(t, failpoint.Disable(packagePath+"mpp_recovery_test_check_node_cnt")) - } - - tk.MustExec("set @@tidb_max_chunk_size = default") -} - -func TestIssue50358(t *testing.T) { - store := testkit.CreateMockStore(t, withMockTiFlash(1)) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int not null primary key, b int not null)") - tk.MustExec("alter table t set tiflash replica 1") - tb := external.GetTableByName(t, tk, "test", "t") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - tk.MustExec("insert into t values(1,0)") - tk.MustExec("insert into t values(2,0)") - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1(c int not null primary key)") - tk.MustExec("alter table t1 set tiflash replica 1") - tb = external.GetTableByName(t, tk, "test", "t1") - err = domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - tk.MustExec("insert into t1 values(3)") - - tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") - tk.MustExec("set @@session.tidb_allow_mpp=ON") - for i := 0; i < 20; i++ { - // test if it is stable. 
- tk.MustQuery("select 8 from t join t1").Check(testkit.Rows("8", "8")) - } -} - -func TestMppAggShouldAlignFinalMode(t *testing.T) { - store := testkit.CreateMockStore(t, withMockTiFlash(1)) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t (" + - " d date," + - " v int," + - " primary key(d, v)" + - ") partition by range columns (d) (" + - " partition p1 values less than ('2023-07-02')," + - " partition p2 values less than ('2023-07-03')" + - ");") - tk.MustExec("alter table t set tiflash replica 1") - tb := external.GetTableByName(t, tk, "test", "t") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - tk.MustExec(`set tidb_partition_prune_mode='static';`) - err = failpoint.Enable("github.com/pingcap/tidb/pkg/expression/aggregation/show-agg-mode", "return(true)") - require.Nil(t, err) - - tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") - tk.MustQuery("explain format='brief' select 1 from (" + - " select /*+ read_from_storage(tiflash[t]) */ sum(1)" + - " from t where d BETWEEN '2023-07-01' and '2023-07-03' group by d" + - ") total;").Check(testkit.Rows("Projection 400.00 root 1->Column#4", - "└─HashAgg 400.00 root group by:test.t.d, funcs:count(complete,1)->Column#8", - " └─PartitionUnion 400.00 root ", - " ├─Projection 200.00 root test.t.d", - " │ └─HashAgg 200.00 root group by:test.t.d, funcs:firstrow(partial2,test.t.d)->test.t.d, funcs:count(final,Column#12)->Column#9", - " │ └─TableReader 200.00 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 200.00 mpp[tiflash] ExchangeType: PassThrough", - " │ └─HashAgg 200.00 mpp[tiflash] group by:test.t.d, funcs:count(partial1,1)->Column#12", - " │ └─TableRangeScan 250.00 mpp[tiflash] table:t, partition:p1 range:[2023-07-01,2023-07-03], keep order:false, stats:pseudo", - " └─Projection 200.00 root test.t.d", - " └─HashAgg 200.00 root group by:test.t.d, funcs:firstrow(partial2,test.t.d)->test.t.d, funcs:count(final,Column#14)->Column#10", - " └─TableReader 200.00 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 200.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg 200.00 mpp[tiflash] group by:test.t.d, funcs:count(partial1,1)->Column#14", - " └─TableRangeScan 250.00 mpp[tiflash] table:t, partition:p2 range:[2023-07-01,2023-07-03], keep order:false, stats:pseudo")) - - err = failpoint.Disable("github.com/pingcap/tidb/pkg/expression/aggregation/show-agg-mode") - require.Nil(t, err) -} - -func TestMppTableReaderCacheForSingleSQL(t *testing.T) { - store := testkit.CreateMockStore(t, withMockTiFlash(1)) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t(a int, b int, primary key(a))") - tk.MustExec("alter table t set tiflash replica 1") - tb := external.GetTableByName(t, tk, "test", "t") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - - tk.MustExec("create table t2(a int, b int) partition by hash(b) partitions 4") - tk.MustExec("alter table t2 set tiflash replica 1") - tb = external.GetTableByName(t, tk, "test", "t2") - err = domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) - require.NoError(t, err) - tk.MustExec("insert into t values(1, 1)") - tk.MustExec("insert into t values(2, 2)") - tk.MustExec("insert into t values(3, 3)") - tk.MustExec("insert into t values(4, 4)") - 
tk.MustExec("insert into t values(5, 5)") - - tk.MustExec("insert into t2 values(1, 1)") - tk.MustExec("insert into t2 values(2, 2)") - tk.MustExec("insert into t2 values(3, 3)") - tk.MustExec("insert into t2 values(4, 4)") - tk.MustExec("insert into t2 values(5, 5)") - - tk.MustExec("set @@session.tidb_isolation_read_engines=\"tiflash\"") - tk.MustExec("set @@session.tidb_allow_mpp=ON") - tk.MustExec("set @@session.tidb_enforce_mpp=ON") - tk.MustExec("set @@session.tidb_max_chunk_size=32") - - // Test TableReader cache for single SQL. - type testCase struct { - sql string - expectHitNum int32 - expectMissNum int32 - } - - testCases := []testCase{ - // Non-Partition - // Cache hit - {"select * from t", 0, 1}, - {"select * from t union select * from t", 1, 1}, - {"select * from t union select * from t t1 union select * from t t2", 2, 1}, - {"select * from t where b <= 3 union select * from t where b > 3", 1, 1}, // both full range - {"select * from t where a <= 3 union select * from t where a <= 3", 1, 1}, // same range - {"select * from t t1 join t t2 on t1.b=t2.b", 1, 1}, - - // Cache miss - {"select * from t union all select * from t", 0, 2}, // different mpp task root - {"select * from t where a <= 3 union select * from t where a > 3", 0, 2}, // different range - - // Partition - // Cache hit - {"select * from t2 union select * from t2", 1, 1}, - {"select * from t2 where b = 1 union select * from t2 where b = 5", 1, 1}, // same partition, full range - {"select * from t2 where b = 1 and a < 3 union select * from t2 where b = 5 and a < 3", 1, 1}, // same partition, same range - {"select * from t2 t1 join t2 t2 on t1.b=t2.b", 1, 1}, - {"select * from t2 t1 join t2 t2 on t1.b=t2.b where t1.a = 2 and t2.a = 2", 1, 1}, - - // Cache miss - {"select * from t2 union select * from t2 where b = 1", 0, 2}, // different partition - {"select * from t2 where b = 2 union select * from t2 where b = 1", 0, 2}, // different partition - } - - var hitNum, missNum atomic.Int32 - hitFunc := func() { - hitNum.Add(1) - } - missFunc := func() { - missNum.Add(1) - } - failpoint.EnableCall("github.com/pingcap/tidb/pkg/planner/core/mppTaskGeneratorTableReaderCacheHit", hitFunc) - failpoint.EnableCall("github.com/pingcap/tidb/pkg/planner/core/mppTaskGeneratorTableReaderCacheMiss", missFunc) - for _, tc := range testCases { - hitNum.Store(0) - missNum.Store(0) - tk.MustQuery(tc.sql) - require.Equal(t, tc.expectHitNum, hitNum.Load()) - require.Equal(t, tc.expectMissNum, missNum.Load()) - } -} func TestIndexMergeCarePreferTiflash(t *testing.T) { store := testkit.CreateMockStore(t, withMockTiFlash(1)) @@ -2144,14 +1760,13 @@ func TestIndexMergeCarePreferTiflash(t *testing.T) { ")") tk.MustExec("alter table t set tiflash replica 1") tb := external.GetTableByName(t, tk, "test", "t") - err := domain.GetDomain(tk.Session()).DDLExecutor().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + err := domain.GetDomain(tk.Session()).DDL().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) require.NoError(t, err) tk.MustQuery("explain format=\"brief\" SELECT" + " /*+ read_from_storage(tiflash[a]) */ a.i FROM t a WHERE a.s = 0 AND a.a NOT IN (-1, 0) AND m >= 1726910326 AND m <= 1726910391 AND ( a.w IN ('1123') OR a.l IN ('1123'))").Check( - testkit.Rows("TableReader 0.00 root MppVersion: 2, data:ExchangeSender", + testkit.Rows("TableReader 0.00 root MppVersion: 1, data:ExchangeSender", "└─ExchangeSender 0.00 mpp[tiflash] ExchangeType: PassThrough", " └─Projection 0.00 mpp[tiflash] test.t.i", " └─Selection 0.00 
mpp[tiflash] ge(test.t.m, 1726910326), le(test.t.m, 1726910391), not(in(test.t.a, -1, 0)), or(eq(test.t.w, \"1123\"), eq(test.t.l, \"1123\"))", " └─TableFullScan 10.00 mpp[tiflash] table:a pushed down filter:eq(test.t.s, 0), keep order:false, stats:pseudo")) } ->>>>>>> 8df006280e9 (planner: make converge index merge path feel the prefer tiflash hint (#56227)):pkg/executor/test/tiflashtest/tiflash_test.go diff --git a/pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json b/pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json deleted file mode 100644 index d00d6941014cb..0000000000000 --- a/pkg/planner/core/casetest/physicalplantest/testdata/plan_suite_out.json +++ /dev/null @@ -1,4011 +0,0 @@ -[ - { - "Name": "TestMPPHints", - "Cases": [ - { - "SQL": "select /*+ MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", - " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", - " └─HashAgg 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#5, funcs:firstrow(Column#9)->test.t.a", - " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10", - " └─ExchangeReceiver 10000.00 mpp[tiflash] ", - " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", - " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", - " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#10)->Column#5, funcs:firstrow(test.t.a)->test.t.a", - " └─ExchangeReceiver 8000.00 mpp[tiflash] ", - " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", - " └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, Column#14, funcs:sum(Column#12)->Column#10", - " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#12, test.t.a->Column#13, test.t.c->Column#14", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: 
HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", - " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", - " └─HashAgg 8000.00 mpp[tiflash] group by:Column#7, Column#8, funcs:sum(Column#6)->Column#5, funcs:firstrow(Column#7)->test.t.a", - " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#6, test.t.a->Column#7, test.t.c->Column#8", - " └─ExchangeReceiver 10000.00 mpp[tiflash] ", - " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5", - " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a", - " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#8)->Column#5, funcs:firstrow(test.t.a)->test.t.a", - " └─ExchangeReceiver 8000.00 mpp[tiflash] ", - " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]", - " └─HashAgg 8000.00 mpp[tiflash] group by:Column#11, Column#12, funcs:sum(Column#10)->Column#8", - " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#10, test.t.a->Column#11, test.t.c->Column#12", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ 
└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, 
stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t1) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t1) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t1) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 9990.00 
mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t1) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now" - ] - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now"
-        ]
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now"
-        ]
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]The MPP join hints are in conflict, and you can only specify join method hints that are currently supported by MPP mode now"
-        ]
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG(), hash_agg() */ a, sum(b) from t group by a, c",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg 8000.00 mpp[tiflash] group by:Column#7, Column#8, funcs:sum(Column#6)->Column#5, funcs:firstrow(Column#7)->test.t.a",
-          " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#6, test.t.a->Column#7, test.t.c->Column#8",
-          " └─ExchangeReceiver 10000.00 mpp[tiflash] ",
-          " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG(), stream_agg() */ a, sum(b) from t group by a, c",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#6)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
-          " └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-          " └─HashAgg 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#6",
-          " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG(), use_index(t, idx_a) */ a, sum(b) from t where a > 1 group by a, c",
-        "Plan": [
-          "Projection 2666.67 root test.t.a, Column#5",
-          "└─HashAgg 2666.67 root group by:test.t.a, test.t.c, funcs:sum(Column#7)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
-          " └─IndexLookUp 2666.67 root ",
-          " ├─IndexRangeScan(Build) 3333.33 cop[tikv] table:t, index:idx_a(a) range:(1,+inf], keep order:false, stats:pseudo",
-          " └─HashAgg(Probe) 2666.67 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#7",
-          " └─TableRowIDScan 3333.33 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]No available path for table test.t with the store type tiflash of the hint /*+ read_from_storage */, please check the status of the table replica and variable value of tidb_isolation_read_engines(map[0:{} 1:{} 2:{}])",
-          "[planner:1815]The agg can not push down to the MPP side, the MPP_1PHASE_AGG() hint is invalid"
-        ]
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_1PHASE_AGG(), ignore_index(t, idx_a) */ a, sum(b) from t where a > 1 group by a, c",
-        "Plan": [
-          "TableReader 2666.67 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 2666.67 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection 2666.67 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg 2666.67 mpp[tiflash] group by:Column#7, Column#8, funcs:sum(Column#6)->Column#5, funcs:firstrow(Column#7)->test.t.a",
-          " └─Projection 3333.33 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#6, test.t.a->Column#7, test.t.c->Column#8",
-          " └─ExchangeReceiver 3333.33 mpp[tiflash] ",
-          " └─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-          " └─Selection 3333.33 mpp[tiflash] gt(test.t.a, 1)",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG(), force_index(t, idx_b) */ a, sum(b) from t where b < 2 group by a, c",
-        "Plan": [
-          "Projection 2658.67 root test.t.a, Column#5",
-          "└─HashAgg 2658.67 root group by:test.t.a, test.t.c, funcs:sum(Column#7)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
-          " └─IndexLookUp 2658.67 root ",
-          " ├─IndexRangeScan(Build) 3323.33 cop[tikv] table:t, index:idx_b(b) range:[-inf,2), keep order:false, stats:pseudo",
-          " └─HashAgg(Probe) 2658.67 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#7",
-          " └─TableRowIDScan 3323.33 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]No available path for table test.t with the store type tiflash of the hint /*+ read_from_storage */, please check the status of the table replica and variable value of tidb_isolation_read_engines(map[0:{} 1:{} 2:{}])",
-          "[planner:1815]The agg can not push down to the MPP side, the MPP_2PHASE_AGG() hint is invalid"
-        ]
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t]), MPP_2PHASE_AGG(), index_merge(t, idx_b, idx_a) */ a, sum(b) from t where b < 2 or a > 2 group by a, c",
-        "Plan": [
-          "TableReader 4439.11 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 4439.11 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 4439.11 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection 4439.11 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg 4439.11 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#8)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
-          " └─ExchangeReceiver 4439.11 mpp[tiflash] ",
-          " └─ExchangeSender 4439.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-          " └─HashAgg 4439.11 mpp[tiflash] group by:Column#11, Column#12, funcs:sum(Column#10)->Column#8",
-          " └─Projection 5548.89 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#10, test.t.a->Column#11, test.t.c->Column#12",
-          " └─Selection 5548.89 mpp[tiflash] or(lt(test.t.b, 2), gt(test.t.a, 2))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[parser:8061]Optimizer hint index_merge is not supported by TiDB and is ignored"
-        ]
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), shuffle_join(t1, t2, t3), straight_join() */ * from t t1, t t2, t t3 where t1.a=t2.a and t2.b=t3.b",
-        "Plan": [
-          "TableReader 15593.77 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 15593.77 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 15593.77 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─ExchangeReceiver(Probe) 12475.01 mpp[tiflash] ",
-          " └─ExchangeSender 12475.01 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]",
-          " └─HashJoin 12475.01 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9980.01 mpp[tiflash] ",
-          " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))",
not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), shuffle_join(t1, t2, t3), leading(t3, t1) */ * from t t1, t t2, t t3 where t1.a=t2.a and t2.b=t3.b", - "Plan": [ - "TableReader 124625374.88 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 124625374.88 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 124625374.88 mpp[tiflash] test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c", - " └─HashJoin 124625374.88 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a) eq(test.t.b, test.t.b)]", - " ├─ExchangeReceiver(Build) 9980.01 mpp[tiflash] ", - " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.b, collate: binary]", - " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 99800100.00 mpp[tiflash] ", - " └─ExchangeSender 99800100.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.b, collate: binary]", - " └─HashJoin 99800100.00 mpp[tiflash] CARTESIAN inner join", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), broadcast_join(t1, t2, t3), straight_join() */ * from t t2, t t1, t t3 where t1.a=t2.a and t2.b=t3.b", - "Plan": [ - "TableReader 15593.77 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 15593.77 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 15593.77 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo", - " └─HashJoin(Probe) 12475.01 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9980.01 mpp[tiflash] ", - " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " 
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2, t3]), broadcast_join(t1, t2, t3), leading(t2, t3) */ * from t t1, t t2, t t3 where t1.a=t2.a and t2.b=t3.b",
-        "Plan": [
-          "TableReader 15593.77 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 15593.77 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 15593.77 mpp[tiflash] test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c, test.t.a, test.t.b, test.t.c",
-          " └─HashJoin 15593.77 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─HashJoin(Probe) 12475.01 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]",
-          " ├─ExchangeReceiver(Build) 9980.01 mpp[tiflash] ",
-          " │ └─ExchangeSender 9980.01 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9980.01 mpp[tiflash] not(isnull(test.t.a)), not(isnull(test.t.b))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t3 pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ qb_name(qb, v), MPP_1PHASE_AGG(@qb) */ * from v",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#5, funcs:firstrow(Column#9)->test.t.a",
-          " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10",
-          " └─ExchangeReceiver 10000.00 mpp[tiflash] ",
-          " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ qb_name(qb, v), MPP_2PHASE_AGG(@qb) */ * from v",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection 8000.00 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#10)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
-          " └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          " └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-          " └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, Column#14, funcs:sum(Column#12)->Column#10",
-          " └─Projection 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#12, test.t.a->Column#13, test.t.c->Column#14",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
"Warn": null - }, - { - "SQL": "select /*+ qb_name(qb, v1), shuffle_join(t1@qb, t2@qb) */ * from v1", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 12487.50 mpp[tiflash] test.t.a", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ qb_name(qb, v1), broadcast_join(t1@qb, t2@qb) */ * from v1", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 12487.50 mpp[tiflash] test.t.a", - " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "SELECT /*+ shuffle_join(t) */ * FROM t WHERE EXISTS (SELECT /*+ SEMI_JOIN_REWRITE() */ 1 FROM t t1 WHERE t1.b = t.b);", - "Plan": [ - "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", - " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", - " ├─Projection(Build) 7992.00 mpp[tiflash] test.t.b", - " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, funcs:firstrow(test.t.b)->test.t.b", - " │ └─ExchangeReceiver 7992.00 mpp[tiflash] ", - " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, ", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "SELECT /*+ broadcast_join(t) */ * FROM t WHERE EXISTS (SELECT /*+ SEMI_JOIN_REWRITE() */ 1 FROM t t1 WHERE t1.b = t.b);", - "Plan": 
[ - "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", - " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", - " ├─ExchangeReceiver(Build) 7992.00 mpp[tiflash] ", - " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Projection 7992.00 mpp[tiflash] test.t.b", - " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, funcs:firstrow(test.t.b)->test.t.b", - " │ └─ExchangeReceiver 7992.00 mpp[tiflash] ", - " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, ", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select * from t t1 where t1.a < (select /*+ MPP_1PHASE_AGG() */ sum(t2.a) from t t2 where t2.b = t1.b);", - "Plan": [ - "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", - " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)], other cond:lt(cast(test.t.a, decimal(10,0) BINARY), Column#9)", - " ├─ExchangeReceiver(Build) 7992.00 mpp[tiflash] ", - " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Projection 7992.00 mpp[tiflash] Column#9, test.t.b", - " │ └─HashAgg 7992.00 mpp[tiflash] group by:Column#32, funcs:sum(Column#31)->Column#9, funcs:firstrow(Column#32)->test.t.b", - " │ └─Projection 9990.00 mpp[tiflash] cast(test.t.a, decimal(10,0) BINARY)->Column#31, test.t.b->Column#32", - " │ └─ExchangeReceiver 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select * from t t1 where t1.a < (select /*+ MPP_2PHASE_AGG() */ sum(t2.a) from t t2 where t2.b = t1.b);", - "Plan": [ - "TableReader 9990.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 9990.00 mpp[tiflash] test.t.a, test.t.b, test.t.c", - " └─HashJoin 9990.00 mpp[tiflash] inner join, equal:[eq(test.t.b, test.t.b)], other cond:lt(cast(test.t.a, decimal(10,0) BINARY), Column#9)", - " ├─ExchangeReceiver(Build) 7992.00 mpp[tiflash] ", - " │ └─ExchangeSender 7992.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Projection 7992.00 mpp[tiflash] Column#9, test.t.b", - " │ └─HashAgg 7992.00 mpp[tiflash] group by:test.t.b, funcs:sum(Column#20)->Column#9, funcs:firstrow(test.t.b)->test.t.b", - " │ └─ExchangeReceiver 7992.00 mpp[tiflash] ", - " │ 
-          " │ └─HashAgg 7992.00 mpp[tiflash] group by:Column#36, funcs:sum(Column#35)->Column#20",
-          " │ └─Projection 9990.00 mpp[tiflash] cast(test.t.a, decimal(10,0) BINARY)->Column#35, test.t.b->Column#36",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.b))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.b))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "WITH CTE AS (SELECT /*+ MPP_1PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;",
-        "Plan": [
-          "HashAgg 3403.09 root group by:Column#10, Column#11, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11",
-          "└─Union 3403.09 root ",
-          " ├─Selection 1701.55 root lt(Column#6, 18)",
-          " │ └─CTEFullScan 2126.93 root CTE:cte data:CTE_0",
-          " └─Selection 1701.55 root gt(test.t.b, 1)",
-          " └─CTEFullScan 2126.93 root CTE:cte data:CTE_0",
-          "CTE_0 2126.93 root Non-Recursive CTE",
-          "└─TableReader(Seed Part) 2126.93 root MppVersion: 2, data:ExchangeSender",
-          " └─ExchangeSender 2126.93 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Selection 2126.93 mpp[tiflash] or(lt(Column#5, 18), gt(test.t.b, 1))",
-          " └─Projection 2658.67 mpp[tiflash] Column#5, test.t.b",
-          " └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#5, funcs:firstrow(test.t.b)->test.t.b",
-          " └─ExchangeReceiver 3323.33 mpp[tiflash] ",
-          " └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]",
-          " └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "WITH CTE AS (SELECT /*+ MPP_2PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;",
-        "Plan": [
-          "HashAgg 3403.09 root group by:Column#10, Column#11, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11",
-          "└─Union 3403.09 root ",
-          " ├─Selection 1701.55 root lt(Column#6, 18)",
-          " │ └─CTEFullScan 2126.93 root CTE:cte data:CTE_0",
-          " └─Selection 1701.55 root gt(test.t.b, 1)",
-          " └─CTEFullScan 2126.93 root CTE:cte data:CTE_0",
-          "CTE_0 2126.93 root Non-Recursive CTE",
-          "└─TableReader(Seed Part) 2126.93 root MppVersion: 2, data:ExchangeSender",
-          " └─ExchangeSender 2126.93 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Selection 2126.93 mpp[tiflash] or(lt(Column#5, 18), gt(test.t.b, 1))",
-          " └─Projection 2658.67 mpp[tiflash] Column#5, test.t.b",
-          " └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:sum(Column#22)->Column#5, funcs:firstrow(test.t.b)->test.t.b",
-          " └─ExchangeReceiver 2658.67 mpp[tiflash] ",
-          " └─ExchangeSender 2658.67 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]",
-          " └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#22",
-          " └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
"SQL": "WITH CTE AS (SELECT /*+ shuffle_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", - "Plan": [ - "HashAgg 7095.48 root group by:Column#13, Column#14, funcs:firstrow(Column#13)->Column#13, funcs:firstrow(Column#14)->Column#14", - "└─Union 11086.68 root ", - " ├─Selection 5543.34 root lt(test.t.a, 18)", - " │ └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", - " └─Selection 5543.34 root gt(test.t.b, 1)", - " └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", - "CTE_0 6929.18 root Non-Recursive CTE", - "└─TableReader(Seed Part) 6929.18 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 6929.18 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 6929.18 mpp[tiflash] test.t.a, test.t.b", - " └─HashJoin 6929.18 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)], other cond:or(lt(test.t.a, 18), gt(test.t.b, 1))", - " ├─ExchangeReceiver(Build) 5543.34 mpp[tiflash] ", - " │ └─ExchangeSender 5543.34 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 5543.34 mpp[tiflash] not(isnull(test.t.a)), or(lt(test.t.a, 18), gt(test.t.b, 1))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "WITH CTE AS (SELECT /*+ broadcast_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", - "Plan": [ - "HashAgg 7095.48 root group by:Column#13, Column#14, funcs:firstrow(Column#13)->Column#13, funcs:firstrow(Column#14)->Column#14", - "└─Union 11086.68 root ", - " ├─Selection 5543.34 root lt(test.t.a, 18)", - " │ └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", - " └─Selection 5543.34 root gt(test.t.b, 1)", - " └─CTEFullScan 6929.18 root CTE:cte data:CTE_0", - "CTE_0 6929.18 root Non-Recursive CTE", - "└─TableReader(Seed Part) 6929.18 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 6929.18 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 6929.18 mpp[tiflash] test.t.a, test.t.b", - " └─HashJoin 6929.18 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)], other cond:or(lt(test.t.a, 18), gt(test.t.b, 1))", - " ├─ExchangeReceiver(Build) 5543.34 mpp[tiflash] ", - " │ └─ExchangeSender 5543.34 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 5543.34 mpp[tiflash] not(isnull(test.t.a)), or(lt(test.t.a, 18), gt(test.t.b, 1))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "WITH CTE AS (SELECT /*+ MERGE(), MPP_1PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", - "Plan": [ - "TableReader 3013.16 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: 
PassThrough", - " └─Projection 3013.16 mpp[tiflash] Column#20, Column#21", - " └─HashAgg 3013.16 mpp[tiflash] group by:Column#20, Column#21, funcs:firstrow(Column#20)->Column#20, funcs:firstrow(Column#21)->Column#21", - " └─ExchangeReceiver 3013.16 mpp[tiflash] ", - " └─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary], [name: Column#21, collate: binary]", - " └─Union 3013.16 mpp[tiflash] ", - " ├─Selection 2126.93 mpp[tiflash] lt(Column#12, 18)", - " │ └─Projection 2658.67 mpp[tiflash] Column#12, test.t.b", - " │ └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#12, funcs:firstrow(test.t.b)->test.t.b", - " │ └─ExchangeReceiver 3323.33 mpp[tiflash] ", - " │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", - " └─Projection 886.22 mpp[tiflash] Column#19->Column#20, test.t.b->Column#21", - " └─HashAgg 886.22 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#19, funcs:firstrow(test.t.b)->test.t.b", - " └─ExchangeReceiver 1107.78 mpp[tiflash] ", - " └─ExchangeSender 1107.78 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " └─Selection 1107.78 mpp[tiflash] gt(test.t.b, 1), lt(test.t.a, 60)", - " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "WITH CTE AS (SELECT /*+ MERGE(), MPP_2PHASE_AGG() */ count(*) as a, b FROM t WHERE t.a < 60 group by b) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;", - "Plan": [ - "TableReader 3013.16 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 3013.16 mpp[tiflash] Column#20, Column#21", - " └─HashAgg 3013.16 mpp[tiflash] group by:Column#20, Column#21, funcs:firstrow(Column#20)->Column#20, funcs:firstrow(Column#21)->Column#21", - " └─ExchangeReceiver 3013.16 mpp[tiflash] ", - " └─ExchangeSender 3013.16 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary], [name: Column#21, collate: binary]", - " └─Union 3013.16 mpp[tiflash] ", - " ├─Selection 2126.93 mpp[tiflash] lt(Column#12, 18)", - " │ └─Projection 2658.67 mpp[tiflash] Column#12, test.t.b", - " │ └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:sum(Column#32)->Column#12, funcs:firstrow(test.t.b)->test.t.b", - " │ └─ExchangeReceiver 2658.67 mpp[tiflash] ", - " │ └─ExchangeSender 2658.67 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " │ └─HashAgg 2658.67 mpp[tiflash] group by:test.t.b, funcs:count(1)->Column#32", - " │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 60)", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", - " └─Projection 886.22 mpp[tiflash] Column#19->Column#20, test.t.b->Column#21", - " └─HashAgg 886.22 mpp[tiflash] group by:test.t.b, funcs:sum(Column#46)->Column#19, funcs:firstrow(test.t.b)->test.t.b", - " └─ExchangeReceiver 886.22 mpp[tiflash] ", - " └─ExchangeSender 886.22 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.b, collate: binary]", - " └─HashAgg 886.22 
-          " └─Selection 1107.78 mpp[tiflash] gt(test.t.b, 1), lt(test.t.a, 60)",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "WITH CTE AS (SELECT /*+ MERGE(), shuffle_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;",
-        "Plan": [
-          "TableReader 5322.67 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 5322.67 mpp[tiflash] Column#29, Column#30",
-          " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, funcs:firstrow(Column#29)->Column#29, funcs:firstrow(Column#30)->Column#30",
-          " └─ExchangeReceiver 5322.67 mpp[tiflash] ",
-          " └─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#29, collate: binary], [name: Column#30, collate: binary]",
-          " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, ",
-          " └─Union 8316.67 mpp[tiflash] ",
-          " ├─Projection 4154.17 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30",
-          " │ └─HashJoin 4154.17 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]",
-          " │ ├─ExchangeReceiver(Build) 3323.33 mpp[tiflash] ",
-          " │ │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " │ │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))",
-          " │ │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo",
-          " │ └─ExchangeReceiver(Probe) 3323.33 mpp[tiflash] ",
-          " │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─Projection 4162.50 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30",
-          " └─HashJoin 4162.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 3330.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 3330.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " │ └─Selection 3330.00 mpp[tiflash] gt(test.t.b, 1), not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ",
-          " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "WITH CTE AS (SELECT /*+ MERGE(), broadcast_join(t1, t) */ t.a, t.b FROM t join t t1 where t.a = t1.a) SELECT * FROM CTE WHERE CTE.a <18 union select * from cte where cte.b > 1;",
-        "Plan": [
-          "TableReader 5322.67 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 5322.67 mpp[tiflash] Column#29, Column#30",
-          " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, funcs:firstrow(Column#29)->Column#29, funcs:firstrow(Column#30)->Column#30",
" └─ExchangeReceiver 5322.67 mpp[tiflash] ", - " └─ExchangeSender 5322.67 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#29, collate: binary], [name: Column#30, collate: binary]", - " └─HashAgg 5322.67 mpp[tiflash] group by:Column#29, Column#30, ", - " └─Union 8316.67 mpp[tiflash] ", - " ├─Projection 4154.17 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30", - " │ └─HashJoin 4154.17 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " │ ├─ExchangeReceiver(Build) 3323.33 mpp[tiflash] ", - " │ │ └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ │ └─Selection 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))", - " │ │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", - " │ └─Selection(Probe) 3323.33 mpp[tiflash] lt(test.t.a, 18), not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Projection 4162.50 mpp[tiflash] test.t.a->Column#29, test.t.b->Column#30", - " └─HashJoin 4162.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 3330.00 mpp[tiflash] ", - " │ └─ExchangeSender 3330.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 3330.00 mpp[tiflash] gt(test.t.b, 1), not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t2) */ * from t t1 left join t t2 on t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_build(t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]Join hints are conflict, you can only specify one type of join" - ] - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), 
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints",
-          "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints"
-        ]
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), broadcast_join(t1, t2), hash_join_probe(t1) */ * from t t1 right join t t2 on t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints",
-          "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints"
-        ]
-      },
-      {
-        "SQL": "set @@session.tidb_opt_mpp_outer_join_fixed_build_side = 1",
-        "Plan": null,
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t2) */ * from t t1 left join t t2 on t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
" │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ", - " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t1) */ * from t t1 right join t t2 on t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ", - " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", - " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t1) */ * from t t1 right join t t2 on t1.a=t2.a", - "Plan": [ - "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", - " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed 
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "set @@session.tidb_opt_mpp_outer_join_fixed_build_side = 0",
-        "Plan": null,
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t2) */ * from t t1 left join t t2 on t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ",
-          " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_build(t1) */ * from t t1 right join t t2 on t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo",
-          " └─ExchangeReceiver(Probe) 10000.00 mpp[tiflash] ",
-          " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t2) */ * from t t1 left join t t2 on t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          " └─HashJoin 12487.50 mpp[tiflash] left outer join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo",
-          " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ",
-          " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]",
-          " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))",
-          " └─TableFullScan 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "select /*+ read_from_storage(tiflash[t1, t2]), shuffle_join(t1, t2), hash_join_probe(t1) */ * from t t1 right join t t2 on t1.a=t2.a",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
data:ExchangeSender", - "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin 12487.50 mpp[tiflash] right outer join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", - " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", - " └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_build(t2@sel_2) */ a from t t1 where t1.a>1 or t1.a in (select a from t t2);", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a", - " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)", - " └─HashJoin 10000.00 mpp[tiflash] CARTESIAN left outer semi join, other cond:eq(test.t.a, test.t.a)", - " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", - " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", - " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for left outer semi join, please check the hint" - ] - }, - { - "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_build(t1) */ a from t t1 where t1.a>1 or t1.a not in (select a from t t2);", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a", - " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)", - " └─HashJoin 10000.00 mpp[tiflash] Null-aware anti left outer semi join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", - " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo", - " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints", - "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for anti left outer semi join, please check the hint", - "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints" - ] - }, - { - "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_probe(t2@sel_2) */ a from t t1 where t1.a>1 or t1.a in (select a from t t2);", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a", - " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)", - " └─HashJoin 10000.00 mpp[tiflash] CARTESIAN left outer semi join, other 
-          " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo",
-          " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints",
-          "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for left outer semi join, please check the hint",
-          "[planner:1815]Some HASH_JOIN_BUILD and HASH_JOIN_PROBE hints cannot be utilized for MPP joins, please check the hints"
-        ]
-      },
-      {
-        "SQL": "select /*+ shuffle_join(t1, t2@sel_2), hash_join_probe(t1) */ a from t t1 where t1.a>1 or t1.a not in (select a from t t2);",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection 8000.00 mpp[tiflash] test.t.a",
-          " └─Selection 8000.00 mpp[tiflash] or(gt(test.t.a, 1), Column#9)",
-          " └─HashJoin 10000.00 mpp[tiflash] Null-aware anti left outer semi join, equal:[eq(test.t.a, test.t.a)]",
-          " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ",
-          " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          " │ └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo",
-          " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Warn": [
-          "[planner:1815]We can't use the HASH_JOIN_BUILD or HASH_JOIN_PROBE hint for anti left outer semi join, please check the hint"
-        ]
-      }
-    ]
-  },
-  {
-    "Name": "TestMPPHintsScope",
-    "Cases": [
-      {
-        "SQL": "set @@session.tidb_allow_mpp=true",
-        "Plan": null,
-        "Warn": null
-      },
-      {
-        "SQL": "explain select /*+ MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c",
-        "Plan": [
-          "TableReader_31 8000.00 root MppVersion: 2, data:ExchangeSender_30",
-          "└─ExchangeSender_30 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection_5 8000.00 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection_29 8000.00 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg_27 8000.00 mpp[tiflash] group by:Column#10, Column#9, funcs:sum(Column#8)->Column#5, funcs:firstrow(Column#9)->test.t.a",
-          " └─Projection_32 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#8, test.t.a->Column#9, test.t.c->Column#10",
-          " └─ExchangeReceiver_23 10000.00 mpp[tiflash] ",
-          " └─ExchangeSender_22 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-          " └─TableFullScan_21 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
-        "Warn": null
-      },
-      {
-        "SQL": "explain select /*+ MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c",
-        "Plan": [
-          "TableReader_35 8000.00 root MppVersion: 2, data:ExchangeSender_34",
-          "└─ExchangeSender_34 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          " └─Projection_5 8000.00 mpp[tiflash] test.t.a, Column#5",
-          " └─Projection_30 8000.00 mpp[tiflash] Column#5, test.t.a",
-          " └─HashAgg_31 8000.00 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#10)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
-          " └─ExchangeReceiver_33 8000.00 mpp[tiflash] ",
-          " └─ExchangeSender_32 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
collate: binary]", - " └─HashAgg_29 8000.00 mpp[tiflash] group by:Column#13, Column#14, funcs:sum(Column#12)->Column#10", - " └─Projection_36 10000.00 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#12, test.t.a->Column#13, test.t.c->Column#14", - " └─TableFullScan_21 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "explain select /*+ shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_22 12487.50 root MppVersion: 2, data:ExchangeSender_21", - "└─ExchangeSender_21 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_20 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_13(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_12 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection_11 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_10 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver_17(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender_16 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection_15 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_14 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "explain select /*+ broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_20 12487.50 root MppVersion: 2, data:ExchangeSender_19", - "└─ExchangeSender_19 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_18 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_13(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_12 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_11 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_10 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection_15(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_14 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "set @@session.tidb_enforce_mpp=true", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select /*+ hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_69 12487.50 root MppVersion: 2, data:ExchangeSender_68", - "└─ExchangeSender_68 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_61 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_65(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_64 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_63 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_62 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection_67(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_66 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "explain select /*+ merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "MergeJoin_10 12487.50 root inner join, left key:test.t.a, right key:test.t.a", - "├─Projection_19(Build) 9990.00 root test.t.a, 
test.t.b, test.t.c", - "│ └─IndexLookUp_18 9990.00 root ", - "│ ├─IndexFullScan_16(Build) 9990.00 cop[tikv] table:t2, index:idx_a(a) keep order:true, stats:pseudo", - "│ └─TableRowIDScan_17(Probe) 9990.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - "└─Projection_15(Probe) 9990.00 root test.t.a, test.t.b, test.t.c", - " └─IndexLookUp_14 9990.00 root ", - " ├─IndexFullScan_12(Build) 9990.00 cop[tikv] table:t1, index:idx_a(a) keep order:true, stats:pseudo", - " └─TableRowIDScan_13(Probe) 9990.00 cop[tikv] table:t1 keep order:false, stats:pseudo" - ], - "Warn": [ - "MPP mode may be blocked because you have used hint to specify a join algorithm which is not supported by mpp now.", - "MPP mode may be blocked because you have used hint to specify a join algorithm which is not supported by mpp now." - ] - }, - { - "SQL": "set @@session.tidb_enforce_mpp=false", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select /*+ hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_69 12487.50 root MppVersion: 2, data:ExchangeSender_68", - "└─ExchangeSender_68 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_61 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_65(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_64 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_63 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_62 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection_67(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_66 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "explain select /*+ merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "MergeJoin_10 12487.50 root inner join, left key:test.t.a, right key:test.t.a", - "├─Projection_19(Build) 9990.00 root test.t.a, test.t.b, test.t.c", - "│ └─IndexLookUp_18 9990.00 root ", - "│ ├─IndexFullScan_16(Build) 9990.00 cop[tikv] table:t2, index:idx_a(a) keep order:true, stats:pseudo", - "│ └─TableRowIDScan_17(Probe) 9990.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - "└─Projection_15(Probe) 9990.00 root test.t.a, test.t.b, test.t.c", - " └─IndexLookUp_14 9990.00 root ", - " ├─IndexFullScan_12(Build) 9990.00 cop[tikv] table:t1, index:idx_a(a) keep order:true, stats:pseudo", - " └─TableRowIDScan_13(Probe) 9990.00 cop[tikv] table:t1 keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "explain select /*+ read_from_storage(tiflash[t1, t2]) hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_29 12487.50 root MppVersion: 2, data:ExchangeSender_28", - "└─ExchangeSender_28 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_21 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_25(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_24 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_23 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_22 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection_27(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_26 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "explain select /*+ 
read_from_storage(tiflash[t1, t2]) merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "MergeJoin_11 12487.50 root inner join, left key:test.t.a, right key:test.t.a", - "├─Sort_21(Build) 9990.00 root test.t.a", - "│ └─TableReader_20 9990.00 root MppVersion: 2, data:ExchangeSender_19", - "│ └─ExchangeSender_19 9990.00 mpp[tiflash] ExchangeType: PassThrough", - "│ └─Selection_18 9990.00 mpp[tiflash] not(isnull(test.t.a))", - "│ └─TableFullScan_17 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo", - "└─Sort_16(Probe) 9990.00 root test.t.a", - " └─TableReader_15 9990.00 root MppVersion: 2, data:ExchangeSender_14", - " └─ExchangeSender_14 9990.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Selection_13 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_12 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "set @@session.tidb_allow_mpp=false", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select /*+ MPP_1PHASE_AGG() */ a, sum(b) from t group by a, c", - "Plan": [ - "Projection_4 8000.00 root test.t.a, Column#5", - "└─HashAgg_10 8000.00 root group by:test.t.a, test.t.c, funcs:sum(Column#6)->Column#5, funcs:firstrow(test.t.a)->test.t.a", - " └─TableReader_11 8000.00 root data:HashAgg_5", - " └─HashAgg_5 8000.00 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#6", - " └─TableFullScan_8 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]The agg can not push down to the MPP side, the MPP_1PHASE_AGG() hint is invalid" - ] - }, - { - "SQL": "explain select /*+ MPP_2PHASE_AGG() */ a, sum(b) from t group by a, c", - "Plan": [ - "Projection_4 8000.00 root test.t.a, Column#5", - "└─HashAgg_10 8000.00 root group by:test.t.a, test.t.c, funcs:sum(Column#6)->Column#5, funcs:firstrow(test.t.a)->test.t.a", - " └─TableReader_11 8000.00 root data:HashAgg_5", - " └─HashAgg_5 8000.00 cop[tikv] group by:test.t.a, test.t.c, funcs:sum(test.t.b)->Column#6", - " └─TableFullScan_8 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]The agg can not push down to the MPP side, the MPP_2PHASE_AGG() hint is invalid" - ] - }, - { - "SQL": "explain select /*+ shuffle_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "HashJoin_37 12487.50 root inner join, equal:[eq(test.t.a, test.t.a)]", - "├─TableReader_56(Build) 9990.00 root data:Selection_55", - "│ └─Selection_55 9990.00 cop[tikv] not(isnull(test.t.a))", - "│ └─TableFullScan_54 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - "└─TableReader_49(Probe) 9990.00 root data:Selection_48", - " └─Selection_48 9990.00 cop[tikv] not(isnull(test.t.a))", - " └─TableFullScan_47 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]The join can not push down to the MPP side, the shuffle_join() hint is invalid" - ] - }, - { - "SQL": "explain select /*+ broadcast_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "HashJoin_37 12487.50 root inner join, equal:[eq(test.t.a, test.t.a)]", - "├─TableReader_56(Build) 9990.00 root data:Selection_55", - "│ └─Selection_55 9990.00 cop[tikv] not(isnull(test.t.a))", - "│ └─TableFullScan_54 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - "└─TableReader_49(Probe) 9990.00 root data:Selection_48", - " └─Selection_48 9990.00 cop[tikv] not(isnull(test.t.a))", - " └─TableFullScan_47 10000.00 cop[tikv] 
table:t1 keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]The join can not push down to the MPP side, the broadcast_join() hint is invalid" - ] - } - ] - }, - { - "Name": "TestMPPBCJModel", - "Cases": [ - { - "SQL": "set @@session.tidb_allow_mpp=true", - "Plan": null, - "Warn": null - }, - { - "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_79 12487.50 root MppVersion: 2, data:ExchangeSender_78", - "└─ExchangeSender_78 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_77 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection_46(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=1", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_81 12487.50 root MppVersion: 2, data:ExchangeSender_80", - "└─ExchangeSender_80 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_79 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver_48(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender_47 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection_46 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - } - ] - }, - { - "Name": "TestMPPPreferBCJ", - "Cases": [ - { - "SQL": "explain select * from t1, t2 where t1.a=t2.b", - "Plan": [ - "TableReader_36 1.00 root MppVersion: 2, data:ExchangeSender_35", - "└─ExchangeSender_35 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_34 1.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.b)]", - " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", - " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", - " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", - " └─Selection_17(Probe) 8.00 mpp[tiflash] not(isnull(test.t2.b))", - " └─TableFullScan_16 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" - ], - "Warn": null - }, - { - "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=1", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t1, t2 where t1.a=t2.b", - "Plan": [ - "TableReader_38 1.00 root MppVersion: 2, 
data:ExchangeSender_37", - "└─ExchangeSender_37 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_36 1.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.b)]", - " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", - " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t1.a, collate: binary]", - " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", - " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", - " └─ExchangeReceiver_19(Probe) 8.00 mpp[tiflash] ", - " └─ExchangeSender_18 8.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t2.b, collate: binary]", - " └─Selection_17 8.00 mpp[tiflash] not(isnull(test.t2.b))", - " └─TableFullScan_16 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" - ], - "Warn": null - }, - { - "SQL": "insert into t2 values (9); analyze table t2;", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t1, t2 where t1.a=t2.b", - "Plan": [ - "TableReader_36 1.00 root MppVersion: 2, data:ExchangeSender_35", - "└─ExchangeSender_35 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_34 1.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.b)]", - " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", - " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", - " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", - " └─Selection_17(Probe) 9.00 mpp[tiflash] not(isnull(test.t2.b))", - " └─TableFullScan_16 9.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" - ], - "Warn": null - } - ] - }, - { - "Name": "TestMPPBCJModelOneTiFlash", - "Cases": [ - { - "SQL": "set @@session.tidb_allow_mpp=true", - "Plan": null, - "Warn": null - }, - { - "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_81 12487.50 root MppVersion: 2, data:ExchangeSender_80", - "└─ExchangeSender_80 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_79 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, test.t.a)]", - " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─ExchangeReceiver_48(Probe) 9990.00 mpp[tiflash] ", - " └─ExchangeSender_47 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─Selection_46 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=1", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t t1, t t2 where t1.a=t2.a", - "Plan": [ - "TableReader_79 12487.50 root MppVersion: 2, data:ExchangeSender_78", - "└─ExchangeSender_78 12487.50 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_77 12487.50 mpp[tiflash] inner join, equal:[eq(test.t.a, 
test.t.a)]", - " ├─ExchangeReceiver_44(Build) 9990.00 mpp[tiflash] ", - " │ └─ExchangeSender_43 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_42 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " │ └─TableFullScan_41 10000.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false, stats:pseudo", - " └─Selection_46(Probe) 9990.00 mpp[tiflash] not(isnull(test.t.a))", - " └─TableFullScan_45 10000.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warn": null - } - ] - }, - { - "Name": "TestMPPRightSemiJoin", - "Cases": [ - { - "SQL": "set @@session.tidb_allow_mpp=true", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t1 where exists (select * from t2 where t1.a=t2.b)", - "Plan": [ - "TableReader_36 0.80 root MppVersion: 2, data:ExchangeSender_35", - "└─ExchangeSender_35 0.80 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_34 0.80 mpp[tiflash] semi join, equal:[eq(test.t1.a, test.t2.b)]", - " ├─ExchangeReceiver_17(Build) 8.00 mpp[tiflash] ", - " │ └─ExchangeSender_16 8.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_15 8.00 mpp[tiflash] not(isnull(test.t2.b))", - " │ └─TableFullScan_14 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false", - " └─Selection_13(Probe) 1.00 mpp[tiflash] not(isnull(test.t1.a))", - " └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false" - ], - "Warn": null - }, - { - "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "set @@session.tidb_broadcast_join_threshold_size=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "set @@session.tidb_broadcast_join_threshold_count=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t1 where exists (select * from t2 where t1.a=t2.b)", - "Plan": [ - "TableReader_38 0.80 root MppVersion: 2, data:ExchangeSender_37", - "└─ExchangeSender_37 0.80 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_36 0.80 mpp[tiflash] semi join, equal:[eq(test.t1.a, test.t2.b)]", - " ├─ExchangeReceiver_15(Build) 1.00 mpp[tiflash] ", - " │ └─ExchangeSender_14 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t1.a, collate: binary]", - " │ └─Selection_13 1.00 mpp[tiflash] not(isnull(test.t1.a))", - " │ └─TableFullScan_12 1.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", - " └─ExchangeReceiver_19(Probe) 8.00 mpp[tiflash] ", - " └─ExchangeSender_18 8.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t2.b, collate: binary]", - " └─Selection_17 8.00 mpp[tiflash] not(isnull(test.t2.b))", - " └─TableFullScan_16 8.00 mpp[tiflash] table:t2 pushed down filter:empty, keep order:false" - ], - "Warn": null - } - ] - }, - { - "Name": "TestMPPRightOuterJoin", - "Cases": [ - { - "SQL": "set @@session.tidb_allow_mpp=true", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t1 right join t2 on t1.a=t2.b and t1.c < t2.d", - "Plan": [ - "TableReader_32 3.00 root MppVersion: 2, data:ExchangeSender_31", - "└─ExchangeSender_31 3.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_30 3.00 mpp[tiflash] right outer join, equal:[eq(test.t1.a, test.t2.b)], other cond:lt(test.t1.c, test.t2.d)", - " ├─ExchangeReceiver_14(Build) 5.00 mpp[tiflash] ", - " │ └─ExchangeSender_13 5.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─Selection_12 5.00 
mpp[tiflash] not(isnull(test.t1.a)), not(isnull(test.t1.c))", - " │ └─TableFullScan_11 5.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false", - " └─TableFullScan_15(Probe) 3.00 mpp[tiflash] table:t2 keep order:false" - ], - "Warn": null - }, - { - "SQL": "set @@session.tidb_prefer_broadcast_join_by_exchange_data_size=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "set @@session.tidb_broadcast_join_threshold_size=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "set @@session.tidb_broadcast_join_threshold_count=0", - "Plan": null, - "Warn": null - }, - { - "SQL": "explain select * from t1 right join t2 on t1.a=t2.b and t1.c < t2.d", - "Plan": [ - "TableReader_34 3.00 root MppVersion: 2, data:ExchangeSender_33", - "└─ExchangeSender_33 3.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashJoin_32 3.00 mpp[tiflash] right outer join, equal:[eq(test.t1.a, test.t2.b)], other cond:lt(test.t1.c, test.t2.d)", - " ├─ExchangeReceiver_17(Build) 3.00 mpp[tiflash] ", - " │ └─ExchangeSender_16 3.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t2.b, collate: binary]", - " │ └─TableFullScan_15 3.00 mpp[tiflash] table:t2 keep order:false", - " └─ExchangeReceiver_14(Probe) 5.00 mpp[tiflash] ", - " └─ExchangeSender_13 5.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t1.a, collate: binary]", - " └─Selection_12 5.00 mpp[tiflash] not(isnull(test.t1.a)), not(isnull(test.t1.c))", - " └─TableFullScan_11 5.00 mpp[tiflash] table:t1 pushed down filter:empty, keep order:false" - ], - "Warn": null - } - ] - }, - { - "Name": "TestIssue37520", - "Cases": [ - { - "SQL": "select /*+ inl_join(t1@sel_2) */ a, (select b from t1 where t1.a = t2.b) from t2;", - "Plan": [ - "IndexJoin 12500.00 root left outer join, inner:TableReader, outer key:test.t2.b, inner key:test.t1.a, equal cond:eq(test.t2.b, test.t1.a)", - "├─TableReader(Build) 10000.00 root data:TableFullScan", - "│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - "└─TableReader(Probe) 10000.00 root data:TableRangeScan", - " └─TableRangeScan 10000.00 cop[tikv] table:t1 range: decided by [test.t2.b], keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ inl_join(t2) */ a, (select b from t1 where t1.a = t2.b) from t2;", - "Plan": [ - "HashJoin 12500.00 root left outer join, equal:[eq(test.t2.b, test.t1.a)]", - "├─TableReader(Build) 10000.00 root data:TableFullScan", - "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - "└─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" - ], - "Warn": [ - "[planner:1815]Optimizer Hint /*+ INL_JOIN(t2) */ or /*+ TIDB_INLJ(t2) */ is inapplicable" - ] - }, - { - "SQL": "select /*+ inl_join(t2@sel_2) */ * from t1 where exists ( select /*+ semi_join_rewrite() */ * from t2 where t1.a = t2.a);", - "Plan": [ - "IndexJoin 9990.00 root inner join, inner:HashAgg, outer key:test.t1.a, inner key:test.t2.a, equal cond:eq(test.t1.a, test.t2.a)", - "├─TableReader(Build) 10000.00 root data:TableFullScan", - "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - "└─HashAgg(Probe) 79920000.00 root group by:test.t2.a, funcs:firstrow(test.t2.a)->test.t2.a", - " └─IndexReader 79920000.00 root index:HashAgg", - " └─HashAgg 79920000.00 cop[tikv] group by:test.t2.a, ", - " └─Selection 9990.00 cop[tikv] not(isnull(test.t2.a))", - " └─IndexRangeScan 10000.00 
cop[tikv] table:t2, index:ia(a) range: decided by [eq(test.t2.a, test.t1.a)], keep order:false, stats:pseudo" - ], - "Warn": null - }, - { - "SQL": "select /*+ inl_join(t1) */ * from t1 where exists ( select /*+ semi_join_rewrite() */ * from t2 where t1.a = t2.a);", - "Plan": [ - "IndexJoin 9990.00 root inner join, inner:TableReader, outer key:test.t2.a, inner key:test.t1.a, equal cond:eq(test.t2.a, test.t1.a)", - "├─StreamAgg(Build) 7992.00 root group by:test.t2.a, funcs:firstrow(test.t2.a)->test.t2.a", - "│ └─IndexReader 7992.00 root index:StreamAgg", - "│ └─StreamAgg 7992.00 cop[tikv] group by:test.t2.a, ", - "│ └─IndexFullScan 9990.00 cop[tikv] table:t2, index:ia(a) keep order:true, stats:pseudo", - "└─TableReader(Probe) 7992.00 root data:TableRangeScan", - " └─TableRangeScan 7992.00 cop[tikv] table:t1 range: decided by [test.t2.a], keep order:false, stats:pseudo" - ], - "Warn": null - } - ] - }, - { - "Name": "TestHintScope", - "Cases": [ - { - "SQL": "select /*+ MERGE_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ INL_JOIN(t3) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "MergeInnerJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ MERGE_JOIN(test.t1) */ t1.a, t1.b from t t1, (select /*+ INL_JOIN(test.t3) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "MergeInnerJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ MERGE_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ HASH_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "MergeInnerJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)->Sort}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ INL_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ HASH_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "IndexJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ INL_JOIN(test.t1) */ t1.a, t1.b from t t1, (select /*+ HASH_JOIN(test.t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "IndexJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ INL_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ MERGE_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "IndexJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ HASH_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ MERGE_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ HASH_JOIN(test.t1) */ t1.a, t1.b from t t1, (select /*+ MERGE_JOIN(test.t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": 
"RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ HASH_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ INL_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "RightHashJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.c,test.t.a)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ MERGE_JOIN(t1) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "MergeInnerJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ INL_JOIN(t1) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "IndexJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ HASH_JOIN(t1) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", - "Best": "RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)}(test.t.a,test.t.a)" - }, - { - "SQL": "select /*+ HASH_JOIN(@sel_2 t1@sel_2, t2@sel_2), MERGE_JOIN(@sel_1 t1@sel_1, t2@sel_1) */ * from (select t1.a, t1.b from t t1, t t2 where t1.a = t2.a) t1, t t2 where t1.b = t2.b", - "Best": "MergeInnerJoin{LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)->Sort->TableReader(Table(t))->Sort}(test.t.b,test.t.b)" - }, - { - "SQL": "select /*+ STREAM_AGG() */ s, count(s) from (select /*+ HASH_AGG() */ sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", - "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Projection->HashAgg->Sort->StreamAgg->Projection" - }, - { - "SQL": "select /*+ HASH_AGG() */ s, count(s) from (select /*+ STREAM_AGG() */ sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", - "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Sort->Projection->StreamAgg->HashAgg->Projection" - }, - { - "SQL": "select /*+ HASH_AGG() */ s, count(s) from (select sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", - "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Projection->HashAgg->HashAgg->Projection" - }, - { - "SQL": "select /*+ STREAM_AGG() */ s, count(s) from (select sum(t1.a) as s from t t1, t t2 where t1.a = t2.b group by t1.a) p group by s", - "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->Projection->HashAgg->Sort->StreamAgg->Projection" - } - ] - }, - { - "Name": "TestIndexHint", - "Cases": [ - { - "SQL": "select /*+ USE_INDEX(t, c_d_e) */ * from t", - "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" - }, - { - "SQL": "select /*+ USE_INDEX(test.t, c_d_e) */ * from t", - "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" - 
}, - { - "SQL": "select /*+ IGNORE_INDEX(t, c_d_e) */ c from t order by c", - "Best": "TableReader(Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ IGNORE_INDEX(test.t, c_d_e) */ c from t order by c", - "Best": "TableReader(Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, c_d_e) */ * from t", - "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(test.t, c_d_e) */ * from t", - "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), no_order_index(@`sel_1` `test`.`t` `c_d_e`)" - }, - { - "SQL": "select /*+ USE_INDEX(t, c_d_e) */ * from t t1", - "Best": "TableReader(Table(t))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`)" - }, - { - "SQL": "select /*+ IGNORE_INDEX(t, c_d_e) */ t1.c from t t1 order by t1.c", - "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t1` `c_d_e`), order_index(@`sel_1` `test`.`t1` `c_d_e`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, c_d_e) */ * from t t1", - "Best": "TableReader(Table(t))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX(t1, c_d_e) */ * from t t1", - "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`)" - }, - { - "SQL": "select /*+ IGNORE_INDEX(t1, c_d_e) */ t1.c from t t1 order by t1.c", - "Best": "TableReader(Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t1, c_d_e) */ * from t t1", - "Best": "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`)" - }, - { - "SQL": "select /*+ USE_INDEX(t1, c_d_e), USE_INDEX(t2, f) */ * from t t1, t t2 where t1.a = t2.b", - "Best": "LeftHashJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))}(test.t.a,test.t.b)", - "HasWarn": false, - "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`), use_index(@`sel_1` `test`.`t2` `f`), no_order_index(@`sel_1` `test`.`t2` `f`)" - }, - { - "SQL": "select /*+ IGNORE_INDEX(t1, c_d_e), IGNORE_INDEX(t2, f), HASH_JOIN(t1) */ * from t t1, t t2 where t1.a = t2.b", - "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.b)", - "HasWarn": false, - "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` ), no_order_index(@`sel_1` `test`.`t2` `primary`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t1, c_d_e), FORCE_INDEX(t2, f) */ * from t t1, t t2 where t1.a = t2.b", - "Best": "LeftHashJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], 
Table(t))->IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))}(test.t.a,test.t.b)", - "HasWarn": false, - "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` `c_d_e`), no_order_index(@`sel_1` `test`.`t1` `c_d_e`), use_index(@`sel_1` `test`.`t2` `f`), no_order_index(@`sel_1` `test`.`t2` `f`)" - }, - { - "SQL": "select /*+ USE_INDEX(t, c_d_e, f, g) */ * from t order by f", - "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `f`), order_index(@`sel_1` `test`.`t` `f`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, c_d_e, f, g) */ * from t order by f", - "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `f`), order_index(@`sel_1` `test`.`t` `f`)" - }, - { - "SQL": "select /*+ USE_INDEX(t) */ f from t where f > 10", - "Best": "TableReader(Table(t)->Sel([gt(test.t.f, 10)]))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t) */ f from t where f > 10", - "Best": "TableReader(Table(t)->Sel([gt(test.t.f, 10)]))", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX(t, no_such_index) */ * from t", - "Best": "TableReader(Table(t))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ IGNORE_INDEX(t, no_such_index) */ * from t", - "Best": "TableReader(Table(t))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, no_such_index) */ * from t", - "Best": "TableReader(Table(t))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, f) */ c from t order by c", - "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), order_index(@`sel_1` `test`.`t` `c_d_e`)" - }, - { - "SQL": "select /*+ USE_INDEX(t, f), IGNORE_INDEX(t, f) */ c from t order by c", - "Best": "TableReader(Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX(t, c_d_e), IGNORE_INDEX(t, c_d_e) */ c from t order by c", - "Best": "TableReader(Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX(t, c_d_e, f), IGNORE_INDEX(t, c_d_e) */ c from t order by c", - "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `f`), no_order_index(@`sel_1` `test`.`t` `f`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, c_d_e), IGNORE_INDEX(t, f) */ c from t order by c", - "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]])", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `c_d_e`), order_index(@`sel_1` `test`.`t` `c_d_e`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, f), IGNORE_INDEX(t, f) */ c from t order by c", - "Best": "TableReader(Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, c_d_e), 
IGNORE_INDEX(t, c_d_e) */ c from t order by c", - "Best": "TableReader(Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ FORCE_INDEX(t, c_d_e, f), IGNORE_INDEX(t, c_d_e) */ c from t order by c", - "Best": "IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))->Sort", - "HasWarn": false, - "Hints": "use_index(@`sel_1` `test`.`t` `f`), no_order_index(@`sel_1` `test`.`t` `f`)" - } - ] - }, - { - "Name": "TestIndexMergeHint", - "Cases": [ - { - "SQL": "select /*+ USE_INDEX_MERGE(t, c_d_e, f_g) */ * from t where c < 1 or f > 2", - "Best": "IndexMergeReader(PartialPlans->[Index(t.c_d_e)[[-inf,1)], Index(t.f_g)[(2,+inf]]], TablePlan->Table(t))", - "HasWarn": false, - "Hints": "use_index_merge(@`sel_1` `t` `c_d_e`, `f_g`)" - }, - { - "SQL": "select /*+ USE_INDEX_MERGE(t, primary, f_g) */ * from t where a < 1 or f > 2", - "Best": "IndexMergeReader(PartialPlans->[Table(t), Index(t.f_g)[(2,+inf]]], TablePlan->Table(t))", - "HasWarn": false, - "Hints": "use_index_merge(@`sel_1` `t` `primary`, `f_g`)" - }, - { - "SQL": "select /*+ USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2", - "Best": "IndexMergeReader(PartialPlans->[Table(t), Index(t.f_g)[(2,+inf]]], TablePlan->Table(t))", - "HasWarn": false, - "Hints": "use_index_merge(@`sel_1` `t` `primary`, `f_g`)" - }, - { - "SQL": "select /*+ NO_INDEX_MERGE(), USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2", - "Best": "TableReader(Table(t)->Sel([or(lt(test.t.a, 1), gt(test.t.f, 2))]))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX_MERGE(t1, c_d_e, f_g) */ * from t where c < 1 or f > 2", - "Best": "TableReader(Table(t)->Sel([or(lt(test.t.c, 1), gt(test.t.f, 2))]))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ NO_INDEX_MERGE(), USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2", - "Best": "TableReader(Table(t)->Sel([or(lt(test.t.a, 1), gt(test.t.f, 2))]))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX_MERGE(t) USE_INDEX_MERGE(t) */ * from t where c < 1 or f > 2", - "Best": "IndexMergeReader(PartialPlans->[Index(t.c_d_e)[[-inf,1)], Index(t.f)[(2,+inf]]], TablePlan->Table(t))", - "HasWarn": false, - "Hints": "use_index_merge(@`sel_1` `t` `c_d_e`, `f`)" - }, - { - "SQL": "select /*+ USE_INDEX_MERGE(db2.t) */ * from t where c < 1 or f > 2", - "Best": "TableReader(Table(t)->Sel([or(lt(test.t.c, 1), gt(test.t.f, 2))]))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - }, - { - "SQL": "select /*+ USE_INDEX_MERGE(db2.t, c_d_e, f_g) */ * from t where c < 1 or f > 2", - "Best": "TableReader(Table(t)->Sel([or(lt(test.t.c, 1), gt(test.t.f, 2))]))", - "HasWarn": true, - "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`)" - } - ] - }, - { - "Name": "TestRefine", - "Cases": [ - { - "SQL": "select a from t where c is not null", - "Best": "IndexReader(Index(t.f)[[NULL,+inf]])" - }, - { - "SQL": "select a from t where c >= 4", - "Best": "IndexReader(Index(t.c_d_e)[[4,+inf]]->Projection)" - }, - { - "SQL": "select a from t where c <= 4", - "Best": "IndexReader(Index(t.c_d_e)[[-inf,4]]->Projection)" - 
}, - { - "SQL": "select a from t where c = 4 and d = 5 and e = 6", - "Best": "PointGet(Index(t.c_d_e)[KindInt64 4 KindInt64 5 KindInt64 6])->Projection" - }, - { - "SQL": "select a from t where d = 4 and c = 5", - "Best": "IndexReader(Index(t.c_d_e)[[5 4,5 4]]->Projection)" - }, - { - "SQL": "select a from t where c = 4 and e < 5", - "Best": "IndexReader(Index(t.c_d_e)[[4,4]]->Sel([lt(test.t.e, 5)])->Projection)" - }, - { - "SQL": "select a from t where c = 4 and d <= 5 and d > 3", - "Best": "IndexReader(Index(t.c_d_e)[(4 3,4 5]]->Projection)" - }, - { - "SQL": "select a from t where d <= 5 and d > 3", - "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([le(test.t.d, 5) gt(test.t.d, 3)])->Projection)" - }, - { - "SQL": "select a from t where c between 1 and 2", - "Best": "IndexReader(Index(t.c_d_e)[[1,2]]->Projection)" - }, - { - "SQL": "select a from t where c not between 1 and 2", - "Best": "IndexReader(Index(t.c_d_e)[[-inf,1) (2,+inf]]->Projection)" - }, - { - "SQL": "select a from t where c <= 5 and c >= 3 and d = 1", - "Best": "IndexReader(Index(t.c_d_e)[[3,5]]->Sel([eq(test.t.d, 1)])->Projection)" - }, - { - "SQL": "select a from t where c = 1 or c = 2 or c = 3", - "Best": "IndexReader(Index(t.c_d_e)[[1,3]]->Projection)" - }, - { - "SQL": "select b from t where c = 1 or c = 2 or c = 3 or c = 4 or c = 5", - "Best": "TableReader(Table(t)->Sel([or(or(eq(test.t.c, 1), eq(test.t.c, 2)), or(eq(test.t.c, 3), or(eq(test.t.c, 4), eq(test.t.c, 5))))])->Projection)" - }, - { - "SQL": "select a from t where c = 5", - "Best": "IndexReader(Index(t.c_d_e)[[5,5]]->Projection)" - }, - { - "SQL": "select a from t where c = 5 and b = 1", - "Best": "IndexLookUp(Index(t.c_d_e)[[5,5]], Table(t)->Sel([eq(test.t.b, 1)]))->Projection" - }, - { - "SQL": "select a from t where not a", - "Best": "PointGet(Handle(t.a)0)" - }, - { - "SQL": "select a from t where c in (1)", - "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Projection)" - }, - { - "SQL": "select a from t where c in ('1')", - "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Projection)" - }, - { - "SQL": "select a from t where c = 1.0", - "Best": "IndexReader(Index(t.c_d_e)[[1,1]]->Projection)" - }, - { - "SQL": "select a from t where c in (1) and d > 3", - "Best": "IndexReader(Index(t.c_d_e)[(1 3,1 +inf]]->Projection)" - }, - { - "SQL": "select a from t where c in (1, 2, 3) and (d > 3 and d < 4 or d > 5 and d < 6)", - "Best": "Dual->Projection" - }, - { - "SQL": "select a from t where c in (1, 2, 3) and (d > 2 and d < 4 or d > 5 and d < 7)", - "Best": "IndexReader(Index(t.c_d_e)[[1 3,1 3] [1 6,1 6] [2 3,2 3] [2 6,2 6] [3 3,3 3] [3 6,3 6]]->Projection)" - }, - { - "SQL": "select a from t where c in (1, 2, 3)", - "Best": "IndexReader(Index(t.c_d_e)[[1,1] [2,2] [3,3]]->Projection)" - }, - { - "SQL": "select a from t where c in (1, 2, 3) and d in (1,2) and e = 1", - "Best": "BatchPointGet(Index(t.c_d_e)[[KindInt64 1 KindInt64 1 KindInt64 1] [KindInt64 1 KindInt64 2 KindInt64 1] [KindInt64 2 KindInt64 1 KindInt64 1] [KindInt64 2 KindInt64 2 KindInt64 1] [KindInt64 3 KindInt64 1 KindInt64 1] [KindInt64 3 KindInt64 2 KindInt64 1]])->Projection" - }, - { - "SQL": "select a from t where d in (1, 2, 3)", - "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([in(test.t.d, 1, 2, 3)])->Projection)" - }, - { - "SQL": "select a from t where c not in (1)", - "Best": "IndexReader(Index(t.c_d_e)[[-inf,1) (1,+inf]]->Projection)" - }, - { - "SQL": "select a from t use index(c_d_e) where c != 1", - "Best": "IndexReader(Index(t.c_d_e)[[-inf,1) (1,+inf]]->Projection)" - }, 
- { - "SQL": "select a from t where c_str like ''", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"\",\"\"]]->Sel([like(test.t.c_str, , 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abc\"]]->Sel([like(test.t.c_str, abc, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str not like 'abc'", - "Best": "IndexReader(Index(t.c_d_e_str)[[NULL,+inf]]->Sel([not(like(test.t.c_str, abc, 92))])->Projection)" - }, - { - "SQL": "select a from t where not (c_str like 'abc' or c_str like 'abd')", - "Best": "IndexReader(Index(t.c_d_e_str)[[NULL,+inf]]->Sel([and(not(like(test.t.c_str, abc, 92)), not(like(test.t.c_str, abd, 92)))])->Projection)" - }, - { - "SQL": "select a from t where c_str like '_abc'", - "Best": "IndexReader(Index(t.c_d_e_str)[[NULL,+inf]]->Sel([like(test.t.c_str, _abc, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc%'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abd\")]->Sel([like(test.t.c_str, abc%, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc_'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abd\")]->Sel([like(test.t.c_str, abc_, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc%af'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc\",\"abd\")]->Sel([like(test.t.c_str, abc%af, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc\\_' escape ''", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc_\"]]->Sel([like(test.t.c_str, abc\\_, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc\\_'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc_\"]]->Sel([like(test.t.c_str, abc\\_, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc\\\\_'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc_\"]]->Sel([like(test.t.c_str, abc\\_, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc\\_%'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc`\")]->Sel([like(test.t.c_str, abc\\_%, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc=_%' escape '='", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc`\")]->Sel([like(test.t.c_str, abc=_%, 61)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 'abc\\__'", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"abc_\",\"abc`\")]->Sel([like(test.t.c_str, abc\\__, 92)])->Projection)" - }, - { - "SQL": "select a from t where c_str like 123", - "Best": "IndexReader(Index(t.c_d_e_str)[[\"123\",\"123\"]]->Sel([like(test.t.c_str, 123, 92)])->Projection)" - }, - { - "SQL": "select a from t where c = 1.9 and d > 3", - "Best": "Dual" - }, - { - "SQL": "select a from t where c < 1.1", - "Best": "IndexReader(Index(t.c_d_e)[[-inf,2)]->Projection)" - }, - { - "SQL": "select a from t where c <= 1.9", - "Best": "IndexReader(Index(t.c_d_e)[[-inf,1]]->Projection)" - }, - { - "SQL": "select a from t where c >= 1.1", - "Best": "IndexReader(Index(t.c_d_e)[[2,+inf]]->Projection)" - }, - { - "SQL": "select a from t where c > 1.9", - "Best": "IndexReader(Index(t.c_d_e)[(1,+inf]]->Projection)" - }, - { - "SQL": "select a from t where c = 123456789098765432101234", - "Best": "Dual" - }, - { - "SQL": "select a from t where c = 'hanfei'", - "Best": "IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([eq(cast(test.t.c, double BINARY), cast(hanfei, double BINARY))])->Projection)" - } - ] - }, - { - "Name": "TestAggEliminator", 
- "Cases": [ - { - "SQL": "select max(a) from t;", - "Best": "TableReader(Table(t)->Limit)->Limit->StreamAgg" - }, - { - "SQL": "select min(a) from t;", - "Best": "TableReader(Table(t)->Limit)->Limit->StreamAgg" - }, - { - "SQL": "select min(c_str) from t;", - "Best": "IndexReader(Index(t.c_d_e_str)[[-inf,+inf]]->Limit)->Limit->StreamAgg" - }, - { - "SQL": "select max(a), b from t;", - "Best": "TableReader(Table(t)->HashAgg)->HashAgg" - }, - { - "SQL": "select max(a+1) from t;", - "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->TopN([plus(test.t.a, 1) true],0,1))->Projection->TopN([Column#40 true],0,1)->Projection->Projection->StreamAgg" - }, - { - "SQL": "select max(a), min(a) from t;", - "Best": "RightHashJoin{TableReader(Table(t)->Limit)->Limit->StreamAgg->TableReader(Table(t)->Limit)->Limit->StreamAgg}" - }, - { - "SQL": "select max(a), min(a) from t where a > 10", - "Best": "RightHashJoin{TableReader(Table(t)->Limit)->Limit->StreamAgg->TableReader(Table(t)->Limit)->Limit->StreamAgg}" - }, - { - "SQL": "select max(d), min(d) from t where c = 1 and d > 10", - "Best": "LeftHashJoin{IndexReader(Index(t.c_d_e)[(1 10,1 +inf]]->Limit)->Limit->StreamAgg->IndexReader(Index(t.c_d_e)[(1 10,1 +inf]]->Limit)->Limit->StreamAgg}" - }, - { - "SQL": "select max(a), max(c), min(f) from t", - "Best": "LeftHashJoin{RightHashJoin{TableReader(Table(t)->Limit)->Limit->StreamAgg->IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Limit)->Limit->StreamAgg}->IndexReader(Index(t.f)[[NULL,+inf]]->Limit)->Limit->StreamAgg}" - }, - { - "SQL": "select max(a), max(b) from t", - "Best": "TableReader(Table(t)->HashAgg)->HashAgg" - }, - { - "SQL": "select max(a), max(c) from t where c > 10", - "Best": "IndexReader(Index(t.c_d_e)[(10,+inf]]->HashAgg)->HashAgg" - }, - { - "SQL": "select max(a), min(a) from t where a * 3 + 10 < 100", - "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->Sel([lt(plus(mul(test.t.a, 3), 10), 100)])->HashAgg)->HashAgg" - }, - { - "SQL": "select max(a) from t group by b;", - "Best": "TableReader(Table(t)->HashAgg)->HashAgg" - }, - { - "SQL": "select max(a) from (select t1.a from t t1 join t t2 on t1.a=t2.a) t", - "Best": "MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Limit->StreamAgg" - } - ] - }, - { - "Name": "TestRuleColumnPruningLogicalApply", - "Cases": [ - { - "SQL": "SELECT COUNT(*) FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", - "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->HashAgg)->HashAgg" - }, - { - "SQL": "SELECT COUNT(a) FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", - "Best": "IndexReader(Index(t.f)[[NULL,+inf]]->HashAgg)->HashAgg" - }, - { - "SQL": "SELECT COUNT(t) FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", - "Best": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Limit}->HashAgg" - }, - { - "SQL": "SELECT COUNT(a) FROM t t1 WHERE t1.a IN (SELECT t2.a FROM t t2, t t3 WHERE t2.b = t3.b)", - "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.b,test.t.b)->HashAgg}(test.t.a,test.t.a)->HashAgg" - }, - { - "SQL": "SELECT a FROM (SELECT a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t", - "Best": "IndexReader(Index(t.f)[[NULL,+inf]])" - }, - { - "SQL": "SELECT a FROM t WHERE b IN (SELECT t2.b FROM t 
t2, t t3 WHERE t2.a = t3.a)",
-        "Best": "LeftHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->HashAgg}(test.t.b,test.t.b)"
-      },
-      {
-        "SQL": "SELECT a FROM t WHERE EXISTS (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t2.b=t.b)",
-        "Best": "LeftHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)}(test.t.b,test.t.b)"
-      },
-      {
-        "SQL": "SELECT a FROM t WHERE NOT EXISTS (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t2.b=t.b)",
-        "Best": "LeftHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)}(test.t.b,test.t.b)"
-      },
-      {
-        "SQL": "SELECT a FROM t WHERE b IN (SELECT b FROM t WHERE b = 1 AND a IN (SELECT a FROM t WHERE a > 0))",
-        "Best": "RightHashJoin{IndexJoin{TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->TableReader(Table(t)->Sel([gt(test.t.a, 0)]))}(test.t.a,test.t.a)->HashAgg->TableReader(Table(t))}(test.t.b,test.t.b)"
-      },
-      {
-        "SQL": "SELECT a FROM t WHERE b IN (SELECT b FROM t WHERE b = 1 AND a IN (SELECT t2.a FROM (SELECT t1.a, (SELECT t2.b FROM t t2, t t3 WHERE t2.a = t3.a AND t1.a = t2.a LIMIT 1) t FROM t t1) t2))",
-        "Best": "LeftHashJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t)->Sel([eq(test.t.b, 1)]))->TableReader(Table(t))}(test.t.a,test.t.a)->HashAgg}(test.t.b,test.t.b)"
-      }
-    ]
-  },
-  {
-    "Name": "TestUnmatchedTableInHint",
-    "Cases": [
-      {
-        "SQL": "SELECT /*+ TIDB_SMJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a",
-        "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ MERGE_JOIN(t3, t4) */ or /*+ TIDB_SMJ(t3, t4) */. Maybe you can use the table alias name"
-      },
-      {
-        "SQL": "SELECT /*+ TIDB_HJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a",
-        "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ HASH_JOIN(t3, t4) */ or /*+ TIDB_HJ(t3, t4) */. Maybe you can use the table alias name"
-      },
-      {
-        "SQL": "SELECT /*+ TIDB_INLJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a",
-        "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ INL_JOIN(t3, t4) */ or /*+ TIDB_INLJ(t3, t4) */. Maybe you can use the table alias name"
-      },
-      {
-        "SQL": "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t t1, t t2 where t1.a = t2.a",
-        "Warning": ""
-      },
-      {
-        "SQL": "SELECT /*+ TIDB_SMJ(t3, t4) */ * from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a",
-        "Warning": "[planner:1815]There are no matching table names for (t4) in optimizer hint /*+ MERGE_JOIN(t3, t4) */ or /*+ TIDB_SMJ(t3, t4) */. Maybe you can use the table alias name"
-      }
-    ]
-  },
-  {
-    "Name": "TestJoinHints",
-    "Cases": [
-      {
-        "SQL": "select /*+ TIDB_INLJ(t1) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;",
-        "Best": "MergeInnerJoin{IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)",
-        "Warning": "",
-        "Hints": "merge_join(`test`.`t3`), leading(`test`.`t1`, `test`.`t2`, `test`.`t3`), inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` ), order_index(@`sel_1` `test`.`t2` `primary`), use_index(@`sel_1` `test`.`t3` ), order_index(@`sel_1` `test`.`t3` `primary`)"
-      },
-      {
-        "SQL": "select /*+ TIDB_INLJ(test.t1) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;",
-        "Best": "MergeInnerJoin{IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)",
-        "Warning": "",
-        "Hints": "merge_join(`test`.`t3`), leading(`test`.`t1`, `test`.`t2`, `test`.`t3`), inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` ), order_index(@`sel_1` `test`.`t2` `primary`), use_index(@`sel_1` `test`.`t3` ), order_index(@`sel_1` `test`.`t3` `primary`)"
-      },
-      {
-        "SQL": "select /*+ TIDB_INLJ(t1) */ t1.b, t2.a from t t1, t t2 where t1.b = t2.a;",
-        "Best": "LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.b,test.t.a)",
-        "Warning": "[planner:1815]Optimizer Hint /*+ INL_JOIN(t1) */ or /*+ TIDB_INLJ(t1) */ is inapplicable",
-        "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_1` `test`.`t2` `f`), no_order_index(@`sel_1` `test`.`t2` `f`)"
-      },
-      {
-        "SQL": "select /*+ TIDB_INLJ(t2) */ t1.b, t2.a from t2 t1, t2 t2 where t1.b=t2.b and t2.c=-1;",
-        "Best": "IndexJoin{TableReader(Table(t2)->Sel([eq(test.t2.c, -1)]))->IndexReader(Index(t2.b)[[NULL,NULL]])}(test.t2.b,test.t2.b)->Projection",
-        "Warning": "[planner:1815]Optimizer Hint /*+ INL_JOIN(t2) */ or /*+ TIDB_INLJ(t2) */ is inapplicable",
-        "Hints": "inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t2` ), no_order_index(@`sel_1` `test`.`t2` `primary`), use_index(@`sel_1` `test`.`t1` `b`), no_order_index(@`sel_1` `test`.`t1` `b`)"
-      }
-    ]
-  },
-  {
-    "Name": "TestAggregationHints",
-    "Cases": [
-      {
-        "SQL": "select count(*) from t t1, t t2 where t1.a = t2.b",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->StreamAgg",
-        "Warning": ""
-      },
-      {
-        "SQL": "select count(t1.a) from t t1, t t2 where t1.a = t2.a*2 group by t1.a",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]])->Projection}(test.t.a,Column#26)->HashAgg",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ HASH_AGG() */ count(*) from t t1, t t2 where t1.a = t2.b",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->HashAgg",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ STREAM_AGG() */ count(t1.a) from t t1, t t2 where t1.a = t2.a*2 group by t1.a",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]])->Projection}(test.t.a,Column#26)->Sort->StreamAgg",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ HASH_AGG() STREAM_AGG() */ count(*) from t t1, t t2 where t1.a = t2.b",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))}(test.t.a,test.t.b)->StreamAgg",
-        "Warning": "[planner:1815]Optimizer aggregation hints are conflicted"
-      },
-      {
-        "SQL": "select /*+ STREAM_AGG() */ distinct a from t",
-        "Best": "IndexReader(Index(t.f)[[NULL,+inf]])",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ HASH_AGG() */ t1.a from t t1 where t1.a < any(select t2.b from t t2)",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t)->HashAgg)->HashAgg->Sel([ne(Column#27, 0)])}",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ hash_agg() */ t1.a from t t1 where t1.a != any(select t2.b from t t2)",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))->HashAgg->Sel([ne(Column#28, 0)])}",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ hash_agg() */ t1.a from t t1 where t1.a = all(select t2.b from t t2)",
-        "Best": "LeftHashJoin{IndexReader(Index(t.f)[[NULL,+inf]])->TableReader(Table(t))->HashAgg->Sel([or(and(le(Column#26, 1), if(ne(Column#27, 0), , 1)), or(eq(Column#28, 0), 0))])}",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ STREAM_AGG() */ sum(t1.a) from t t1 join t t2 on t1.b = t2.b group by t1.b",
-        "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))->Sort->Projection->StreamAgg}(test.t.b,test.t.b)->HashAgg",
-        "Warning": ""
-      },
-      {
-        "SQL": "select /*+ STREAM_AGG() */ e, sum(b) from t group by e",
-        "Best": "TableReader(Table(t))->Sort->Projection->StreamAgg->Projection",
-        "Warning": ""
-      }
-    ]
-  },
-  {
-    "Name": "TestQueryBlockHint",
-    "Cases": [
-      {
-        "SQL": "select /*+ MERGE_JOIN(@sel_1 t1), INL_JOIN(@sel_2 t3) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a",
-        "Plan": "IndexJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)",
-        "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), no_order_index(@`sel_2` `test`.`t3` `c_d_e`)"
-      },
-      {
-        "SQL": "select /*+ MERGE_JOIN(@sel_1 t1), INL_JOIN(@qb t3) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a",
-        "Plan": "IndexJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,NULL]])}(test.t.a,test.t.c)",
-        "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), no_order_index(@`sel_2` `test`.`t3` `c_d_e`)"
-      },
-      {
-        "SQL": "select /*+ HASH_JOIN(@sel_1 t1), MERGE_JOIN(@sel_2 t2) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a",
-        "Plan": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)",
-        "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)"
-      },
-      {
-        "SQL": "select /*+ HASH_JOIN(@sel_1 t1), MERGE_JOIN(@qb t2) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a",
-        "Plan": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)",
-        "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)"
-      },
-      {
-        "SQL": "select /*+ INL_JOIN(@sel_1 t1), HASH_JOIN(@sel_2 t2) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a",
-        "Plan": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)",
-        "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)"
-      },
-      {
-        "SQL": "select /*+ INL_JOIN(@sel_1 t1), HASH_JOIN(@qb t2) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a",
-        "Plan": "MergeInnerJoin{MergeInnerJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t.a,test.t.c)",
-        "Hints": "use_index(@`sel_1` `test`.`t1` ), order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` ), order_index(@`sel_2` `test`.`t2` `primary`), use_index(@`sel_2` `test`.`t3` `c_d_e`), order_index(@`sel_2` `test`.`t3` `c_d_e`)"
-      },
-      {
-        "SQL": "select /*+ HASH_AGG(@sel_1), STREAM_AGG(@sel_2) */ count(*) from t t1 where t1.a < (select count(*) from t t2 where t1.a > t2.a)",
-        "Plan": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)])->StreamAgg)->StreamAgg}->HashAgg",
-        "Hints": "hash_agg(@`sel_1`), use_index(@`sel_1` `test`.`t1` `f`), no_order_index(@`sel_1` `test`.`t1` `f`), stream_agg(@`sel_2`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`), agg_to_cop(@`sel_2`)"
-      },
-      {
-        "SQL": "select /*+ STREAM_AGG(@sel_1), HASH_AGG(@qb) */ count(*) from t t1 where t1.a < (select /*+ QB_NAME(qb) */ count(*) from t t2 where t1.a > t2.a)",
-        "Plan": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)])->HashAgg)->HashAgg}->StreamAgg",
-        "Hints": "stream_agg(@`sel_1`), use_index(@`sel_1` `test`.`t1` `f`), no_order_index(@`sel_1` `test`.`t1` `f`), hash_agg(@`sel_2`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`), agg_to_cop(@`sel_2`)"
-      },
-      {
-        "SQL": "select /*+ HASH_AGG(@sel_2) */ a, (select count(*) from t t1 where t1.b > t.a) from t where b > (select b from t t2 where t2.b = t.a limit 1)",
-        "Plan": "Apply{Apply{TableReader(Table(t))->TableReader(Table(t)->Sel([eq(test.t.b, test.t.a)])->Limit)->Limit}->TableReader(Table(t)->Sel([gt(test.t.b, test.t.a)])->HashAgg)->HashAgg}->Projection",
-        "Hints": "use_index(@`sel_1` `test`.`t` ), no_order_index(@`sel_1` `test`.`t` `primary`), use_index(@`sel_3` `test`.`t2` ), no_order_index(@`sel_3` `test`.`t2` `primary`), limit_to_cop(@`sel_3`), hash_agg(@`sel_2`), use_index(@`sel_2` `test`.`t1` ), no_order_index(@`sel_2` `test`.`t1` `primary`), agg_to_cop(@`sel_2`)"
-      },
-      {
-        "SQL": "select /*+ HASH_JOIN(@sel_1 t1), HASH_JOIN(@sel_2 t1) */ t1.b, t2.a, t2.aa from t t1, (select t1.a as a, t2.a as aa from t t1, t t2) t2 where t1.a = t2.aa;",
-        "Plan": "LeftHashJoin{LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)->IndexReader(Index(t.f)[[NULL,+inf]])}->Projection",
-        "Hints": "use_index(@`sel_1` `test`.`t1` ), no_order_index(@`sel_1` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`), use_index(@`sel_2` `test`.`t1` `f`), no_order_index(@`sel_2` `test`.`t1` `f`)"
-      },
-      {
-        "SQL": "select /*+ HASH_JOIN(@sel_2 t1@sel_2, t2@sel_2), MERGE_JOIN(@sel_1 t1@sel_1, t2@sel_1) */ * from (select t1.a, t1.b from t t1, t t2 where t1.a = t2.a) t1, t t2 where t1.b = t2.b",
-        "Plan": "MergeInnerJoin{TableReader(Table(t))->Sort->LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.f)[[NULL,+inf]])}(test.t.a,test.t.a)->Sort}(test.t.b,test.t.b)->Projection",
-        "Hints": "use_index(@`sel_1` `test`.`t2` ), no_order_index(@`sel_1` `test`.`t2` `primary`), hash_join_build(@`sel_2` `test`.`t2`@`sel_2`), use_index(@`sel_2` `test`.`t1` ), no_order_index(@`sel_2` `test`.`t1` `primary`), use_index(@`sel_2` `test`.`t2` `f`), no_order_index(@`sel_2` `test`.`t2` `f`)"
-      }
-    ]
-  },
-  {
-    "Name": "TestSemiJoinToInner",
-    "Cases": [
-      {
-        "SQL": "select t1.a, (select count(t2.a) from t t2 where t2.g in (select t3.d from t t3 where t3.c = t1.a)) as agg_col from t t1;",
-        "Best": "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexHashJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]]->HashAgg)->HashAgg->IndexReader(Index(t.g)[[NULL,NULL]])}(test.t.d,test.t.g)}->HashAgg"
-      }
-    ]
-  },
-  {
-    "Name": "TestIndexJoinHint",
-    "Cases": [
-      {
-        "SQL": "select /*+ INL_JOIN(t1) */ * from t1 join t2 on t1.a = t2.a;",
-        "Plan": "IndexJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)",
-        "Warns": null
-      },
-      {
-        "SQL": "select /*+ INL_HASH_JOIN(t1) */ * from t1 join t2 on t1.a = t2.a;",
-        "Plan": "IndexHashJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)",
-        "Warns": null
-      },
-      {
-        "SQL": "select /*+ INL_MERGE_JOIN(t1) */ * from t1 join t2 on t1.a = t2.a;",
-        "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)",
-        "Warns": [
-          "[planner:1815]The INDEX MERGE JOIN hint is deprecated for usage, try other hints."
-        ]
-      },
-      {
-        "SQL": "select /*+ inl_merge_join(t2) */ t1.a, t2.a from t t1 left join t t2 use index(g_2) on t1.g=t2.g",
-        "Plan": "MergeLeftOuterJoin{IndexReader(Index(t.g_2)[[NULL,+inf]])->IndexReader(Index(t.g_2)[[-inf,+inf]])}(test.t.g,test.t.g)",
-        "Warns": [
-          "[planner:1815]The INDEX MERGE JOIN hint is deprecated for usage, try other hints."
-        ]
-      },
-      {
-        "SQL": "select /*+inl_merge_join(t2)*/ t1.a, t2.a from t t1 left join t t2 use index(g_2) on t1.g=t2.g order by t1.a",
-        "Plan": "IndexHashJoin{TableReader(Table(t))->IndexReader(Index(t.g_2)[[NULL,NULL]]->Sel([not(isnull(test.t.g))]))}(test.t.g,test.t.g)",
-        "Warns": [
-          "[planner:1815]The INDEX MERGE JOIN hint is deprecated for usage, try other hints."
-        ]
-      }
-    ]
-  },
-  {
-    "Name": "TestAggToCopHint",
-    "Cases": [
-      {
-        "SQL": "select /*+ AGG_TO_COP(), HASH_AGG(), USE_INDEX(t) */ sum(a) from ta group by a",
-        "Best": "IndexReader(Index(ta.a)[[NULL,+inf]]->HashAgg)->HashAgg",
-        "Warning": "[planner:1815]use_index(test.t) is inapplicable, check whether the table(test.t) exists"
-      },
-      {
-        "SQL": "select /*+ AGG_TO_COP(), USE_INDEX(t) */ sum(b) from ta group by b",
-        "Best": "TableReader(Table(ta)->HashAgg)->HashAgg",
-        "Warning": "[planner:1815]use_index(test.t) is inapplicable, check whether the table(test.t) exists"
-      },
-      {
-        "SQL": "select /*+ AGG_TO_COP(), HASH_AGG(), USE_INDEX(t) */ distinct a from ta group by a",
-        "Best": "IndexReader(Index(ta.a)[[NULL,+inf]]->HashAgg)->HashAgg",
-        "Warning": "[planner:1815]use_index(test.t) is inapplicable, check whether the table(test.t) exists"
-      },
-      {
-        "SQL": "select /*+ AGG_TO_COP(), HASH_AGG(), HASH_JOIN(t1), USE_INDEX(t1), USE_INDEX(t2) */ sum(t1.a) from ta t1, ta t2 where t1.a = t2.b group by t1.a",
-        "Best": "LeftHashJoin{TableReader(Table(ta)->Sel([not(isnull(test.ta.a))]))->TableReader(Table(ta)->Sel([not(isnull(test.ta.b))]))}(test.ta.a,test.ta.b)->Projection->HashAgg",
-        "Warning": "[planner:1815]Optimizer Hint AGG_TO_COP is inapplicable"
-      }
-    ]
-  },
-  {
-    "Name": "TestGroupConcatOrderby",
-    "Cases": [
-      {
-        "SQL": "select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test;",
-        "Plan": [
-          "HashAgg 1.00 root funcs:group_concat(Column#6 order by Column#7 desc separator \"++\")->Column#4, funcs:group_concat(Column#8 order by Column#7 desc, Column#9 separator \"--\")->Column#5",
-          "└─Projection 10000.00 root cast(test.test.name, var_string(20))->Column#6, test.test.name->Column#7, cast(test.test.id, var_string(20))->Column#8, test.test.id->Column#9",
-          "  └─TableReader 10000.00 root data:TableFullScan",
-          "    └─TableFullScan 10000.00 cop[tikv] table:test keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "500++200++30++20++20++10 3--3--1--1--2--1"
-        ]
-      },
-      {
-        "SQL": "select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest;",
-        "Plan": [
-          "HashAgg 1.00 root funcs:group_concat(Column#6 order by Column#7 desc separator \"++\")->Column#4, funcs:group_concat(Column#8 order by Column#7 desc, Column#9 separator \"--\")->Column#5",
-          "└─Projection 10000.00 root cast(test.ptest.name, var_string(20))->Column#6, test.ptest.name->Column#7, cast(test.ptest.id, var_string(20))->Column#8, test.ptest.id->Column#9",
-          "  └─TableReader 10000.00 root partition:all data:TableFullScan",
-          "    └─TableFullScan 10000.00 cop[tikv] table:ptest keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "500++200++30++20++20++10 3--3--1--1--2--1"
-        ]
-      },
-      {
-        "SQL": "select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from test;",
-        "Plan": [
-          "HashAgg 1.00 root funcs:group_concat(distinct Column#5 order by Column#6 desc separator \",\")->Column#4",
-          "└─Projection 10000.00 root cast(test.test.name, var_string(20))->Column#5, test.test.name->Column#6",
-          "  └─TableReader 10000.00 root data:TableFullScan",
-          "    └─TableFullScan 10000.00 cop[tikv] table:test keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "500,200,30,20,10"
-        ]
-      },
-      {
-        "SQL": "select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest;",
-        "Plan": [
-          "HashAgg 1.00 root funcs:group_concat(distinct Column#5 order by Column#6 desc separator \",\")->Column#4",
-          "└─Projection 10000.00 root cast(test.ptest.name, var_string(20))->Column#5, test.ptest.name->Column#6",
-          "  └─TableReader 10000.00 root partition:all data:TableFullScan",
-          "    └─TableFullScan 10000.00 cop[tikv] table:ptest keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "500,200,30,20,10"
-        ]
-      }
-    ]
-  },
-  {
-    "Name": "TestInlineProjection",
-    "Cases": [
-      {
-        "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1, t2 where t1.a = t2.a;",
-        "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)",
-        "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )"
-      },
-      {
-        "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;",
-        "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)",
-        "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )"
-      },
-      {
-        "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1 left outer join t2 on t1.a = t2.a;",
-        "Plan": "LeftHashJoin{TableReader(Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)",
-        "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )"
-      },
-      {
-        "SQL": "select /*+ HASH_JOIN(t1) */ t1.b, t2.b from t1 right outer join t2 on t1.a = t2.a;",
-        "Plan": "RightHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2))}(test.t1.a,test.t2.a)",
-        "Hints": "hash_join_build(`test`.`t1`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )"
-      },
-      {
-        "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a in (select t2.a from t2) from t1) x;",
-        "Plan": "LeftHashJoin{IndexReader(Index(t1.idx_a)[[NULL,+inf]])->IndexReader(Index(t2.idx_a)[[NULL,+inf]])}->Projection",
-        "Hints": "hash_join(@`sel_2` `test`.`t1`@`sel_2`), use_index(@`sel_2` `test`.`t1` `idx_a`), no_order_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_3` `test`.`t2` `idx_a`), no_order_index(@`sel_3` `test`.`t2` `idx_a`)"
-      },
-      {
-        "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a not in (select t2.a from t2) from t1) x;",
-        "Plan": "LeftHashJoin{IndexReader(Index(t1.idx_a)[[NULL,+inf]])->IndexReader(Index(t2.idx_a)[[NULL,+inf]])}->Projection",
-        "Hints": "hash_join(@`sel_2` `test`.`t1`@`sel_2`), use_index(@`sel_2` `test`.`t1` `idx_a`), no_order_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_3` `test`.`t2` `idx_a`), no_order_index(@`sel_3` `test`.`t2` `idx_a`)"
-      },
-      {
-        "SQL": "select /*+ INL_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;",
-        "Plan": "IndexJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)",
-        "Hints": "inl_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_a`), no_order_index(@`sel_1` `test`.`t1` `idx_a`), use_index(@`sel_1` `test`.`t2` )"
-      },
-      {
-        "SQL": "select /*+ INL_HASH_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;",
-        "Plan": "IndexHashJoin{IndexLookUp(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]), Table(t1))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t2.a,test.t1.a)",
-        "Hints": "inl_hash_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_a`), no_order_index(@`sel_1` `test`.`t1` `idx_a`), use_index(@`sel_1` `test`.`t2` )"
-      },
-      {
-        "SQL": "select /*+ INL_MERGE_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;",
-        "Plan": "LeftHashJoin{TableReader(Table(t1)->Sel([not(isnull(test.t1.a))]))->TableReader(Table(t2)->Sel([not(isnull(test.t2.a))]))}(test.t1.a,test.t2.a)",
-        "Hints": "hash_join_build(`test`.`t2`), use_index(@`sel_1` `test`.`t1` ), use_index(@`sel_1` `test`.`t2` )"
-      },
-      {
-        "SQL": "select /*+ MERGE_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;",
-        "Plan": "MergeInnerJoin{IndexLookUp(Index(t1.idx_a)[[-inf,+inf]], Table(t1))->Projection->IndexLookUp(Index(t2.idx_a)[[-inf,+inf]], Table(t2))->Projection}(test.t1.a,test.t2.a)",
-        "Hints": "merge_join(`test`.`t1`), use_index(@`sel_1` `test`.`t1` `idx_a`), order_index(@`sel_1` `test`.`t1` `idx_a`), use_index(@`sel_1` `test`.`t2` `idx_a`), order_index(@`sel_1` `test`.`t2` `idx_a`)"
-      }
-    ]
-  },
-  {
-    "Name": "TestHintFromDiffDatabase",
-    "Cases": [
-      {
-        "SQL": "select /*+ inl_hash_join(test.t1) */ * from test.t2 join test.t1 on test.t2.a = test.t1.a",
-        "Plan": "IndexHashJoin{IndexReader(Index(t2.idx_a)[[-inf,+inf]])->IndexReader(Index(t1.idx_a)[[NULL,NULL]]->Sel([not(isnull(test.t1.a))]))}(test.t2.a,test.t1.a)"
-      }
-    ]
-  },
-  {
-    "Name": "TestMPPSinglePartitionType",
-    "Cases": [
-      {
-        "SQL": "select * from employee where deptid>1",
-        "Plan": [
-          "TableReader 3333.33 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Selection 3333.33 mpp[tiflash] gt(test.employee.deptid, 1)",
-          "    └─TableFullScan 10000.00 mpp[tiflash] table:employee pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select deptid+5, empid*10 from employee where deptid>1",
-        "Plan": [
-          "TableReader 3333.33 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 3333.33 mpp[tiflash] plus(test.employee.deptid, 5)->Column#5, mul(test.employee.empid, 10)->Column#6",
-          "    └─Selection 3333.33 mpp[tiflash] gt(test.employee.deptid, 1)",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:employee pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select count(*) from employee group by deptid+1",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 8000.00 mpp[tiflash] Column#5",
-          "    └─HashAgg 8000.00 mpp[tiflash] group by:Column#12, funcs:sum(Column#13)->Column#5",
-          "      └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "        └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#12, collate: binary]",
-          "          └─HashAgg 8000.00 mpp[tiflash] group by:Column#14, funcs:count(1)->Column#13",
-          "            └─Projection 10000.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#14",
-          "              └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select count(distinct deptid) a from employee",
-        "Plan": [
-          "TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 1.00 mpp[tiflash] Column#5",
-          "    └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#7)->Column#5",
-          "      └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "        └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "          └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#7",
-          "            └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "              └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "                └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "                  └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from employee join employee e1 using(deptid)",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 12487.50 mpp[tiflash] test.employee.deptid, test.employee.empid, test.employee.salary, test.employee.empid, test.employee.salary",
-          "    └─Projection 12487.50 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, test.employee.empid, test.employee.salary",
-          "      └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, test.employee.deptid)]",
-          "        ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          "        │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "        │   └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "        │     └─TableFullScan 10000.00 mpp[tiflash] table:employee pushed down filter:empty, keep order:false, stats:pseudo",
-          "        └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "          └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select count(distinct a) from (select count(distinct deptid) a from employee) x",
-        "Plan": [
-          "TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 1.00 mpp[tiflash] Column#6",
-          "    └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct Column#5)->Column#6",
-          "      └─Projection 1.00 mpp[tiflash] Column#5",
-          "        └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#8)->Column#5",
-          "          └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "            └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "              └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#8",
-          "                └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "                  └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "                    └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "                      └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select count(a) from (select count(distinct deptid) a, count(distinct empid) b from employee) x group by b+1",
-        "Plan": [
-          "TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 1.00 mpp[tiflash] Column#7",
-          "    └─HashAgg 1.00 mpp[tiflash] group by:Column#12, funcs:sum(Column#13)->Column#7",
-          "      └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "        └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#12, collate: binary]",
-          "          └─HashAgg 1.00 mpp[tiflash] group by:Column#15, funcs:count(Column#14)->Column#13",
-          "            └─Projection 1.00 mpp[tiflash] Column#5->Column#14, plus(Column#6, 1)->Column#15",
-          "              └─Projection 1.00 mpp[tiflash] Column#5, Column#6",
-          "                └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#5, funcs:count(distinct test.employee.empid)->Column#6",
-          "                  └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "                    └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "                      └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, test.employee.empid, ",
-          "                        └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select count(a) from (select count(distinct deptid) a, count(distinct empid) b from employee) x group by b",
-        "Plan": [
-          "TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 1.00 mpp[tiflash] Column#7",
-          "    └─HashAgg 1.00 mpp[tiflash] group by:Column#6, funcs:count(Column#5)->Column#7",
-          "      └─Projection 1.00 mpp[tiflash] Column#5, Column#6",
-          "        └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#5, funcs:count(distinct test.employee.empid)->Column#6",
-          "          └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "            └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "              └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, test.employee.empid, ",
-          "                └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from employee join (select count(distinct deptid) a, count(distinct empid) b from employee) e1",
-        "Plan": [
-          "TableReader 10000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 10000.00 mpp[tiflash] CARTESIAN inner join",
-          "    ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "    │   └─Projection 1.00 mpp[tiflash] Column#9, Column#10",
-          "    │     └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#9, funcs:count(distinct test.employee.empid)->Column#10",
-          "    │       └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "    │         └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "    │           └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, test.employee.empid, ",
-          "    │             └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "    └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from employee e1 join (select count(distinct deptid) a from employee) e2 on e1.deptid = e2.a",
-        "Plan": [
-          "TableReader 1.25 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 1.25 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 1.25 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#9)]",
-          "    ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "    │   └─Projection 1.00 mpp[tiflash] Column#9",
-          "    │     └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#10)->Column#9",
-          "    │       └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "    │         └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "    │           └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#10",
-          "    │             └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "    │               └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "    │                 └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "    │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "    └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from (select count(distinct deptid) a from employee) e1 join employee e2 on e1.a = e2.deptid",
-        "Plan": [
-          "TableReader 1.25 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 1.25 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 1.25 mpp[tiflash] Column#5, test.employee.empid, test.employee.deptid, test.employee.salary",
-          "    └─HashJoin 1.25 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#5)]",
-          "      ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ",
-          "      │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "      │   └─Projection 1.00 mpp[tiflash] Column#5",
-          "      │     └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#10)->Column#5",
-          "      │       └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "      │         └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "      │           └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#10",
-          "      │             └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "      │               └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "      │                 └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "      │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "      └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "        └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from (select count(distinct deptid) a from employee) e1 join (select count(distinct deptid) b from employee) e2 on e1.a=e2.b",
-        "Plan": [
-          "TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 1.00 mpp[tiflash] inner join, equal:[eq(Column#5, Column#10)]",
-          "    ├─ExchangeReceiver(Build) 1.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "    │   └─Projection 1.00 mpp[tiflash] Column#5",
-          "    │     └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#11)->Column#5",
-          "    │       └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "    │         └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "    │           └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#11",
-          "    │             └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "    │               └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "    │                 └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "    │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "    └─Projection(Probe) 1.00 mpp[tiflash] Column#10",
-          "      └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#12)->Column#10",
-          "        └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "          └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "            └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#12",
-          "              └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "                └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "                  └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "                    └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from employee e1 join employee e2 on e1.deptid = e2.deptid",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, test.employee.deptid)]",
-          "    ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "    │   └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "    │     └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo",
-          "    └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join employee e2 on e1.d = e2.deptid",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 8000.00 mpp[tiflash] Column#6, Column#5, test.employee.empid, test.employee.deptid, test.employee.salary",
-          "    └─HashJoin 8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#6)]",
-          "      ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ",
-          "      │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "      │   └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5",
-          "      │     └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "      │       └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid",
-          "      │         └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#5, funcs:firstrow(Column#15)->test.employee.deptid",
-          "      │           └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "      │             └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]",
-          "      │               └─HashAgg 8000.00 mpp[tiflash] group by:Column#27, funcs:count(Column#25)->Column#14, funcs:firstrow(Column#26)->Column#15",
-          "      │                 └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#25, test.employee.deptid->Column#26, plus(test.employee.deptid, 1)->Column#27",
-          "      │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "      └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "        └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from employee e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.deptid = e2.d",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#10)]",
-          "    ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "    │   └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#10, Column#9",
-          "    │     └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "    │       └─Projection 8000.00 mpp[tiflash] Column#9, test.employee.deptid",
-          "    │         └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#9, funcs:firstrow(Column#15)->test.employee.deptid",
-          "    │           └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "    │             └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]",
-          "    │               └─HashAgg 8000.00 mpp[tiflash] group by:Column#27, funcs:count(Column#25)->Column#14, funcs:firstrow(Column#26)->Column#15",
-          "    │                 └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#25, test.employee.deptid->Column#26, plus(test.employee.deptid, 1)->Column#27",
-          "    │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "    └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.d = e2.d",
-        "Plan": [
-          "TableReader 6400.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 6400.00 mpp[tiflash] inner join, equal:[eq(Column#6, Column#12)]",
-          "    ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "    │   └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5",
-          "    │     └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "    │       └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid",
-          "    │         └─HashAgg 8000.00 mpp[tiflash] group by:Column#17, funcs:sum(Column#18)->Column#5, funcs:firstrow(Column#19)->test.employee.deptid",
-          "    │           └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "    │             └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#17, collate: binary]",
-          "    │               └─HashAgg 8000.00 mpp[tiflash] group by:Column#43, funcs:count(Column#41)->Column#18, funcs:firstrow(Column#42)->Column#19",
-          "    │                 └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#41, test.employee.deptid->Column#42, plus(test.employee.deptid, 1)->Column#43",
-          "    │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "    └─Projection(Probe) 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#12, Column#11",
-          "      └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "        └─Projection 8000.00 mpp[tiflash] Column#11, test.employee.deptid",
-          "          └─HashAgg 8000.00 mpp[tiflash] group by:Column#20, funcs:sum(Column#21)->Column#11, funcs:firstrow(Column#22)->test.employee.deptid",
-          "            └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "              └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary]",
-          "                └─HashAgg 8000.00 mpp[tiflash] group by:Column#46, funcs:count(Column#44)->Column#21, funcs:firstrow(Column#45)->Column#22",
-          "                  └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#44, test.employee.deptid->Column#45, plus(test.employee.deptid, 1)->Column#46",
-          "                    └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "set tidb_broadcast_join_threshold_count=1",
-        "Plan": null
-      },
-      {
-        "SQL": "set tidb_broadcast_join_threshold_size=1",
-        "Plan": null
-      },
-      {
-        "SQL": "select * from (select count(distinct deptid) a from employee) e1 join employee e2 on e1.a = e2.deptid",
-        "Plan": [
-          "Projection 1.25 root Column#5, test.employee.empid, test.employee.deptid, test.employee.salary",
-          "└─HashJoin 1.25 root inner join, equal:[eq(test.employee.deptid, Column#5)]",
-          "  ├─TableReader(Build) 1.00 root MppVersion: 2, data:ExchangeSender",
-          "  │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  │   └─Projection 1.00 mpp[tiflash] Column#5",
-          "  │     └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#12)->Column#5",
-          "  │       └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "  │         └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "  │           └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#12",
-          "  │             └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "  │               └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "  │                 └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "  │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "  └─TableReader(Probe) 9990.00 root MppVersion: 2, data:ExchangeSender",
-          "    └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: PassThrough",
-          "      └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "        └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from (select count(distinct deptid) a from employee) e1 join (select count(distinct deptid) b from employee) e2 on e1.a=e2.b",
-        "Plan": [
-          "HashJoin 1.00 root inner join, equal:[eq(Column#5, Column#10)]",
-          "├─TableReader(Build) 1.00 root MppVersion: 2, data:ExchangeSender",
-          "│ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "│   └─Projection 1.00 mpp[tiflash] Column#10",
-          "│     └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#16)->Column#10",
-          "│       └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "│         └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "│           └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#16",
-          "│             └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "│               └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "│                 └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "│                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 1.00 root MppVersion: 2, data:ExchangeSender",
-          "  └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "    └─Projection 1.00 mpp[tiflash] Column#5",
-          "      └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#15)->Column#5",
-          "        └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "          └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST",
-          "            └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct test.employee.deptid)->Column#15",
-          "              └─ExchangeReceiver 1.00 mpp[tiflash] ",
-          "                └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "                  └─HashAgg 1.00 mpp[tiflash] group by:test.employee.deptid, ",
-          "                    └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from employee e1 join employee e2 on e1.deptid = e2.deptid",
-        "Plan": [
-          "TableReader 12487.50 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, test.employee.deptid)]",
-          "    ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "    │   └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "    │     └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo",
-          "    └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ",
-          "      └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.employee.deptid, collate: binary]",
-          "        └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "          └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join employee e2 on e1.d = e2.deptid",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 8000.00 mpp[tiflash] Column#6, Column#5, test.employee.empid, test.employee.deptid, test.employee.salary",
-          "    └─Projection 8000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#6, Column#5, Column#26",
-          "      └─HashJoin 8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#6)]",
-          "        ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ",
-          "        │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#6, collate: binary]",
-          "        │   └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5",
-          "        │     └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "        │       └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid",
-          "        │         └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#5, funcs:firstrow(Column#15)->test.employee.deptid",
-          "        │           └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "        │             └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]",
-          "        │               └─HashAgg 8000.00 mpp[tiflash] group by:Column#29, funcs:count(Column#27)->Column#14, funcs:firstrow(Column#28)->Column#15",
-          "        │                 └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#27, test.employee.deptid->Column#28, plus(test.employee.deptid, 1)->Column#29",
-          "        │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "        └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ",
-          "          └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#26, collate: binary]",
-          "            └─Projection 9990.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, cast(test.employee.deptid, bigint(20))->Column#26",
-          "              └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "                └─TableFullScan 10000.00 mpp[tiflash] table:e2 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from employee e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.deptid = e2.d",
-        "Plan": [
-          "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 8000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#10, Column#9",
-          "    └─Projection 8000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#10, Column#9, Column#26",
-          "      └─HashJoin 8000.00 mpp[tiflash] inner join, equal:[eq(test.employee.deptid, Column#10)]",
-          "        ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ",
-          "        │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#10, collate: binary]",
-          "        │   └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#10, Column#9",
-          "        │     └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "        │       └─Projection 8000.00 mpp[tiflash] Column#9, test.employee.deptid",
-          "        │         └─HashAgg 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#9, funcs:firstrow(Column#15)->test.employee.deptid",
-          "        │           └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "        │             └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]",
-          "        │               └─HashAgg 8000.00 mpp[tiflash] group by:Column#29, funcs:count(Column#27)->Column#14, funcs:firstrow(Column#28)->Column#15",
-          "        │                 └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#27, test.employee.deptid->Column#28, plus(test.employee.deptid, 1)->Column#29",
-          "        │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "        └─ExchangeReceiver(Probe) 9990.00 mpp[tiflash] ",
-          "          └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#26, collate: binary]",
-          "            └─Projection 9990.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, cast(test.employee.deptid, bigint(20))->Column#26",
-          "              └─Selection 9990.00 mpp[tiflash] not(isnull(test.employee.deptid))",
-          "                └─TableFullScan 10000.00 mpp[tiflash] table:e1 pushed down filter:empty, keep order:false, stats:pseudo"
-        ]
-      },
-      {
-        "SQL": "select * from (select deptid+1 d, count(empid) a from employee group by d) e1 join (select deptid+1 d, count(empid) a from employee group by d) e2 on e1.d = e2.d",
-        "Plan": [
-          "TableReader 6400.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─HashJoin 6400.00 mpp[tiflash] inner join, equal:[eq(Column#6, Column#12)]",
-          "    ├─ExchangeReceiver(Build) 6400.00 mpp[tiflash] ",
-          "    │ └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#6, collate: binary]",
-          "    │   └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#6, Column#5",
-          "    │     └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "    │       └─Projection 8000.00 mpp[tiflash] Column#5, test.employee.deptid",
-          "    │         └─HashAgg 8000.00 mpp[tiflash] group by:Column#17, funcs:sum(Column#18)->Column#5, funcs:firstrow(Column#19)->test.employee.deptid",
-          "    │           └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "    │             └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#17, collate: binary]",
-          "    │               └─HashAgg 8000.00 mpp[tiflash] group by:Column#43, funcs:count(Column#41)->Column#18, funcs:firstrow(Column#42)->Column#19",
-          "    │                 └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#41, test.employee.deptid->Column#42, plus(test.employee.deptid, 1)->Column#43",
-          "    │                   └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo",
-          "    └─ExchangeReceiver(Probe) 6400.00 mpp[tiflash] ",
-          "      └─ExchangeSender 6400.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#12, collate: binary]",
-          "        └─Projection 6400.00 mpp[tiflash] plus(test.employee.deptid, 1)->Column#12, Column#11",
-          "          └─Selection 6400.00 mpp[tiflash] not(isnull(plus(test.employee.deptid, 1)))",
-          "            └─Projection 8000.00 mpp[tiflash] Column#11, test.employee.deptid",
-          "              └─HashAgg 8000.00 mpp[tiflash] group by:Column#20, funcs:sum(Column#21)->Column#11, funcs:firstrow(Column#22)->test.employee.deptid",
-          "                └─ExchangeReceiver 8000.00 mpp[tiflash] ",
-          "                  └─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#20, collate: binary]",
-          "                    └─HashAgg 8000.00 mpp[tiflash] group by:Column#46, funcs:count(Column#44)->Column#21, funcs:firstrow(Column#45)->Column#22",
-          "                      └─Projection 10000.00 mpp[tiflash] test.employee.empid->Column#44, test.employee.deptid->Column#45, plus(test.employee.deptid, 1)->Column#46",
-          "                        └─TableFullScan 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo"
-        ]
-      }
-    ]
-  },
-  {
-    "Name": "TestSemiJoinRewriteHints",
-    "Cases": [
-      {
-        "SQL": "select /*+ SEMI_JOIN_REWRITE() */ * from t",
-        "Plan": [
-          "TableReader 10000.00 root data:TableFullScan",
-          "└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": "[planner:1815]The SEMI_JOIN_REWRITE hint is not used correctly, maybe it's not in a subquery or the subquery is not EXISTS clause."
-      },
-      {
-        "SQL": "select * from t where a > (select /*+ SEMI_JOIN_REWRITE() */ min(b) from t t1 where t1.c = t.c)",
-        "Plan": [
-          "HashJoin 7992.00 root inner join, equal:[eq(test.t.c, test.t.c)], other cond:gt(test.t.a, Column#9)",
-          "├─Selection(Build) 6393.60 root not(isnull(Column#9))",
-          "│ └─HashAgg 7992.00 root group by:test.t.c, funcs:min(Column#10)->Column#9, funcs:firstrow(test.t.c)->test.t.c",
-          "│   └─TableReader 7992.00 root data:HashAgg",
-          "│     └─HashAgg 7992.00 cop[tikv] group by:test.t.c, funcs:min(test.t.b)->Column#10",
-          "│       └─Selection 9990.00 cop[tikv] not(isnull(test.t.c))",
-          "│         └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9980.01 root data:Selection",
-          "  └─Selection 9980.01 cop[tikv] not(isnull(test.t.a)), not(isnull(test.t.c))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": "[planner:1815]The SEMI_JOIN_REWRITE hint is not used correctly, maybe it's not in a subquery or the subquery is not EXISTS clause."
-      },
-      {
-        "SQL": "select * from t where exists (select /*+ SEMI_JOIN_REWRITE() */ 1 from t t1 where t1.a=t.a)",
-        "Plan": [
-          "HashJoin 9990.00 root inner join, equal:[eq(test.t.a, test.t.a)]",
-          "├─HashAgg(Build) 7992.00 root group by:test.t.a, funcs:firstrow(test.t.a)->test.t.a",
-          "│ └─TableReader 7992.00 root data:HashAgg",
-          "│   └─HashAgg 7992.00 cop[tikv] group by:test.t.a, ",
-          "│     └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))",
-          "│       └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9990.00 root data:Selection",
-          "  └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": ""
-      },
-      {
-        "SQL": "select * from t where exists (select /*+ SEMI_JOIN_REWRITE() */ t.b from t t1 where t1.a=t.a)",
-        "Plan": [
-          "HashJoin 9990.00 root inner join, equal:[eq(test.t.a, test.t.a)]",
-          "├─HashAgg(Build) 7992.00 root group by:test.t.a, funcs:firstrow(test.t.a)->test.t.a",
-          "│ └─TableReader 7992.00 root data:HashAgg",
-          "│   └─HashAgg 7992.00 cop[tikv] group by:test.t.a, ",
-          "│     └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))",
-          "│       └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9990.00 root data:Selection",
-          "  └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": ""
-      },
-      {
-        "SQL": "select exists(select /*+ SEMI_JOIN_REWRITE() */ * from t t1 where t1.a=t.a) from t",
-        "Plan": [
-          "HashJoin 10000.00 root left outer semi join, equal:[eq(test.t.a, test.t.a)]",
-          "├─TableReader(Build) 10000.00 root data:TableFullScan",
-          "│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 10000.00 root data:TableFullScan",
-          "  └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": "[planner:1815]SEMI_JOIN_REWRITE() is inapplicable for LeftOuterSemiJoin."
-      },
-      {
-        "SQL": "select * from t where exists (select /*+ SEMI_JOIN_REWRITE() */ 1 from t t1 where t1.a > t.a)",
-        "Plan": [
-          "HashJoin 7992.00 root CARTESIAN semi join, other cond:gt(test.t.a, test.t.a)",
-          "├─TableReader(Build) 9990.00 root data:Selection",
-          "│ └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))",
-          "│   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9990.00 root data:Selection",
-          "  └─Selection 9990.00 cop[tikv] not(isnull(test.t.a))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": "[planner:1815]SEMI_JOIN_REWRITE() is inapplicable for SemiJoin with left conditions or other conditions."
-      }
-    ]
-  },
-  {
-    "Name": "TestHJBuildAndProbeHint4DynamicPartitionTable",
-    "Cases": [
-      {
-        "SQL": "select /*+ hash_join_build(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "├─TableReader(Build) 9980.01 root partition:all data:Selection",
-          "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
-          "│   └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9980.01 root partition:all data:Selection",
-          "  └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "1 1"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select /*+ hash_join_probe(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "├─TableReader(Build) 9980.01 root partition:all data:Selection",
-          "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))",
-          "│   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9980.01 root partition:all data:Selection",
-          "  └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "1 1"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select /*+ hash_join_build(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "├─TableReader(Build) 9980.01 root partition:all data:Selection",
-          "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))",
-          "│   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9980.01 root partition:all data:Selection",
-          "  └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "1 1"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select /*+ hash_join_probe(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "HashJoin 12475.01 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "├─TableReader(Build) 9980.01 root partition:all data:Selection",
-          "│ └─Selection 9980.01 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
-          "│   └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 9980.01 root partition:all data:Selection",
-          "  └─Selection 9980.01 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))",
-          "    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Result": [
-          "1 1"
-        ],
-        "Warning": null
-      }
-    ]
-  },
-  {
-    "Name": "TestHJBuildAndProbeHint4TiFlash",
-    "Cases": [
-      {
-        "SQL": "select /*+ hash_join_build(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a",
-          "    └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "      ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ",
-          "      │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "      │   └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo",
-          "      └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select /*+ hash_join_probe(t2) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a",
-          "    └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "      ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ",
-          "      │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "      │   └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo",
-          "      └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select /*+ hash_join_build(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a",
-          "    └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "      ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ",
-          "      │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "      │   └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo",
-          "      └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select /*+ hash_join_probe(t1) */ t1.a, t2.a from t1 join t2 on t1.a=t2.a and t1.b=t2.b",
-        "Plan": [
-          "TableReader 12500.00 root MppVersion: 2, data:ExchangeSender",
-          "└─ExchangeSender 12500.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  └─Projection 12500.00 mpp[tiflash] test.t1.a, test.t2.a",
-          "    └─HashJoin 12500.00 mpp[tiflash] inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
-          "      ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ",
-          "      │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST",
-          "      │   └─TableFullScan 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo",
-          "      └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      }
-    ]
-  },
-  {
-    "Name": "TestCountStarForTiFlash",
-    "Cases": [
-      {
-        "SQL": "select count(*) from t",
-        "Plan": [
-          "HashAgg 1.00 root funcs:count(Column#12)->Column#10",
-          "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "  └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "    └─HashAgg 1.00 mpp[tiflash] funcs:count(test.t.d)->Column#12",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select count(1), count(3.1415), count(0), count(null) from t -- every count but count(null) can be rewritten",
-        "Plan": [
-          "HashAgg 1.00 root funcs:count(Column#18)->Column#10, funcs:count(Column#19)->Column#11, funcs:count(Column#20)->Column#12, funcs:count(Column#21)->Column#13",
-          "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "  └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "    └─HashAgg 1.00 mpp[tiflash] funcs:count(test.t.d)->Column#18, funcs:count(test.t.d)->Column#19, funcs:count(test.t.d)->Column#20, funcs:count(NULL)->Column#21",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select count(*) from t where a=1",
-        "Plan": [
-          "StreamAgg 1.00 root funcs:count(1)->Column#10",
-          "└─TableReader 10.00 root MppVersion: 2, data:ExchangeSender",
-          "  └─ExchangeSender 10.00 mpp[tiflash] ExchangeType: PassThrough",
-          "    └─Selection 10.00 mpp[tiflash] eq(test.t.a, 1)",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select count(*) from t_pick_row_id",
-        "Plan": [
-          "HashAgg 1.00 root funcs:count(Column#5)->Column#3",
-          "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "  └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "    └─HashAgg 1.00 mpp[tiflash] funcs:count(test.t_pick_row_id._tidb_rowid)->Column#5",
-          "      └─TableFullScan 10000.00 mpp[tiflash] table:t_pick_row_id keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select t.b, t.c from (select count(*) as c from t) a, t where a.c=t.a -- test recursive",
-        "Plan": [
-          "HashJoin 1.25 root inner join, equal:[eq(test.t.a, Column#10)]",
-          "├─HashAgg(Build) 1.00 root funcs:count(Column#22)->Column#10",
-          "│ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "│   └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "│     └─HashAgg 1.00 mpp[tiflash] funcs:count(test.t.d)->Column#22",
-          "│       └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo",
-          "└─TableReader(Probe) 10000.00 root MppVersion: 2, data:ExchangeSender",
-          "  └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "    └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select * from t outTable where outTable.a > (select count(*) from t inn where inn.a = outTable.b) -- shouldn't be rewritten for correlated sub query",
-        "Plan": [
-          "Projection 10000.00 root test.t.a, test.t.b, test.t.c, test.t.d, test.t.e, test.t.f, test.t.g, test.t.h",
-          "└─Apply 10000.00 root CARTESIAN inner join, other cond:gt(test.t.a, Column#19)",
-          "  ├─TableReader(Build) 10000.00 root MppVersion: 2, data:ExchangeSender",
-          "  │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "  │   └─TableFullScan 10000.00 mpp[tiflash] table:outTable keep order:false, stats:pseudo",
-          "  └─HashAgg(Probe) 10000.00 root funcs:count(Column#21)->Column#19",
-          "    └─TableReader 10000.00 root MppVersion: 2, data:ExchangeSender",
-          "      └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough",
-          "        └─HashAgg 10000.00 mpp[tiflash] funcs:count(1)->Column#21",
-          "          └─Selection 80000000.00 mpp[tiflash] eq(cast(test.t.a, double BINARY), cast(test.t.b, double BINARY))",
-          "            └─TableFullScan 100000000.00 mpp[tiflash] table:inn pushed down filter:empty, keep order:false, stats:pseudo"
-        ],
-        "Warning": null
-      },
-      {
-        "SQL": "select count(*) from t t1, t t2 where t1.a=t2.e -- shouldn't be rewritten when join under agg",
-        "Plan": [
-          "HashAgg 1.00 root funcs:count(Column#20)->Column#19",
-          "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender",
-          "  └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough",
-          "    └─HashAgg 1.00 mpp[tiflash] funcs:count(1)->Column#20",
-          "      └─Projection 12500.00 mpp[tiflash] test.t.a",
-          "        └─HashJoin 12500.00 mpp[tiflash] inner 
join, equal:[eq(test.t.a, test.t.e)]", - " ├─ExchangeReceiver(Build) 10000.00 mpp[tiflash] ", - " │ └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: Broadcast, Compression: FAST", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo", - " └─TableFullScan(Probe) 10000.00 mpp[tiflash] table:t2 keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select count(distinct 1) from t -- shouldn't be rewritten", - "Plan": [ - "TableReader 1.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 1.00 mpp[tiflash] Column#10", - " └─HashAgg 1.00 mpp[tiflash] funcs:count(distinct Column#12)->Column#10", - " └─ExchangeReceiver 1.00 mpp[tiflash] ", - " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough, Compression: FAST", - " └─HashAgg 1.00 mpp[tiflash] group by:1, ", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select count(1), count(a), count(b) from t -- keep count(1)", - "Plan": [ - "HashAgg 1.00 root funcs:count(Column#16)->Column#10, funcs:count(Column#17)->Column#11, funcs:count(Column#18)->Column#12", - "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1.00 mpp[tiflash] funcs:count(1)->Column#16, funcs:count(test.t.a)->Column#17, funcs:count(test.t.b)->Column#18", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select a, count(*) from t group by a -- shouldn't be rewritten", - "Plan": [ - "TableReader 8000.00 root MppVersion: 2, data:ExchangeSender", - "└─ExchangeSender 8000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection 8000.00 mpp[tiflash] test.t.a, Column#10", - " └─Projection 8000.00 mpp[tiflash] Column#10, test.t.a", - " └─HashAgg 8000.00 mpp[tiflash] group by:test.t.a, funcs:count(1)->Column#10, funcs:firstrow(test.t.a)->test.t.a", - " └─ExchangeReceiver 10000.00 mpp[tiflash] ", - " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary]", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select sum(a) from t -- sum shouldn't be rewritten", - "Plan": [ - "HashAgg 1.00 root funcs:sum(Column#12)->Column#10", - "└─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1.00 mpp[tiflash] funcs:sum(Column#15)->Column#12", - " └─Projection 10000.00 mpp[tiflash] cast(test.t.a, decimal(10,0) BINARY)->Column#15", - " └─TableFullScan 10000.00 mpp[tiflash] table:t keep order:false, stats:pseudo" - ], - "Warning": null - } - ] - }, - { - "Name": "TestHashAggPushdownToTiFlashCompute", - "Cases": [ - { - "SQL": "select /*+ agg_to_cop() hash_agg() */ avg( distinct tbl_15.col_96 ) as r0 , min( tbl_15.col_92 ) as r1 , sum( distinct tbl_15.col_91 ) as r2 , max( tbl_15.col_92 ) as r3 from tbl_15 where tbl_15.col_94 != '2033-01-09' and tbl_15.col_93 > 7623.679908049186 order by r0,r1,r2,r3 limit 79 ;", - "Plan": [ - "Limit 1.00 root offset:0, count:79", - "└─Sort 1.00 root Column#11, Column#12, Column#13, Column#14", - " └─HashAgg 1.00 root funcs:avg(distinct Column#89)->Column#11, funcs:min(Column#90)->Column#12, funcs:sum(distinct Column#91)->Column#13, 
funcs:max(Column#92)->Column#14", - " └─Projection 7100.44 root cast(test.tbl_15.col_96, decimal(10,0) UNSIGNED BINARY)->Column#89, Column#15->Column#90, cast(test.tbl_15.col_91, decimal(3,0) UNSIGNED BINARY)->Column#91, Column#16->Column#92", - " └─PartitionUnion 7100.44 root ", - " ├─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", - " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#18)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#20)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", - " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", - " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", - " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#18, funcs:max(test.tbl_15.col_92)->Column#20", - " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p0 pushed down filter:empty, keep order:false, stats:pseudo", - " ├─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", - " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#30)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#32)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", - " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", - " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", - " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#30, funcs:max(test.tbl_15.col_92)->Column#32", - " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p1 pushed down filter:empty, keep order:false, stats:pseudo", - " ├─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", - " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#42)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#44)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", - " │ └─ExchangeReceiver 1775.11 mpp[tiflash] ", - " │ └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", - " │ └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, 
funcs:min(test.tbl_15.col_92)->Column#42, funcs:max(test.tbl_15.col_92)->Column#44", - " │ └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p2 pushed down filter:empty, keep order:false, stats:pseudo", - " └─TableReader 1775.11 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:min(Column#54)->Column#15, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91, funcs:max(Column#56)->Column#16, funcs:firstrow(test.tbl_15.col_96)->test.tbl_15.col_96, funcs:firstrow(test.tbl_15.col_91)->test.tbl_15.col_91", - " └─ExchangeReceiver 1775.11 mpp[tiflash] ", - " └─ExchangeSender 1775.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.tbl_15.col_96, collate: binary], [name: test.tbl_15.col_91, collate: binary]", - " └─HashAgg 1775.11 mpp[tiflash] group by:test.tbl_15.col_91, test.tbl_15.col_96, funcs:min(test.tbl_15.col_92)->Column#54, funcs:max(test.tbl_15.col_92)->Column#56", - " └─Selection 2218.89 mpp[tiflash] gt(test.tbl_15.col_93, 7623.679908049186), ne(test.tbl_15.col_94, 2033-01-09 00:00:00.000000)", - " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p3 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select /*+ agg_to_cop() hash_agg() */ count(1) from tbl_15 ;", - "Plan": [ - "HashAgg 1.00 root funcs:count(Column#12)->Column#11", - "└─PartitionUnion 4.00 root ", - " ├─HashAgg 1.00 root funcs:count(Column#13)->Column#12", - " │ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", - " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#13", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p0 keep order:false, stats:pseudo", - " ├─HashAgg 1.00 root funcs:count(Column#14)->Column#12", - " │ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", - " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#14", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p1 keep order:false, stats:pseudo", - " ├─HashAgg 1.00 root funcs:count(Column#15)->Column#12", - " │ └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", - " │ └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#15", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p2 keep order:false, stats:pseudo", - " └─HashAgg 1.00 root funcs:count(Column#16)->Column#12", - " └─TableReader 1.00 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1.00 mpp[tiflash] funcs:count(test.tbl_15.col_91)->Column#16", - " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_15, partition:p3 keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select /*+ agg_to_cop() stream_agg() */ avg( tbl_16.col_100 ) as r0 from tbl_16 where tbl_16.col_100 in ( 10672141 ) or tbl_16.col_104 in ( 'yfEG1t!*b' ,'C1*bqx_qyO' ,'vQ^yUpKHr&j#~' ) group by tbl_16.col_100 order by r0 limit 20 ;", - "Plan": [ - "TopN 20.00 root 
Column#10, offset:0, count:20", - "└─HashAgg 63.95 root group by:test.tbl_16.col_100, funcs:avg(Column#11, Column#12)->Column#10", - " └─PartitionUnion 63.95 root ", - " ├─StreamAgg 31.98 root group by:Column#19, funcs:count(Column#19)->Column#11, funcs:sum(Column#20)->Column#12, funcs:firstrow(Column#21)->test.tbl_16.col_100", - " │ └─Projection 39.97 root test.tbl_16.col_100->Column#19, cast(test.tbl_16.col_100, decimal(8,0) UNSIGNED BINARY)->Column#20, test.tbl_16.col_100->Column#21", - " │ └─Sort 39.97 root test.tbl_16.col_100", - " │ └─TableReader 39.97 root MppVersion: 2, data:ExchangeSender", - " │ └─ExchangeSender 39.97 mpp[tiflash] ExchangeType: PassThrough", - " │ └─Selection 39.97 mpp[tiflash] or(eq(test.tbl_16.col_100, 10672141), in(test.tbl_16.col_104, \"yfEG1t!*b\", \"C1*bqx_qyO\", \"vQ^yUpKHr&j#~\"))", - " │ └─TableFullScan 10000.00 mpp[tiflash] table:tbl_16, partition:p0 pushed down filter:empty, keep order:false, stats:pseudo", - " └─StreamAgg 31.98 root group by:Column#22, funcs:count(Column#22)->Column#11, funcs:sum(Column#23)->Column#12, funcs:firstrow(Column#24)->test.tbl_16.col_100", - " └─Projection 39.97 root test.tbl_16.col_100->Column#22, cast(test.tbl_16.col_100, decimal(8,0) UNSIGNED BINARY)->Column#23, test.tbl_16.col_100->Column#24", - " └─Sort 39.97 root test.tbl_16.col_100", - " └─TableReader 39.97 root MppVersion: 2, data:ExchangeSender", - " └─ExchangeSender 39.97 mpp[tiflash] ExchangeType: PassThrough", - " └─Selection 39.97 mpp[tiflash] or(eq(test.tbl_16.col_100, 10672141), in(test.tbl_16.col_104, \"yfEG1t!*b\", \"C1*bqx_qyO\", \"vQ^yUpKHr&j#~\"))", - " └─TableFullScan 10000.00 mpp[tiflash] table:tbl_16, partition:p1 pushed down filter:empty, keep order:false, stats:pseudo" - ], - "Warning": null - } - ] - }, - { - "Name": "TestIssues49377Plan", - "Cases": [ - { - "SQL": "select 1,1,1 union all ((select * from employee where dept_id = 1) union all ( select * from employee where dept_id = 1 order by employee_id ) order by 1 );", - "Plan": [ - "Union 21.00 root ", - "├─Projection 1.00 root 1->Column#15, 1->Column#16, 1->Column#17", - "│ └─TableDual 1.00 root rows:1", - "└─Projection 20.00 root cast(Column#12, bigint(11) BINARY)->Column#15, Column#13->Column#16, cast(Column#14, bigint(11) BINARY)->Column#17", - " └─Sort 20.00 root Column#12", - " └─Union 20.00 root ", - " ├─TableReader 10.00 root data:Selection", - " │ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - " │ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - " └─Sort 10.00 root test.employee.employee_id", - " └─TableReader 10.00 root data:Selection", - " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select 1,1,1 union all ((select * from employee where dept_id = 1) union all ( select * from employee where dept_id = 1 order by employee_id ) order by 1 limit 1);", - "Plan": [ - "Union 2.00 root ", - "├─Projection 1.00 root 1->Column#15, 1->Column#16, 1->Column#17", - "│ └─TableDual 1.00 root rows:1", - "└─Projection 1.00 root cast(Column#12, bigint(11) BINARY)->Column#15, Column#13->Column#16, cast(Column#14, bigint(11) BINARY)->Column#17", - " └─TopN 1.00 root Column#12, offset:0, count:1", - " └─Union 2.00 root ", - " ├─TopN 1.00 root test.employee.employee_id, offset:0, count:1", - " │ └─TableReader 1.00 root data:TopN", - " │ └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", - " │ 
└─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - " │ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - " └─TopN 1.00 root test.employee.employee_id, offset:0, count:1", - " └─TableReader 1.00 root data:TopN", - " └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", - " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id) union all ( select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id ) limit 1);", - "Plan": [ - "Union 21.00 root ", - "├─TableReader 10.00 root data:Selection", - "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - "├─Sort 10.00 root test.employee.employee_id", - "│ └─TableReader 10.00 root data:Selection", - "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - "└─Limit 1.00 root offset:0, count:1", - " └─Union 1.00 root ", - " ├─Limit 1.00 root offset:0, count:1", - " │ └─TableReader 1.00 root data:Limit", - " │ └─Limit 1.00 cop[tikv] offset:0, count:1", - " │ └─Selection 1.00 cop[tikv] eq(test.employee.dept_id, 1)", - " │ └─TableFullScan 1000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - " └─TopN 1.00 root test.employee.employee_id, offset:0, count:1", - " └─TableReader 1.00 root data:TopN", - " └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", - " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id) union all ( select * from employee where dept_id = 1 union all ( select * from employee where dept_id = 1 order by employee_id ) order by 1 limit 1);", - "Plan": [ - "Union 21.00 root ", - "├─TableReader 10.00 root data:Selection", - "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - "├─Sort 10.00 root test.employee.employee_id", - "│ └─TableReader 10.00 root data:Selection", - "│ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - "│ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - "└─TopN 1.00 root Column#17, offset:0, count:1", - " └─Union 2.00 root ", - " ├─TopN 1.00 root test.employee.employee_id, offset:0, count:1", - " │ └─TableReader 1.00 root data:TopN", - " │ └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", - " │ └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - " │ └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo", - " └─TopN 1.00 root test.employee.employee_id, offset:0, count:1", - " └─TableReader 1.00 root data:TopN", - " └─TopN 1.00 cop[tikv] test.employee.employee_id, offset:0, count:1", - " └─Selection 10.00 cop[tikv] eq(test.employee.dept_id, 1)", - " └─TableFullScan 10000.00 cop[tikv] table:employee keep order:false, stats:pseudo" - ], - "Warning": null - } - ] - }, - { - "Name": "TestPointgetIndexChoosen", - 
"Cases": [ - { - "SQL": "select * from t where b=1 and c='1';", - "Plan": [ - "Point_Get 1.00 root table:t, index:ubc(b, c) " - ], - "Warning": null - }, - { - "SQL": "select * from t where b=1 and c='1' and d='1';", - "Plan": [ - "Selection 0.00 root eq(test.t.d, \"1\")", - "└─Point_Get 1.00 root table:t, index:ubc(b, c) " - ], - "Warning": null - }, - { - "SQL": "select * from t where b in (1,2,3) and c in ('1');", - "Plan": [ - "Batch_Point_Get 3.00 root table:t, index:ubc(b, c) keep order:false, desc:false" - ], - "Warning": null - } - ] - }, - { - "Name": "TestAlwaysTruePredicateWithSubquery", - "Cases": [ - { - "SQL": "SHOW ERRORS WHERE TRUE = ALL ( SELECT TRUE GROUP BY 1 LIMIT 1 ) IS NULL IS NOT NULL;", - "Plan": null, - "Warning": null - }, - { - "SQL": "explain select * from t WHERE TRUE = ALL ( SELECT TRUE GROUP BY 1 LIMIT 1 ) IS NULL IS NOT NULL;", - "Plan": [ - "HashJoin_14 10000.00 root CARTESIAN inner join", - "├─StreamAgg_19(Build) 1.00 root funcs:count(1)->Column#12", - "│ └─Limit_22 1.00 root offset:0, count:1", - "│ └─HashAgg_23 1.00 root group by:1, funcs:firstrow(1)->Column#13", - "│ └─TableDual_24 1.00 root rows:1", - "└─TableReader_17(Probe) 10000.00 root data:TableFullScan_16", - " └─TableFullScan_16 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain select * from t WHERE TRUE = ALL ( SELECT TRUE from t GROUP BY 1 LIMIT 1 ) is null is not null;", - "Plan": [ - "HashJoin_14 10000.00 root CARTESIAN inner join", - "├─StreamAgg_19(Build) 1.00 root funcs:count(1)->Column#15", - "│ └─Limit_22 1.00 root offset:0, count:1", - "│ └─HashAgg_27 1.00 root group by:Column#17, funcs:firstrow(Column#18)->Column#16", - "│ └─TableReader_28 1.00 root data:HashAgg_23", - "│ └─HashAgg_23 1.00 cop[tikv] group by:1, funcs:firstrow(1)->Column#18", - "│ └─TableFullScan_26 10000.00 cop[tikv] table:t keep order:false, stats:pseudo", - "└─TableReader_17(Probe) 10000.00 root data:TableFullScan_16", - " └─TableFullScan_16 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - } - ] - }, - { - "Name": "TestExplainExpand", - "Cases": [ - { - "SQL": "explain format = 'brief' select count(1) from t group by a, b with rollup; -- 1. simple agg", - "Plan": [ - "HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:count(1)->Column#10", - "└─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' select sum(c), count(1) from t group by a, b with rollup; -- 2. 
non-grouping set col c", - "Plan": [ - "HashAgg 8000.00 root group by:Column#15, Column#16, Column#17, funcs:sum(Column#14)->Column#10, funcs:count(1)->Column#11", - "└─Projection 10000.00 root cast(test.t.c, decimal(10,0) BINARY)->Column#14, Column#7->Column#15, Column#8->Column#16, gid->Column#17", - " └─Expand 10000.00 root level-projection:[test.t.c, ->Column#7, ->Column#8, 0->gid],[test.t.c, Column#7, ->Column#8, 1->gid],[test.t.c, Column#7, Column#8, 3->gid]; schema: [test.t.c,Column#7,Column#8,gid]", - " └─Projection 10000.00 root test.t.c, test.t.a->Column#7, test.t.b->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' select count(a) from t group by a, b with rollup; -- 3. should keep the original col a", - "Plan": [ - "HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:count(test.t.a)->Column#10", - "└─Expand 10000.00 root level-projection:[test.t.a, ->Column#7, ->Column#8, 0->gid],[test.t.a, Column#7, ->Column#8, 1->gid],[test.t.a, Column#7, Column#8, 3->gid]; schema: [test.t.a,Column#7,Column#8,gid]", - " └─Projection 10000.00 root test.t.a, test.t.a->Column#7, test.t.b->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' select grouping(a) from t group by a, b with rollup; -- 4. contain grouping function ref to grouping set column a", - "Plan": [ - "Projection 8000.00 root grouping(gid)->Column#11", - "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(gid)->gid", - " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' select grouping(a,b) from t group by a, b with rollup; -- 5. grouping function contains grouping set column a,c", - "Plan": [ - "Projection 8000.00 root grouping(gid)->Column#11", - "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(gid)->gid", - " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' select a, grouping(b,a) from t group by a,b with rollup; -- 6. resolve normal column a to grouping set column a'", - "Plan": [ - "Projection 8000.00 root Column#7->Column#11, grouping(gid)->Column#12", - "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(gid)->gid", - " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' select a+1, grouping(b) from t group by a+1, b with rollup; -- 7. 
resolve field list a+1 to grouping set column a+1", - "Plan": [ - "Projection 8000.00 root Column#7->Column#11, grouping(gid)->Column#12", - "└─HashAgg 8000.00 root group by:Column#7, Column#8, gid, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(gid)->gid", - " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, 0->gid],[Column#7, ->Column#8, 1->gid],[Column#7, Column#8, 3->gid]; schema: [Column#7,Column#8,gid]", - " └─Projection 10000.00 root plus(test.t.a, 1)->Column#7, test.t.b->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT SUM(profit) AS profit FROM sales GROUP BY year+2, year+profit WITH ROLLUP order by year+2; -- 8. order by item year+2 resolve to gby grouping expression", - "Plan": [ - "Projection 8000.00 root Column#10", - "└─Sort 8000.00 root Column#7", - " └─HashAgg 8000.00 root group by:Column#15, Column#16, Column#17, funcs:sum(Column#14)->Column#10, funcs:firstrow(Column#15)->Column#7", - " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#14, Column#7->Column#15, Column#8->Column#16, gid->Column#17", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", - " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT year+2, SUM(profit) AS profit FROM sales GROUP BY year+2, year+profit WITH ROLLUP order by year+2; -- 9. order by item year+2 resolve to select field", - "Plan": [ - "Projection 8000.00 root Column#7->Column#11, Column#10", - "└─Sort 8000.00 root Column#7", - " └─HashAgg 8000.00 root group by:Column#16, Column#17, Column#18, funcs:sum(Column#15)->Column#10, funcs:firstrow(Column#16)->Column#7", - " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", - " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT year+2 as y, SUM(profit) as profit FROM sales GROUP BY year+2, year+profit WITH ROLLUP having y > 2002 order by year+2, profit; -- 10. 
having (year+2) shouldn't be pushed down", - "Plan": [ - "Projection 6400.00 root Column#7, Column#10", - "└─Sort 6400.00 root Column#7, Column#10", - " └─HashAgg 6400.00 root group by:Column#16, Column#17, Column#18, funcs:sum(Column#15)->Column#10, funcs:firstrow(Column#16)->Column#7", - " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18", - " └─Selection 8000.00 root gt(Column#7, 2002)", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", - " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT year+2 as y, SUM(profit) AS profit, grouping(year+2) FROM sales GROUP BY year+2, year+profit WITH ROLLUP having y > 2002 order by year+2, profit; -- 11. grouping function validation", - "Plan": [ - "Sort 6400.00 root Column#7, Column#10", - "└─Projection 6400.00 root Column#7, Column#10, grouping(gid)->Column#11", - " └─HashAgg 6400.00 root group by:Column#19, Column#20, Column#21, funcs:sum(Column#18)->Column#10, funcs:firstrow(Column#19)->Column#7, funcs:firstrow(Column#20)->gid", - " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#18, Column#7->Column#19, gid->Column#20, Column#8->Column#21", - " └─Selection 8000.00 root gt(Column#7, 2002)", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid],[test.sales.profit, Column#7, ->Column#8, 1->gid],[test.sales.profit, Column#7, Column#8, 3->gid]; schema: [test.sales.profit,Column#7,Column#8,gid]", - " └─Projection 10000.00 root test.sales.profit, plus(test.sales.year, 2)->Column#7, plus(test.sales.year, test.sales.profit)->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT year, country, product, SUM(profit) AS profit FROM sales GROUP BY year, country, product with rollup order by grouping(year); -- 12. 
grouping function in order by clause", - "Plan": [ - "Projection 8000.00 root Column#7, Column#8->Column#13, Column#9->Column#14, Column#11", - "└─Projection 8000.00 root Column#11, Column#7, Column#8, Column#9, gid", - " └─Sort 8000.00 root Column#21", - " └─Projection 8000.00 root Column#11, Column#7, Column#8, Column#9, gid, grouping(gid)->Column#21", - " └─HashAgg 8000.00 root group by:Column#17, Column#18, Column#19, Column#20, funcs:sum(Column#16)->Column#11, funcs:firstrow(Column#17)->Column#7, funcs:firstrow(Column#18)->Column#8, funcs:firstrow(Column#19)->Column#9, funcs:firstrow(Column#20)->gid", - " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#16, Column#7->Column#17, Column#8->Column#18, Column#9->Column#19, gid->Column#20", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, ->Column#9, 0->gid],[test.sales.profit, Column#7, ->Column#8, ->Column#9, 1->gid],[test.sales.profit, Column#7, Column#8, ->Column#9, 3->gid],[test.sales.profit, Column#7, Column#8, Column#9, 7->gid]; schema: [test.sales.profit,Column#7,Column#8,Column#9,gid]", - " └─Projection 10000.00 root test.sales.profit, test.sales.year->Column#7, test.sales.country->Column#8, test.sales.product->Column#9", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT country, product, SUM(profit) AS profit FROM sales GROUP BY country, country, product with rollup order by grouping(country); -- 13. 12 under gpos case", - "Plan": [ - "Projection 8000.00 root Column#7, Column#8->Column#13, Column#11", - "└─Projection 8000.00 root Column#11, Column#7, Column#8, gid", - " └─Sort 8000.00 root Column#20", - " └─Projection 8000.00 root Column#11, Column#7, Column#8, gid, grouping(gid)->Column#20", - " └─HashAgg 8000.00 root group by:Column#16, Column#16, Column#17, Column#18, Column#19, funcs:sum(Column#15)->Column#11, funcs:firstrow(Column#16)->Column#7, funcs:firstrow(Column#17)->Column#8, funcs:firstrow(Column#18)->gid", - " └─Projection 10000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18, gpos->Column#19", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid, 0->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 1->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 2->gpos],[test.sales.profit, Column#7, Column#8, 3->gid, 3->gpos]; schema: [test.sales.profit,Column#7,Column#8,gid,gpos]", - " └─Projection 10000.00 root test.sales.profit, test.sales.country->Column#7, test.sales.product->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT year, country, product, SUM(profit) AS profit FROM sales GROUP BY year, country, product with rollup having grouping(year) > 0 order by grouping(year); -- 14. 
grouping function in having clause", - "Plan": [ - "Projection 6400.00 root Column#7, Column#8->Column#13, Column#9->Column#14, Column#11", - "└─Projection 6400.00 root Column#11, Column#7, Column#8, Column#9, gid", - " └─Sort 6400.00 root Column#21", - " └─Projection 6400.00 root Column#11, Column#7, Column#8, Column#9, gid, grouping(gid)->Column#21", - " └─HashAgg 6400.00 root group by:Column#17, Column#18, Column#19, Column#20, funcs:sum(Column#16)->Column#11, funcs:firstrow(Column#17)->Column#7, funcs:firstrow(Column#18)->Column#8, funcs:firstrow(Column#19)->Column#9, funcs:firstrow(Column#20)->gid", - " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#16, Column#7->Column#17, Column#8->Column#18, Column#9->Column#19, gid->Column#20", - " └─Selection 8000.00 root gt(grouping(gid), 0)", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, ->Column#9, 0->gid],[test.sales.profit, Column#7, ->Column#8, ->Column#9, 1->gid],[test.sales.profit, Column#7, Column#8, ->Column#9, 3->gid],[test.sales.profit, Column#7, Column#8, Column#9, 7->gid]; schema: [test.sales.profit,Column#7,Column#8,Column#9,gid]", - " └─Projection 10000.00 root test.sales.profit, test.sales.year->Column#7, test.sales.country->Column#8, test.sales.product->Column#9", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT country, product, SUM(profit) AS profit FROM sales GROUP BY country, country, product with rollup having grouping(country) > 0 order by grouping(country); -- 15. 14 under gpos case", - "Plan": [ - "Projection 6400.00 root Column#7, Column#8->Column#13, Column#11", - "└─Projection 6400.00 root Column#11, Column#7, Column#8, gid", - " └─Sort 6400.00 root Column#20", - " └─Projection 6400.00 root Column#11, Column#7, Column#8, gid, grouping(gid)->Column#20", - " └─HashAgg 6400.00 root group by:Column#16, Column#16, Column#17, Column#18, Column#19, funcs:sum(Column#15)->Column#11, funcs:firstrow(Column#16)->Column#7, funcs:firstrow(Column#17)->Column#8, funcs:firstrow(Column#18)->gid", - " └─Projection 8000.00 root cast(test.sales.profit, decimal(10,0) BINARY)->Column#15, Column#7->Column#16, Column#8->Column#17, gid->Column#18, gpos->Column#19", - " └─Selection 8000.00 root gt(grouping(gid), 0)", - " └─Expand 10000.00 root level-projection:[test.sales.profit, ->Column#7, ->Column#8, 0->gid, 0->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 1->gpos],[test.sales.profit, Column#7, ->Column#8, 1->gid, 2->gpos],[test.sales.profit, Column#7, Column#8, 3->gid, 3->gpos]; schema: [test.sales.profit,Column#7,Column#8,gid,gpos]", - " └─Projection 10000.00 root test.sales.profit, test.sales.country->Column#7, test.sales.product->Column#8", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - }, - { - "SQL": "explain format = 'brief' SELECT year, country, product, grouping(year, country, product) from sales group by year, country, product with rollup having grouping(year, country, product) <> 0; -- 16. 
grouping function recreating fix", - "Plan": [ - "Projection 6400.00 root Column#7->Column#12, Column#8->Column#13, Column#9->Column#14, grouping(gid)->Column#15", - "└─HashAgg 6400.00 root group by:Column#7, Column#8, Column#9, gid, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(Column#8)->Column#8, funcs:firstrow(Column#9)->Column#9, funcs:firstrow(gid)->gid", - " └─Selection 8000.00 root ne(grouping(gid), 0)", - " └─Expand 10000.00 root level-projection:[->Column#7, ->Column#8, ->Column#9, 0->gid],[Column#7, ->Column#8, ->Column#9, 1->gid],[Column#7, Column#8, ->Column#9, 3->gid],[Column#7, Column#8, Column#9, 7->gid]; schema: [Column#7,Column#8,Column#9,gid]", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:sales keep order:false, stats:pseudo" - ], - "Warning": null - } - ] - } -] diff --git a/pkg/planner/core/find_best_task.go b/pkg/planner/core/find_best_task.go deleted file mode 100644 index c098a8320b796..0000000000000 --- a/pkg/planner/core/find_best_task.go +++ /dev/null @@ -1,3016 +0,0 @@ -// Copyright 2017 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core - -import ( - "cmp" - "fmt" - "math" - "slices" - "strings" - - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/expression" - "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/meta/model" - "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/cardinality" - "github.com/pingcap/tidb/pkg/planner/core/base" - "github.com/pingcap/tidb/pkg/planner/core/cost" - "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" - "github.com/pingcap/tidb/pkg/planner/property" - "github.com/pingcap/tidb/pkg/planner/util" - "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" - "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" - "github.com/pingcap/tidb/pkg/planner/util/utilfuncp" - "github.com/pingcap/tidb/pkg/statistics" - "github.com/pingcap/tidb/pkg/types" - tidbutil "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/chunk" - "github.com/pingcap/tidb/pkg/util/collate" - h "github.com/pingcap/tidb/pkg/util/hint" - "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/ranger" - "github.com/pingcap/tidb/pkg/util/tracing" - "go.uber.org/zap" -) - -// PlanCounterDisabled is the default value of PlanCounterTp, indicating that optimizer needn't force a plan. -var PlanCounterDisabled base.PlanCounterTp = -1 - -// GetPropByOrderByItems will check if this sort property can be pushed or not. In order to simplify the problem, we only -// consider the case that all expression are columns. 
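// [Editorial sketch, not part of the original patch] A quick illustration of
// how an ORDER BY list becomes a physical sort property, per the function
// below. Assuming colA and colB are *expression.Column values, `ORDER BY
// colA, colB DESC` maps to sort items in the same order, while any non-column
// expression makes the property unbuildable:
//
//	items := []*util.ByItems{
//		{Expr: colA},             // ascending
//		{Expr: colB, Desc: true}, // descending
//	}
//	prop, ok := GetPropByOrderByItems(items)
//	// ok == true; prop.SortItems keeps the column order and directions.
//
//	items = append(items, &util.ByItems{Expr: plusExpr}) // hypothetical a+1
//	_, ok = GetPropByOrderByItems(items)
//	// ok == false: only pure columns can be pushed as a sort property.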
-func GetPropByOrderByItems(items []*util.ByItems) (*property.PhysicalProperty, bool) {
-	propItems := make([]property.SortItem, 0, len(items))
-	for _, item := range items {
-		col, ok := item.Expr.(*expression.Column)
-		if !ok {
-			return nil, false
-		}
-		propItems = append(propItems, property.SortItem{Col: col, Desc: item.Desc})
-	}
-	return &property.PhysicalProperty{SortItems: propItems}, true
-}
-
-// GetPropByOrderByItemsContainScalarFunc will check if this sort property can be pushed or not. In order to simplify the
-// problem, we only consider the case that all expressions are columns or certain special scalar functions.
-func GetPropByOrderByItemsContainScalarFunc(items []*util.ByItems) (*property.PhysicalProperty, bool, bool) {
-	propItems := make([]property.SortItem, 0, len(items))
-	onlyColumn := true
-	for _, item := range items {
-		switch expr := item.Expr.(type) {
-		case *expression.Column:
-			propItems = append(propItems, property.SortItem{Col: expr, Desc: item.Desc})
-		case *expression.ScalarFunction:
-			col, desc := expr.GetSingleColumn(item.Desc)
-			if col == nil {
-				return nil, false, false
-			}
-			propItems = append(propItems, property.SortItem{Col: col, Desc: desc})
-			onlyColumn = false
-		default:
-			return nil, false, false
-		}
-	}
-	return &property.PhysicalProperty{SortItems: propItems}, true, onlyColumn
-}
-
-func findBestTask4LogicalTableDual(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, opt *optimizetrace.PhysicalOptimizeOp) (base.Task, int64, error) {
-	p := lp.(*logicalop.LogicalTableDual)
-	// If the required property is not empty and the row count > 1,
-	// we cannot ensure this required property.
-	// But if the row count is 0 or 1, we don't need to care about the property.
-	if (!prop.IsSortItemEmpty() && p.RowCount > 1) || planCounter.Empty() {
-		return base.InvalidTask, 0, nil
-	}
-	dual := PhysicalTableDual{
-		RowCount: p.RowCount,
-	}.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset())
-	dual.SetSchema(p.Schema())
-	planCounter.Dec(1)
-	utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, dual, prop)
-	rt := &RootTask{}
-	rt.SetPlan(dual)
-	rt.SetEmpty(p.RowCount == 0)
-	return rt, 1, nil
-}
-
-func findBestTask4LogicalShow(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, int64, error) {
-	p := lp.(*logicalop.LogicalShow)
-	if !prop.IsSortItemEmpty() || planCounter.Empty() {
-		return base.InvalidTask, 0, nil
-	}
-	pShow := PhysicalShow{ShowContents: p.ShowContents, Extractor: p.Extractor}.Init(p.SCtx())
-	pShow.SetSchema(p.Schema())
-	planCounter.Dec(1)
-	rt := &RootTask{}
-	rt.SetPlan(pShow)
-	return rt, 1, nil
-}
-
-func findBestTask4LogicalShowDDLJobs(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, int64, error) {
-	p := lp.(*logicalop.LogicalShowDDLJobs)
-	if !prop.IsSortItemEmpty() || planCounter.Empty() {
-		return base.InvalidTask, 0, nil
-	}
-	pShow := PhysicalShowDDLJobs{JobNumber: p.JobNumber}.Init(p.SCtx())
-	pShow.SetSchema(p.Schema())
-	planCounter.Dec(1)
-	rt := &RootTask{}
-	rt.SetPlan(pShow)
-	return rt, 1, nil
-}
-
-// rebuildChildTasks rebuilds the childTasks to form the clock-th combination.
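// [Editorial sketch, not part of the original patch] The "clock" arithmetic in
// rebuildChildTasks below is a mixed-radix decoding of one global plan counter
// into one candidate choice per child. With childCnts = [3, 2] (child 0 has 3
// candidate tasks, child 1 has 2), counter 5 selects the 3rd task of child 0
// and the 1st task of child 1:
//
//	multAll := int64(3 * 2)           // 6 combinations in total
//	counter := int64(5)
//	multAll /= 3                      // weight of child 0's digit -> 2
//	clock0 := (counter-1)/multAll + 1 // (5-1)/2 + 1 = 3
//	counter = (counter-1)%multAll + 1 // remaining counter for child 1 -> 1
//	multAll /= 2                      // -> 1
//	clock1 := (counter-1)/multAll + 1 // (1-1)/1 + 1 = 1
//
// Each child's FindBestTask decrements its clock and is expected to hit zero
// exactly when the selected candidate is reached, which is what the
// `curClock != 0` check below verifies.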
-func rebuildChildTasks(p *logicalop.BaseLogicalPlan, childTasks *[]base.Task, pp base.PhysicalPlan, childCnts []int64, planCounter int64, ts uint64, opt *optimizetrace.PhysicalOptimizeOp) error {
-	// The taskMap of the child nodes should be rolled back first.
-	for _, child := range p.Children() {
-		child.RollBackTaskMap(ts)
-	}
-
-	multAll := int64(1)
-	var curClock base.PlanCounterTp
-	for _, x := range childCnts {
-		multAll *= x
-	}
-	*childTasks = (*childTasks)[:0]
-	for j, child := range p.Children() {
-		multAll /= childCnts[j]
-		curClock = base.PlanCounterTp((planCounter-1)/multAll + 1)
-		childTask, _, err := child.FindBestTask(pp.GetChildReqProps(j), &curClock, opt)
-		planCounter = (planCounter-1)%multAll + 1
-		if err != nil {
-			return err
-		}
-		if curClock != 0 {
-			return errors.Errorf("PlanCounterTp planCounter is not handled")
-		}
-		if childTask != nil && childTask.Invalid() {
-			return errors.Errorf("The current plan is invalid, please skip this plan")
-		}
-		*childTasks = append(*childTasks, childTask)
-	}
-	return nil
-}
-
-func enumeratePhysicalPlans4Task(
-	p *logicalop.BaseLogicalPlan,
-	physicalPlans []base.PhysicalPlan,
-	prop *property.PhysicalProperty,
-	addEnforcer bool,
-	planCounter *base.PlanCounterTp,
-	opt *optimizetrace.PhysicalOptimizeOp,
-) (base.Task, int64, error) {
-	var bestTask base.Task = base.InvalidTask
-	var curCntPlan, cntPlan int64
-	var err error
-	childTasks := make([]base.Task, 0, p.ChildLen())
-	childCnts := make([]int64, p.ChildLen())
-	cntPlan = 0
-	iteration := iteratePhysicalPlan4BaseLogical
-	if _, ok := p.Self().(*logicalop.LogicalSequence); ok {
-		iteration = iterateChildPlan4LogicalSequence
-	}
-
-	for _, pp := range physicalPlans {
-		timeStampNow := p.GetLogicalTS4TaskMap()
-		savedPlanID := p.SCtx().GetSessionVars().PlanID.Load()
-
-		childTasks, curCntPlan, childCnts, err = iteration(p, pp, childTasks, childCnts, prop, opt)
-		if err != nil {
-			return nil, 0, err
-		}
-
-		// This check makes sure that there is no invalid child task.
-		if len(childTasks) != p.ChildLen() {
-			continue
-		}
-
-		// If the target plan can be found in this physical plan (pp), rebuild childTasks to build the corresponding combination.
-		if planCounter.IsForce() && int64(*planCounter) <= curCntPlan {
-			p.SCtx().GetSessionVars().PlanID.Store(savedPlanID)
-			curCntPlan = int64(*planCounter)
-			err := rebuildChildTasks(p, &childTasks, pp, childCnts, int64(*planCounter), timeStampNow, opt)
-			if err != nil {
-				return nil, 0, err
-			}
-		}
-
-		// Combine the best child tasks with the parent physical plan.
-		curTask := pp.Attach2Task(childTasks...)
-		if curTask.Invalid() {
-			continue
-		}
-
-		// An optimal task may not satisfy the required property, so it should be converted here.
-		if _, ok := curTask.(*RootTask); !ok && prop.TaskTp == property.RootTaskType {
-			curTask = curTask.ConvertToRootTask(p.SCtx())
-		}
-
-		// Enforce the required property on curTask.
-		if addEnforcer {
-			curTask = enforceProperty(prop, curTask, p.Plan.SCtx())
-		}
-
-		// Optimize with the shuffle executor so the plan can run in a parallel manner.
-		if _, isMpp := curTask.(*MppTask); !isMpp && prop.IsSortItemEmpty() {
-			// Currently, we do not regard a shuffled plan as a new plan.
-			curTask = optimizeByShuffle(curTask, p.Plan.SCtx())
-		}
-
-		cntPlan += curCntPlan
-		planCounter.Dec(curCntPlan)
-
-		if planCounter.Empty() {
-			bestTask = curTask
-			break
-		}
-		utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop)
-		// Get the most efficient one.
-		if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil {
-			return nil, 0, err
-		} else if curIsBetter {
-			bestTask = curTask
-		}
-	}
-	return bestTask, cntPlan, nil
-}
-
-// iteratePhysicalPlan4BaseLogical is used to iterate the physical plans and get all child tasks.
-func iteratePhysicalPlan4BaseLogical(
-	p *logicalop.BaseLogicalPlan,
-	selfPhysicalPlan base.PhysicalPlan,
-	childTasks []base.Task,
-	childCnts []int64,
-	_ *property.PhysicalProperty,
-	opt *optimizetrace.PhysicalOptimizeOp,
-) ([]base.Task, int64, []int64, error) {
-	// Find the best child tasks first.
-	childTasks = childTasks[:0]
-	// The curCntPlan records the number of possible plans for pp.
-	curCntPlan := int64(1)
-	for j, child := range p.Children() {
-		childProp := selfPhysicalPlan.GetChildReqProps(j)
-		childTask, cnt, err := child.FindBestTask(childProp, &PlanCounterDisabled, opt)
-		childCnts[j] = cnt
-		if err != nil {
-			return nil, 0, childCnts, err
-		}
-		curCntPlan = curCntPlan * cnt
-		if childTask != nil && childTask.Invalid() {
-			return nil, 0, childCnts, nil
-		}
-		childTasks = append(childTasks, childTask)
-	}
-
-	// This check makes sure that there is no invalid child task.
-	if len(childTasks) != p.ChildLen() {
-		return nil, 0, childCnts, nil
-	}
-	return childTasks, curCntPlan, childCnts, nil
-}
-
-// iterateChildPlan4LogicalSequence does the special part for sequence. We need to iterate its children one by one to check
-// whether the former child is a valid plan and then go on to the next one.
-func iterateChildPlan4LogicalSequence(
-	p *logicalop.BaseLogicalPlan,
-	selfPhysicalPlan base.PhysicalPlan,
-	childTasks []base.Task,
-	childCnts []int64,
-	prop *property.PhysicalProperty,
-	opt *optimizetrace.PhysicalOptimizeOp,
-) ([]base.Task, int64, []int64, error) {
-	// Find the best child tasks first.
-	childTasks = childTasks[:0]
-	// The curCntPlan records the number of possible plans for pp.
-	curCntPlan := int64(1)
-	lastIdx := p.ChildLen() - 1
-	for j := 0; j < lastIdx; j++ {
-		child := p.Children()[j]
-		childProp := selfPhysicalPlan.GetChildReqProps(j)
-		childTask, cnt, err := child.FindBestTask(childProp, &PlanCounterDisabled, opt)
-		childCnts[j] = cnt
-		if err != nil {
-			return nil, 0, nil, err
-		}
-		curCntPlan = curCntPlan * cnt
-		if childTask != nil && childTask.Invalid() {
-			return nil, 0, nil, nil
-		}
-		_, isMpp := childTask.(*MppTask)
-		if !isMpp && prop.IsFlashProp() {
-			break
-		}
-		childTasks = append(childTasks, childTask)
-	}
-	// This check makes sure that there is no invalid child task.
-	if len(childTasks) != p.ChildLen()-1 {
-		return nil, 0, nil, nil
-	}
-
-	lastChildProp := selfPhysicalPlan.GetChildReqProps(lastIdx).CloneEssentialFields()
-	if lastChildProp.IsFlashProp() {
-		lastChildProp.CTEProducerStatus = property.AllCTECanMpp
-	}
-	lastChildTask, cnt, err := p.Children()[lastIdx].FindBestTask(lastChildProp, &PlanCounterDisabled, opt)
-	childCnts[lastIdx] = cnt
-	if err != nil {
-		return nil, 0, nil, err
-	}
-	curCntPlan = curCntPlan * cnt
-	if lastChildTask != nil && lastChildTask.Invalid() {
-		return nil, 0, nil, nil
-	}
-
-	if _, ok := lastChildTask.(*MppTask); !ok && lastChildProp.CTEProducerStatus == property.AllCTECanMpp {
-		return nil, 0, nil, nil
-	}
-
-	childTasks = append(childTasks, lastChildTask)
-	return childTasks, curCntPlan, childCnts, nil
-}
-
-// compareTaskCost compares the cost of curTask and bestTask and returns whether curTask's cost is smaller than bestTask's.
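// [Editorial note, not part of the original patch] Invalid costs dominate this
// comparison regardless of the numeric values, which in truth-table form is:
//
//	curInvalid  bestInvalid  curIsBetter
//	true        any          false  (never prefer an invalid current task)
//	false       true         true   (any valid task beats an invalid one)
//	false       false        curCost < bestCost
//
// Since getTaskPlanCost reports an invalid task with cost math.MaxFloat64, the
// explicit flags make the handling unambiguous even when both sides are invalid.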
-func compareTaskCost(curTask, bestTask base.Task, op *optimizetrace.PhysicalOptimizeOp) (curIsBetter bool, err error) {
-	curCost, curInvalid, err := utilfuncp.GetTaskPlanCost(curTask, op)
-	if err != nil {
-		return false, err
-	}
-	bestCost, bestInvalid, err := utilfuncp.GetTaskPlanCost(bestTask, op)
-	if err != nil {
-		return false, err
-	}
-	if curInvalid {
-		return false, nil
-	}
-	if bestInvalid {
-		return true, nil
-	}
-	return curCost < bestCost, nil
-}
-
-// getTaskPlanCost returns the cost of this task.
-// The new cost interface will be used if EnableNewCostInterface is true.
-// The second returned value indicates whether this task is valid.
-func getTaskPlanCost(t base.Task, pop *optimizetrace.PhysicalOptimizeOp) (float64, bool, error) {
-	if t.Invalid() {
-		return math.MaxFloat64, true, nil
-	}
-
-	// Use the new cost interface.
-	var (
-		taskType         property.TaskType
-		indexPartialCost float64
-	)
-	switch t.(type) {
-	case *RootTask:
-		taskType = property.RootTaskType
-	case *CopTask: // no need to know whether the task is single-read or double-read, so both CopSingleReadTaskType and CopDoubleReadTaskType are OK
-		cop := t.(*CopTask)
-		if cop.indexPlan != nil && cop.tablePlan != nil { // handle IndexLookup specially
-			taskType = property.CopMultiReadTaskType
-			// Keep compatible with the old cost interface: for CopMultiReadTask, the cost is idxCost + tblCost.
-			if !cop.indexPlanFinished { // only consider the index cost in this case
-				idxCost, err := getPlanCost(cop.indexPlan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
-				return idxCost, false, err
-			}
-			// Consider both sides.
-			idxCost, err := getPlanCost(cop.indexPlan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
-			if err != nil {
-				return 0, false, err
-			}
-			tblCost, err := getPlanCost(cop.tablePlan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
-			if err != nil {
-				return 0, false, err
-			}
-			return idxCost + tblCost, false, nil
-		}
-
-		taskType = property.CopSingleReadTaskType
-
-		// TiFlash can run cop tasks as well; check whether this cop task will run on TiKV or TiFlash.
-		if cop.tablePlan != nil {
-			leafNode := cop.tablePlan
-			for len(leafNode.Children()) > 0 {
-				leafNode = leafNode.Children()[0]
-			}
-			if tblScan, isScan := leafNode.(*PhysicalTableScan); isScan && tblScan.StoreType == kv.TiFlash {
-				taskType = property.MppTaskType
-			}
-		}
-
-		// See the comment in function `convertToIndexMergeScan` for the detailed reason:
-		// for a cop task with {indexPlan=nil, tablePlan=xxx, idxMergePartPlans=[x,x,x], indexPlanFinished=true}, we should
-		// add the partial index plan costs into the final cost, because the code below that uses t.Plan() only calculates
-		// the cost of the table plan.
-		if cop.indexPlanFinished && len(cop.idxMergePartPlans) != 0 {
-			for _, partialScan := range cop.idxMergePartPlans {
-				partialCost, err := getPlanCost(partialScan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
-				if err != nil {
-					return 0, false, err
-				}
-				indexPartialCost += partialCost
-			}
-		}
-	case *MppTask:
-		taskType = property.MppTaskType
-	default:
-		return 0, false, errors.New("unknown task type")
-	}
-	if t.Plan() == nil {
-		// It's a very special case for the index merge case:
-		// t.Plan() == nil in the index merge COP case, which means indexPlanFinished is false.
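		// [Editorial note, not part of the original patch] In this branch the cop
		// task has no root operator attached yet, so the task cost is simply the
		// sum of the partial index scans' costs; e.g. for an IndexMerge over two
		// hypothetical indexes idx_a and idx_b, the loop below returns
		// cost(idx_a scan) + cost(idx_b scan).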
-        cost := 0.0
-        copTsk := t.(*CopTask)
-        for _, partialScan := range copTsk.idxMergePartPlans {
-            partialCost, err := getPlanCost(partialScan, taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
-            if err != nil {
-                return 0, false, err
-            }
-            cost += partialCost
-        }
-        return cost, false, nil
-    }
-    cost, err := getPlanCost(t.Plan(), taskType, optimizetrace.NewDefaultPlanCostOption().WithOptimizeTracer(pop))
-    return cost + indexPartialCost, false, err
-}
-
-func appendCandidate4PhysicalOptimizeOp(pop *optimizetrace.PhysicalOptimizeOp, lp base.LogicalPlan, pp base.PhysicalPlan, prop *property.PhysicalProperty) {
-    if pop == nil || pop.GetTracer() == nil || pp == nil {
-        return
-    }
-    candidate := &tracing.CandidatePlanTrace{
-        PlanTrace: &tracing.PlanTrace{TP: pp.TP(), ID: pp.ID(),
-            ExplainInfo: pp.ExplainInfo(), ProperType: prop.String()},
-        MappingLogicalPlan: tracing.CodecPlanName(lp.TP(), lp.ID())}
-    pop.GetTracer().AppendCandidate(candidate)
-
-    // PhysicalIndexMergeJoin/PhysicalIndexHashJoin/PhysicalIndexJoin use an innerTask as a child instead of calling findBestTask,
-    // and innerTask.plan() is appended to the plan tree in appendChildCandidate with an empty MappingLogicalPlan field, so it is
-    // not mapped to the logical plan, which leads to a missing physical plan when that logical plan gets selected.
-    // The fix is to add innerTask.plan() to the plan tree and map it to the correct logical plan.
-    index := -1
-    var plan base.PhysicalPlan
-    switch join := pp.(type) {
-    case *PhysicalIndexMergeJoin:
-        index = join.InnerChildIdx
-        plan = join.innerPlan
-    case *PhysicalIndexHashJoin:
-        index = join.InnerChildIdx
-        plan = join.innerPlan
-    case *PhysicalIndexJoin:
-        index = join.InnerChildIdx
-        plan = join.innerPlan
-    }
-    if index != -1 {
-        child := lp.(*logicalop.BaseLogicalPlan).Children()[index]
-        candidate := &tracing.CandidatePlanTrace{
-            PlanTrace: &tracing.PlanTrace{TP: plan.TP(), ID: plan.ID(),
-                ExplainInfo: plan.ExplainInfo(), ProperType: prop.String()},
-            MappingLogicalPlan: tracing.CodecPlanName(child.TP(), child.ID())}
-        pop.GetTracer().AppendCandidate(candidate)
-    }
-    pp.AppendChildCandidate(pop)
-}
-
-func appendPlanCostDetail4PhysicalOptimizeOp(pop *optimizetrace.PhysicalOptimizeOp, detail *tracing.PhysicalPlanCostDetail) {
-    if pop == nil || pop.GetTracer() == nil {
-        return
-    }
-    pop.GetTracer().PhysicalPlanCostDetails[fmt.Sprintf("%v_%v", detail.GetPlanType(), detail.GetPlanID())] = detail
-}
-
-// findBestTask is the key workflow that drives the logical plan tree to generate optimal physical plans.
-// The logic inside it is mainly about physical plan enumeration and task encapsulation. It should
-// be defined in the core pkg and be called by logical plans in their logical interface implementations.
-func findBestTask(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp,
-    opt *optimizetrace.PhysicalOptimizeOp) (bestTask base.Task, cntPlan int64, err error) {
-    p := lp.GetBaseLogicalPlan().(*logicalop.BaseLogicalPlan)
-    // If p is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself,
-    // and set inner child prop nil, so here we do nothing.
-    if prop == nil {
-        return nil, 1, nil
-    }
-    // Look up the task with this prop in the task map.
-    // It's used to reduce double counting.
- bestTask = p.GetTask(prop) - if bestTask != nil { - planCounter.Dec(1) - return bestTask, 1, nil - } - - canAddEnforcer := prop.CanAddEnforcer - - if prop.TaskTp != property.RootTaskType && !prop.IsFlashProp() { - // Currently all plan cannot totally push down to TiKV. - p.StoreTask(prop, base.InvalidTask) - return base.InvalidTask, 0, nil - } - - cntPlan = 0 - // prop should be read only because its cached hashcode might be not consistent - // when it is changed. So we clone a new one for the temporary changes. - newProp := prop.CloneEssentialFields() - var plansFitsProp, plansNeedEnforce []base.PhysicalPlan - var hintWorksWithProp bool - // Maybe the plan can satisfy the required property, - // so we try to get the task without the enforced sort first. - plansFitsProp, hintWorksWithProp, err = p.Self().ExhaustPhysicalPlans(newProp) - if err != nil { - return nil, 0, err - } - if !hintWorksWithProp && !newProp.IsSortItemEmpty() { - // If there is a hint in the plan and the hint cannot satisfy the property, - // we enforce this property and try to generate the PhysicalPlan again to - // make sure the hint can work. - canAddEnforcer = true - } - - if canAddEnforcer { - // Then, we use the empty property to get physicalPlans and - // try to get the task with an enforced sort. - newProp.SortItems = []property.SortItem{} - newProp.SortItemsForPartition = []property.SortItem{} - newProp.ExpectedCnt = math.MaxFloat64 - newProp.MPPPartitionCols = nil - newProp.MPPPartitionTp = property.AnyType - var hintCanWork bool - plansNeedEnforce, hintCanWork, err = p.Self().ExhaustPhysicalPlans(newProp) - if err != nil { - return nil, 0, err - } - if hintCanWork && !hintWorksWithProp { - // If the hint can work with the empty property, but cannot work with - // the required property, we give up `plansFitProp` to make sure the hint - // can work. - plansFitsProp = nil - } - if !hintCanWork && !hintWorksWithProp && !prop.CanAddEnforcer { - // If the original property is not enforced and hint cannot - // work anyway, we give up `plansNeedEnforce` for efficiency, - plansNeedEnforce = nil - } - newProp = prop - } - - var cnt int64 - var curTask base.Task - if bestTask, cnt, err = enumeratePhysicalPlans4Task(p, plansFitsProp, newProp, false, planCounter, opt); err != nil { - return nil, 0, err - } - cntPlan += cnt - if planCounter.Empty() { - goto END - } - - curTask, cnt, err = enumeratePhysicalPlans4Task(p, plansNeedEnforce, newProp, true, planCounter, opt) - if err != nil { - return nil, 0, err - } - cntPlan += cnt - if planCounter.Empty() { - bestTask = curTask - goto END - } - utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop) - if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil { - return nil, 0, err - } else if curIsBetter { - bestTask = curTask - } - -END: - p.StoreTask(prop, bestTask) - return bestTask, cntPlan, nil -} - -func findBestTask4LogicalMemTable(lp base.LogicalPlan, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, opt *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) { - p := lp.(*logicalop.LogicalMemTable) - if prop.MPPPartitionTp != property.AnyType { - return base.InvalidTask, 0, nil - } - - // If prop.CanAddEnforcer is true, the prop.SortItems need to be set nil for p.findBestTask. - // Before function return, reset it for enforcing task prop. 
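A rough standalone illustration of the enforcer idea used in findBestTask above, with hypothetical types (not TiDB's): when no plan natively satisfies the required sort order, plans for the empty property are considered with an explicit sort (the "enforcer") priced on top, and the cheaper candidate wins. The sortCost constant is an assumed placeholder.

package main

import "fmt"

type candidate struct {
	name   string
	cost   float64
	sorted bool // whether the plan already produces the required order
}

const sortCost = 4.0 // assumed cost of the enforced sort operator

func pickBest(requireSorted bool, cands []candidate) candidate {
	best := candidate{cost: 1e18}
	for _, c := range cands {
		cost := c.cost
		if requireSorted && !c.sorted {
			cost += sortCost // enforce the property on top of an unsorted plan
		}
		if cost < best.cost {
			c.cost = cost
			best = c
		}
	}
	return best
}

func main() {
	cands := []candidate{
		{"IndexScan(keep order)", 12, true},
		{"TableScan", 5, false},
	}
	// TableScan + enforced Sort (5+4=9) beats the order-preserving IndexScan (12).
	fmt.Println(pickBest(true, cands).name)
}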
- oldProp := prop.CloneEssentialFields() - if prop.CanAddEnforcer { - // First, get the bestTask without enforced prop - prop.CanAddEnforcer = false - cnt := int64(0) - t, cnt, err = p.FindBestTask(prop, planCounter, opt) - if err != nil { - return nil, 0, err - } - prop.CanAddEnforcer = true - if t != base.InvalidTask { - cntPlan = cnt - return - } - // Next, get the bestTask with enforced prop - prop.SortItems = []property.SortItem{} - } - defer func() { - if err != nil { - return - } - if prop.CanAddEnforcer { - *prop = *oldProp - t = enforceProperty(prop, t, p.Plan.SCtx()) - prop.CanAddEnforcer = true - } - }() - - if !prop.IsSortItemEmpty() || planCounter.Empty() { - return base.InvalidTask, 0, nil - } - memTable := PhysicalMemTable{ - DBName: p.DBName, - Table: p.TableInfo, - Columns: p.Columns, - Extractor: p.Extractor, - QueryTimeRange: p.QueryTimeRange, - }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset()) - memTable.SetSchema(p.Schema()) - planCounter.Dec(1) - utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, p, memTable, prop) - rt := &RootTask{} - rt.SetPlan(memTable) - return rt, 1, nil -} - -// tryToGetDualTask will check if the push down predicate has false constant. If so, it will return table dual. -func tryToGetDualTask(ds *DataSource) (base.Task, error) { - for _, cond := range ds.PushedDownConds { - if con, ok := cond.(*expression.Constant); ok && con.DeferredExpr == nil && con.ParamMarker == nil { - result, _, err := expression.EvalBool(ds.SCtx().GetExprCtx().GetEvalCtx(), []expression.Expression{cond}, chunk.Row{}) - if err != nil { - return nil, err - } - if !result { - dual := PhysicalTableDual{}.Init(ds.SCtx(), ds.StatsInfo(), ds.QueryBlockOffset()) - dual.SetSchema(ds.Schema()) - rt := &RootTask{} - rt.SetPlan(dual) - return rt, nil - } - } - } - return nil, nil -} - -// candidatePath is used to maintain required info for skyline pruning. -type candidatePath struct { - path *util.AccessPath - accessCondsColMap util.Col2Len // accessCondsColMap maps Column.UniqueID to column length for the columns in AccessConds. - indexCondsColMap util.Col2Len // indexCondsColMap maps Column.UniqueID to column length for the columns in AccessConds and indexFilters. - isMatchProp bool -} - -func compareBool(l, r bool) int { - if l == r { - return 0 - } - if !l { - return -1 - } - return 1 -} - -func compareIndexBack(lhs, rhs *candidatePath) (int, bool) { - result := compareBool(lhs.path.IsSingleScan, rhs.path.IsSingleScan) - if result == 0 && !lhs.path.IsSingleScan { - // if both lhs and rhs need to access table after IndexScan, we utilize the set of columns that occurred in AccessConds and IndexFilters - // to compare how many table rows will be accessed. - return util.CompareCol2Len(lhs.indexCondsColMap, rhs.indexCondsColMap) - } - return result, true -} - -func compareGlobalIndex(lhs, rhs *candidatePath) int { - if lhs.path.IsTablePath() || rhs.path.IsTablePath() || - len(lhs.path.PartialIndexPaths) != 0 || len(rhs.path.PartialIndexPaths) != 0 { - return 0 - } - return compareBool(lhs.path.Index.Global, rhs.path.Index.Global) -} - -// compareCandidates is the core of skyline pruning, which is used to decide which candidate path is better. -// The return value is 1 if lhs is better, -1 if rhs is better, 0 if they are equivalent or not comparable. 
-func compareCandidates(sctx base.PlanContext, prop *property.PhysicalProperty, lhs, rhs *candidatePath) int {
-    // Due to #50125, full scan on MVIndex has been disabled, so MVIndex path might lead to 'can't find a proper plan' error at the end.
-    // Avoid the MVIndex path excluding all other paths and leading to a 'can't find a proper plan' error; see #49438 for an example.
-    if isMVIndexPath(lhs.path) || isMVIndexPath(rhs.path) {
-        return 0
-    }
-
-    // This rule is empirical but not always correct.
-    // If x's range row count is significantly lower than y's, for example, 1000 times, we think x is better.
-    if lhs.path.CountAfterAccess > 100 && rhs.path.CountAfterAccess > 100 && // to prevent some extreme cases, e.g. 0.01 : 10
-        len(lhs.path.PartialIndexPaths) == 0 && len(rhs.path.PartialIndexPaths) == 0 && // not IndexMerge since its row count estimation is not accurate enough
-        prop.ExpectedCnt == math.MaxFloat64 { // Limit may affect access row count
-        threshold := float64(fixcontrol.GetIntWithDefault(sctx.GetSessionVars().OptimizerFixControl, fixcontrol.Fix45132, 1000))
-        if threshold > 0 { // set it to 0 to disable this rule
-            if lhs.path.CountAfterAccess/rhs.path.CountAfterAccess > threshold {
-                return -1
-            }
-            if rhs.path.CountAfterAccess/lhs.path.CountAfterAccess > threshold {
-                return 1
-            }
-        }
-    }
-
-    // Below compares the two candidate paths on four dimensions:
-    // (1): the set of columns that occurred in the access condition,
-    // (2): whether it requires a double scan,
-    // (3): whether or not it matches the physical property,
-    // (4): whether it is a global index path.
-    // If `x` is not worse than `y` on all factors,
-    // and there exists one factor on which `x` is better than `y`, then `x` is better than `y`.
-    accessResult, comparable1 := util.CompareCol2Len(lhs.accessCondsColMap, rhs.accessCondsColMap)
-    if !comparable1 {
-        return 0
-    }
-    scanResult, comparable2 := compareIndexBack(lhs, rhs)
-    if !comparable2 {
-        return 0
-    }
-    matchResult, globalResult := compareBool(lhs.isMatchProp, rhs.isMatchProp), compareGlobalIndex(lhs, rhs)
-    sum := accessResult + scanResult + matchResult + globalResult
-    if accessResult >= 0 && scanResult >= 0 && matchResult >= 0 && globalResult >= 0 && sum > 0 {
-        return 1
-    }
-    if accessResult <= 0 && scanResult <= 0 && matchResult <= 0 && globalResult <= 0 && sum < 0 {
-        return -1
-    }
-    return 0
-}
-
-func isMatchProp(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) bool {
-    var isMatchProp bool
-    if path.IsIntHandlePath {
-        pkCol := ds.getPKIsHandleCol()
-        if len(prop.SortItems) == 1 && pkCol != nil {
-            isMatchProp = prop.SortItems[0].Col.EqualColumn(pkCol)
-            if path.StoreType == kv.TiFlash {
-                isMatchProp = isMatchProp && !prop.SortItems[0].Desc
-            }
-        }
-        return isMatchProp
-    }
-    all, _ := prop.AllSameOrder()
-    // When the prop is empty or `all` is false, `isMatchProp` had better be `false`, because
-    // the index scan then does not need to keep order.
-
-    // Basically, if `prop.SortItems` is a prefix of `path.IdxCols`, then `isMatchProp` is true. However, we need to consider
-    // the situations where some columns of `path.IdxCols` are evaluated as constant.
-    // For example:
-    // ```
-    // create table t(a int, b int, c int, d int, index idx_a_b_c(a, b, c), index idx_d_c_b_a(d, c, b, a));
-    // select * from t where a = 1 order by b, c;
-    // select * from t where b = 1 order by a, c;
-    // select * from t where d = 1 and b = 2 order by c, a;
-    // select * from t where d = 1 and b = 2 order by c, b, a;
-    // ```
-    // In the first two `SELECT` statements, `idx_a_b_c` matches the sort order. In the last two `SELECT` statements, `idx_d_c_b_a`
-    // matches the sort order. Hence, we use `path.ConstCols` to deal with the above situations.
-    if !prop.IsSortItemEmpty() && all && len(path.IdxCols) >= len(prop.SortItems) {
-        isMatchProp = true
-        i := 0
-        for _, sortItem := range prop.SortItems {
-            found := false
-            for ; i < len(path.IdxCols); i++ {
-                if path.IdxColLens[i] == types.UnspecifiedLength && sortItem.Col.EqualColumn(path.IdxCols[i]) {
-                    found = true
-                    i++
-                    break
-                }
-                if path.ConstCols == nil || i >= len(path.ConstCols) || !path.ConstCols[i] {
-                    break
-                }
-            }
-            if !found {
-                isMatchProp = false
-                break
-            }
-        }
-    }
-    return isMatchProp
-}
-
-// matchPropForIndexMergeAlternatives matches the prop against the PartialAlternativeIndexPaths inside, and chooses
-// one matched alternative as the determined index merge partial path for each dimension in PartialAlternativeIndexPaths.
-// Finally, after all the decided index merge partial paths are collected, it outputs a concrete index merge path
-// whose PartialIndexPaths field is filled in here.
-//
-// as we mentioned before, after deriveStats is done, the normal index OR path will be generated like below:
-//
-// `create table t (a int, b int, c int, key a(a), key b(b), key ac(a, c), key bc(b, c))`
-// `explain format='verbose' select * from t where a=1 or b=1 order by c`
-//
-// like the case here:
-// normal index merge OR path should be:
-// for a=1, it has two partial alternative paths: [a, ac]
-// for b=1, it has two partial alternative paths: [b, bc]
-// and the index merge path:
-//
-//     indexMergePath: {
-//         PartialIndexPaths: empty        // 1D array here, currently is not decided yet.
-//         PartialAlternativeIndexPaths: [[a, ac], [b, bc]] // 2D array here, each for one DNF item's choices.
-//     }
-//
-// let's say we have a prop requirement like sort by [c] here, we will choose the better one [ac] (because it can keep
-// order) for the first batch [a, ac] from PartialAlternativeIndexPaths; and choose the better one [bc] (because it can
-// keep order too) for the second batch [b, bc] from PartialAlternativeIndexPaths. Finally we output a concrete index
-// merge path as
-//
-//     indexMergePath: {
-//         PartialIndexPaths: [ac, bc]     // just collected since they match the prop.
-//         ...
-//     }
-//
-// What if the prop is empty? Then the choice between [a, ac] and [b, bc] is decided only by their countAfterAccess.
-// That's why a slices.SortFunc(matchIdxes, func(a, b int){}) is used inside. After sorting, the matchIdxes of the
-// matched paths are in ascending order of countAfterAccess, so choosing the first one is straightforward.
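A simplified standalone model of the prefix-matching loop in isMatchProp above, using plain strings as stand-ins for the real column types: sort items must match a prefix of the index columns, but columns pinned to a constant by the predicates may be skipped (the prefix-length check on IdxColLens is omitted here for brevity).

package main

import "fmt"

func matchesOrder(idxCols []string, constCols map[string]bool, sortItems []string) bool {
	i := 0
	for _, item := range sortItems {
		found := false
		for ; i < len(idxCols); i++ {
			if idxCols[i] == item {
				found = true
				i++
				break
			}
			if !constCols[idxCols[i]] {
				break // a non-constant gap ends the usable prefix
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func main() {
	// index idx_d_c_b_a(d, c, b, a) with predicates d = 1 and b = 2:
	idx := []string{"d", "c", "b", "a"}
	consts := map[string]bool{"d": true, "b": true}
	fmt.Println(matchesOrder(idx, consts, []string{"c", "a"})) // true: d and b are constants
	fmt.Println(matchesOrder(idx, consts, []string{"a", "c"})) // false: c is a non-constant gap
}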
-//
-// There is another case shown below where just picking the first one after matchIdxes is ordered is not always right:
-// special logic for alternative paths:
-//
-//     index merge:
-//        matched paths-1: {pk, index1}
-//        matched paths-2: {pk}
-//
-// If we choose the first one as described above, say pk in the first matched paths, then paths-2 has no choice but pk
-// (because of the avoid-all-same-index logic inside), and this results in the all-single-index failure. So we need to
-// sort the matchIdxes again according to the length of their matched paths, which here means:
-//
-//     index merge:
-//        matched paths-1: {pk, index1}
-//        matched paths-2: {pk}
-//
-// and let matched paths-2 be the first to make its determination, choosing pk here; then it is matched paths-1's turn
-// to make its choice. Since pk is occupied, the avoid-all-same-index logic inside will pick index1 here, so the work
-// can be done.
-//
-// At last, the real countAfterAccess is rewritten according to determinedIndexPartialPaths; this part is moved from
-// deriveStats to here.
-func matchPropForIndexMergeAlternatives(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) (*util.AccessPath, bool) {
-    // target:
-    // 1: in the index merge case, try to match every alternative partial path to the order property as far as
-    // possible, and generate the property-matched index merge path if any.
-    // 2: if the prop is empty (meaning no sort requirement), we generate an index partial combination
-    // path from all the alternatives so that an index merge path still comes out.
-
-    // Execution part doesn't support the merge operation for intersection case yet.
-    if path.IndexMergeIsIntersection {
-        return nil, false
-    }
-
-    noSortItem := prop.IsSortItemEmpty()
-    allSame, _ := prop.AllSameOrder()
-    if !allSame {
-        return nil, false
-    }
-    // step1: match the property from all the index partial alternative paths.
-    determinedIndexPartialPaths := make([]*util.AccessPath, 0, len(path.PartialAlternativeIndexPaths))
-    usedIndexMap := make(map[int64]struct{}, 1)
-    type idxWrapper struct {
-        // matchIdx records the matched alternative paths from one alternative path set.
-        // As said above, for a=1 there are two partial alternative paths: [a, ac].
-        // With an empty property here, matchIdx from [a, ac] for a=1 will be both: [0,1].
-        // With a sort[c] property here, matchIdx from [a, ac] for a=1 will be: [1].
-        matchIdx []int
-        // pathIdx is the original position offset indicating where the current matchIdx is
-        // computed from. e.g. [[a, ac], [b, bc]] for the sort[c] property:
-        // idxWrapper{[ac], 0}, 0 is the offset in the first dimension of PartialAlternativeIndexPaths
-        // idxWrapper{[bc], 1}, 1 is the offset in the first dimension of PartialAlternativeIndexPaths
-        pathIdx int
-    }
-    allMatchIdxes := make([]idxWrapper, 0, len(path.PartialAlternativeIndexPaths))
-    // special logic for alternative paths:
-    // index merge:
-    //    path1: {pk, index1}
-    //    path2: {pk}
-    // if we choose pk in the first path, then path2 has no choice but pk, and this results in the all-single-index
-    // failure. So we should collect all the prop-matched paths, stored as matchIdxes here.
-    for pathIdx, oneItemAlternatives := range path.PartialAlternativeIndexPaths {
-        matchIdxes := make([]int, 0, 1)
-        for i, oneIndexAlternativePath := range oneItemAlternatives {
-            // if there are some sort items and this path doesn't match the prop, continue.
-            if !noSortItem && !isMatchProp(ds, oneIndexAlternativePath, prop) {
-                continue
-            }
-            // two possibilities here:
-            // 1. no sort item requirement.
-            // 2. matched with the sort items.
-            matchIdxes = append(matchIdxes, i)
-        }
-        if len(matchIdxes) == 0 {
-            // if none of the index alternatives of one of the CNF items can match the sort property,
-            // the entire index merge union path can be ignored for this sort property; return false.
-            return nil, false
-        }
-        if len(matchIdxes) > 1 {
-            // if there is more than one matchIdx, we should sort the matched alternative paths by their CountAfterAccess.
-            tmpOneItemAlternatives := oneItemAlternatives
-            slices.SortStableFunc(matchIdxes, func(a, b int) int {
-                lhsCountAfter := tmpOneItemAlternatives[a].CountAfterAccess
-                if len(tmpOneItemAlternatives[a].IndexFilters) > 0 {
-                    lhsCountAfter = tmpOneItemAlternatives[a].CountAfterIndex
-                }
-                rhsCountAfter := tmpOneItemAlternatives[b].CountAfterAccess
-                if len(tmpOneItemAlternatives[b].IndexFilters) > 0 {
-                    rhsCountAfter = tmpOneItemAlternatives[b].CountAfterIndex
-                }
-                res := cmp.Compare(lhsCountAfter, rhsCountAfter)
-                if res != 0 {
-                    return res
-                }
-                // If CountAfterAccess is the same, any global index path should come first.
-                var lIsGlobalIndex, rIsGlobalIndex int
-                if !tmpOneItemAlternatives[a].IsTablePath() && tmpOneItemAlternatives[a].Index.Global {
-                    lIsGlobalIndex = 1
-                }
-                if !tmpOneItemAlternatives[b].IsTablePath() && tmpOneItemAlternatives[b].Index.Global {
-                    rIsGlobalIndex = 1
-                }
-                return -cmp.Compare(lIsGlobalIndex, rIsGlobalIndex)
-            })
-        }
-        allMatchIdxes = append(allMatchIdxes, idxWrapper{matchIdxes, pathIdx})
-    }
-    // sort allMatchIdxes by its element length.
-    // index merge:                    index merge:
-    //    path1: {pk, index1}     ==>     path2: {pk}
-    //    path2: {pk}                     path1: {pk, index1}
-    // here, for the fixed choice pk of path2, let it be the first one to choose, leaving the choice of index1 to path1.
-    slices.SortStableFunc(allMatchIdxes, func(a, b idxWrapper) int {
-        lhsLen := len(a.matchIdx)
-        rhsLen := len(b.matchIdx)
-        return cmp.Compare(lhsLen, rhsLen)
-    })
-    for _, matchIdxes := range allMatchIdxes {
-        // since the matchIdxes are ordered by their length,
-        // we should use matchIdxes.pathIdx to locate where each comes from.
-        alternatives := path.PartialAlternativeIndexPaths[matchIdxes.pathIdx]
-        found := false
-        // pick the most suitable partial index alternative from all the matched alternative paths according to
-        // ascending CountAfterAccess; a distinct one is preferred.
-        for _, oneIdx := range matchIdxes.matchIdx {
-            var indexID int64
-            if alternatives[oneIdx].IsTablePath() {
-                indexID = -1
-            } else {
-                indexID = alternatives[oneIdx].Index.ID
-            }
-            if _, ok := usedIndexMap[indexID]; !ok {
-                // try to avoid all the partial index paths being about a single index.
-                determinedIndexPartialPaths = append(determinedIndexPartialPaths, alternatives[oneIdx].Clone())
-                usedIndexMap[indexID] = struct{}{}
-                found = true
-                break
-            }
-        }
-        if !found {
-            // just pick the same-named index (using the first one is OK), since some other distinct index path
-            // may still be picked for the other partial paths later.
-            determinedIndexPartialPaths = append(determinedIndexPartialPaths, alternatives[matchIdxes.matchIdx[0]].Clone())
-            // usedIndexMap[oneItemAlternatives[oneIdx].Index.ID] = struct{}{} must already be marked.
-        }
-    }
-    if len(usedIndexMap) == 1 {
-        // if all the partial paths use the same index, the index merge is meaningless; fail over.
-        return nil, false
-    }
-    // step2: gen a new **concrete** index merge path.
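A small standalone sketch of the determination step above, with assumed simplified types: each DNF item picks the first unclaimed index from its alternatives (already sorted by ascending row count, analogous to CountAfterAccess), falling back to its cheapest alternative, so the merge does not degenerate to a single index. The real code additionally orders the items so that those with fewer alternatives choose first.

package main

import "fmt"

type alt struct {
	index string
	rows  float64 // stand-in for CountAfterAccess
}

func determine(items [][]alt) []string {
	used := map[string]bool{}
	picked := make([]string, 0, len(items))
	for _, alts := range items { // alts assumed sorted by ascending row count
		choice := alts[0] // fallback: reuse a claimed index if unavoidable
		for _, a := range alts {
			if !used[a.index] {
				choice = a // first unclaimed alternative in cost order
				break
			}
		}
		used[choice.index] = true
		picked = append(picked, choice.index)
	}
	return picked
}

func main() {
	// items ordered shortest-first, as the real sort does:
	items := [][]alt{
		{{"pk", 100}},
		{{"pk", 90}, {"index1", 120}},
	}
	fmt.Println(determine(items)) // [pk index1]: the second item avoids the claimed pk
}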
-    indexMergePath := &util.AccessPath{
-        PartialIndexPaths:        determinedIndexPartialPaths,
-        IndexMergeIsIntersection: false,
-        // inherit the table filters that are already determined to be un-pushable.
-        TableFilters: path.TableFilters,
-    }
-    // path.ShouldBeKeptCurrentFilter records whether some part of the CNF item already couldn't be pushed down to TiKV.
-    shouldKeepCurrentFilter := path.KeepIndexMergeORSourceFilter
-    pushDownCtx := util.GetPushDownCtx(ds.SCtx())
-    for _, path := range determinedIndexPartialPaths {
-        // If any partial path contains table filters, we need to keep the whole DNF filter in the Selection.
-        if len(path.TableFilters) > 0 {
-            if !expression.CanExprsPushDown(pushDownCtx, path.TableFilters, kv.TiKV) {
-                // if these table filters can't be pushed down, all of them should be kept on the table side;
-                // clean the lookup side here.
-                path.TableFilters = nil
-            }
-            shouldKeepCurrentFilter = true
-        }
-        // If any partial path's index filter cannot be pushed to TiKV, we should keep the whole DNF filter.
-        if len(path.IndexFilters) != 0 && !expression.CanExprsPushDown(pushDownCtx, path.IndexFilters, kv.TiKV) {
-            shouldKeepCurrentFilter = true
-            // Clear IndexFilter, the whole filter will be put in indexMergePath.TableFilters.
-            path.IndexFilters = nil
-        }
-    }
-    // Keep this filter as a part of table filters for safety if it has any parameter.
-    if expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), []expression.Expression{path.IndexMergeORSourceFilter}) {
-        shouldKeepCurrentFilter = true
-    }
-    if shouldKeepCurrentFilter {
-        // add the CNF expression back as a table filter.
-        indexMergePath.TableFilters = append(indexMergePath.TableFilters, path.IndexMergeORSourceFilter)
-    }
-
-    // step3: after the index merge path is determined, compute the countAfterAccess as usual.
-    accessConds := make([]expression.Expression, 0, len(determinedIndexPartialPaths))
-    for _, p := range determinedIndexPartialPaths {
-        indexCondsForP := p.AccessConds[:]
-        indexCondsForP = append(indexCondsForP, p.IndexFilters...)
-        if len(indexCondsForP) > 0 {
-            accessConds = append(accessConds, expression.ComposeCNFCondition(ds.SCtx().GetExprCtx(), indexCondsForP...))
-        }
-    }
-    accessDNF := expression.ComposeDNFCondition(ds.SCtx().GetExprCtx(), accessConds...)
-    sel, _, err := cardinality.Selectivity(ds.SCtx(), ds.TableStats.HistColl, []expression.Expression{accessDNF}, nil)
-    if err != nil {
-        logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err))
-        sel = cost.SelectionFactor
-    }
-    indexMergePath.CountAfterAccess = sel * ds.TableStats.RowCount
-    if noSortItem {
-        // since there is no sort property, the index merge case is generated by combination, each alternative picked
-        // for its lower/lowest countAfterAccess; the returned matchProperty should be false here.
-        return indexMergePath, false
-    }
-    return indexMergePath, true
-}
-
-func isMatchPropForIndexMerge(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) bool {
-    // Execution part doesn't support the merge operation for intersection case yet.
-    if path.IndexMergeIsIntersection {
-        return false
-    }
-    allSame, _ := prop.AllSameOrder()
-    if !allSame {
-        return false
-    }
-    for _, partialPath := range path.PartialIndexPaths {
-        if !isMatchProp(ds, partialPath, prop) {
-            return false
-        }
-    }
-    return true
-}
-
-func getTableCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath {
-    candidate := &candidatePath{path: path}
-    candidate.isMatchProp = isMatchProp(ds, path, prop)
-    candidate.accessCondsColMap = util.ExtractCol2Len(ds.SCtx().GetExprCtx().GetEvalCtx(), path.AccessConds, nil, nil)
-    return candidate
-}
-
-func getIndexCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath {
-    candidate := &candidatePath{path: path}
-    candidate.isMatchProp = isMatchProp(ds, path, prop)
-    candidate.accessCondsColMap = util.ExtractCol2Len(ds.SCtx().GetExprCtx().GetEvalCtx(), path.AccessConds, path.IdxCols, path.IdxColLens)
-    candidate.indexCondsColMap = util.ExtractCol2Len(ds.SCtx().GetExprCtx().GetEvalCtx(), append(path.AccessConds, path.IndexFilters...), path.FullIdxCols, path.FullIdxColLens)
-    return candidate
-}
-
-func convergeIndexMergeCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath {
-    // since all the alternative partial index paths are collected and still undetermined, we should determine a
-    // possible and concrete path for this prop.
-    possiblePath, match := matchPropForIndexMergeAlternatives(ds, path, prop)
-    if possiblePath == nil {
-        return nil
-    }
-    candidate := &candidatePath{path: possiblePath, isMatchProp: match}
-    return candidate
-}
-
-func getIndexMergeCandidate(ds *DataSource, path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath {
-    candidate := &candidatePath{path: path}
-    candidate.isMatchProp = isMatchPropForIndexMerge(ds, path, prop)
-    return candidate
-}
-
-// skylinePruning prunes access paths according to different factors. An access path can be pruned only if
-// there exists a path that is not worse than it on all factors and is better on at least one factor.
-func skylinePruning(ds *DataSource, prop *property.PhysicalProperty) []*candidatePath {
-    candidates := make([]*candidatePath, 0, 4)
-    for _, path := range ds.PossibleAccessPaths {
-        // We should check whether the possible access path is valid first.
-        if path.StoreType != kv.TiFlash && prop.IsFlashProp() {
-            continue
-        }
-        if len(path.PartialAlternativeIndexPaths) > 0 {
-            // a normal index merge OR path; try to determine every partial index path for this property.
-            candidate := convergeIndexMergeCandidate(ds, path, prop)
-            if candidate != nil {
-                candidates = append(candidates, candidate)
-            }
-            continue
-        }
-        if path.PartialIndexPaths != nil {
-            candidates = append(candidates, getIndexMergeCandidate(ds, path, prop))
-            continue
-        }
-        // if we already know the range of the scan is empty, just return a TableDual
-        if len(path.Ranges) == 0 {
-            return []*candidatePath{{path: path}}
-        }
-        var currentCandidate *candidatePath
-        if path.IsTablePath() {
-            currentCandidate = getTableCandidate(ds, path, prop)
-        } else {
-            if !(len(path.AccessConds) > 0 || !prop.IsSortItemEmpty() || path.Forced || path.IsSingleScan) {
-                continue
-            }
-            // We will use the index to generate a physical plan if any of the following conditions is satisfied:
-            // 1. This path's access cond is not nil.
-            // 2. We have a non-empty prop to match.
-            // 3. This index is forced to choose.
-            // 4. The needed columns are all covered by index columns (and handleCol).
-            currentCandidate = getIndexCandidate(ds, path, prop)
-        }
-        pruned := false
-        for i := len(candidates) - 1; i >= 0; i-- {
-            if candidates[i].path.StoreType == kv.TiFlash {
-                continue
-            }
-            result := compareCandidates(ds.SCtx(), prop, candidates[i], currentCandidate)
-            if result == 1 {
-                pruned = true
-                // We can break here because the current candidate cannot prune others anymore.
-                break
-            } else if result == -1 {
-                candidates = append(candidates[:i], candidates[i+1:]...)
-            }
-        }
-        if !pruned {
-            candidates = append(candidates, currentCandidate)
-        }
-    }
-
-    preferRange := ds.SCtx().GetSessionVars().GetAllowPreferRangeScan() && (ds.TableStats.HistColl.Pseudo || ds.TableStats.RowCount < 1)
-    // If we've forced an index merge, we want to keep these plans.
-    preferMerge := len(ds.IndexMergeHints) > 0 || fixcontrol.GetBoolWithDefault(
-        ds.SCtx().GetSessionVars().GetOptimizerFixControlMap(),
-        fixcontrol.Fix52869,
-        false,
-    )
-    if preferRange && len(candidates) > 1 {
-        // If a candidate path is a TiFlash path, a forced path, or an MV index, we just keep it. For the other
-        // candidate paths, if there exists any range scan path, we remove the full scan paths and keep the range
-        // scan paths.
-        preferredPaths := make([]*candidatePath, 0, len(candidates))
-        var hasRangeScanPath bool
-        for _, c := range candidates {
-            if c.path.Forced || c.path.StoreType == kv.TiFlash || (c.path.Index != nil && c.path.Index.MVIndex) {
-                preferredPaths = append(preferredPaths, c)
-                continue
-            }
-            var unsignedIntHandle bool
-            if c.path.IsIntHandlePath && ds.TableInfo.PKIsHandle {
-                if pkColInfo := ds.TableInfo.GetPkColInfo(); pkColInfo != nil {
-                    unsignedIntHandle = mysql.HasUnsignedFlag(pkColInfo.GetFlag())
-                }
-            }
-            if !ranger.HasFullRange(c.path.Ranges, unsignedIntHandle) {
-                // Prefer plans with equals/IN predicates, or plans where there is more filtering in the index than
-                // against the table.
-                equalPlan := c.path.EqCondCount > 0 || c.path.EqOrInCondCount > 0
-                indexFilters := len(c.path.TableFilters) < len(c.path.IndexFilters)
-                if preferMerge || (((equalPlan || indexFilters) && prop.IsSortItemEmpty()) || c.isMatchProp) {
-                    preferredPaths = append(preferredPaths, c)
-                    hasRangeScanPath = true
-                }
-            }
-        }
-        if hasRangeScanPath {
-            return preferredPaths
-        }
-    }
-
-    return candidates
-}
-
-func getPruningInfo(ds *DataSource, candidates []*candidatePath, prop *property.PhysicalProperty) string {
-    if len(candidates) == len(ds.PossibleAccessPaths) {
-        return ""
-    }
-    if len(candidates) == 1 && len(candidates[0].path.Ranges) == 0 {
-        // For TableDual, we don't need to output pruning info.
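A compact standalone model of the skyline-pruning rule applied in the loop above, with hypothetical integer scores instead of TiDB's real comparators: candidate x prunes candidate y only if x is no worse on every factor and strictly better on at least one; incomparable candidates both survive.

package main

import "fmt"

type cand struct {
	access, scan, match int // per-factor scores; higher is better
}

// dominates reports whether x is better than y under the skyline rule.
func dominates(x, y cand) bool {
	notWorse := x.access >= y.access && x.scan >= y.scan && x.match >= y.match
	better := x.access > y.access || x.scan > y.scan || x.match > y.match
	return notWorse && better
}

func skyline(cands []cand) []cand {
	kept := make([]cand, 0, len(cands))
	for _, c := range cands {
		pruned := false
		for i := len(kept) - 1; i >= 0; i-- {
			if dominates(kept[i], c) {
				pruned = true // an existing candidate dominates the new one
				break
			}
			if dominates(c, kept[i]) {
				kept = append(kept[:i], kept[i+1:]...) // the new one prunes an existing candidate
			}
		}
		if !pruned {
			kept = append(kept, c)
		}
	}
	return kept
}

func main() {
	// {1,1,1} is dominated by {2,1,1}; {1,2,0} is incomparable and survives.
	fmt.Println(skyline([]cand{{2, 1, 1}, {1, 1, 1}, {1, 2, 0}}))
}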
- return "" - } - names := make([]string, 0, len(candidates)) - var tableName string - if ds.TableAsName.O == "" { - tableName = ds.TableInfo.Name.O - } else { - tableName = ds.TableAsName.O - } - getSimplePathName := func(path *util.AccessPath) string { - if path.IsTablePath() { - if path.StoreType == kv.TiFlash { - return tableName + "(tiflash)" - } - return tableName - } - return path.Index.Name.O - } - for _, cand := range candidates { - if cand.path.PartialIndexPaths != nil { - partialNames := make([]string, 0, len(cand.path.PartialIndexPaths)) - for _, partialPath := range cand.path.PartialIndexPaths { - partialNames = append(partialNames, getSimplePathName(partialPath)) - } - names = append(names, fmt.Sprintf("IndexMerge{%s}", strings.Join(partialNames, ","))) - } else { - names = append(names, getSimplePathName(cand.path)) - } - } - items := make([]string, 0, len(prop.SortItems)) - for _, item := range prop.SortItems { - items = append(items, item.String()) - } - return fmt.Sprintf("[%s] remain after pruning paths for %s given Prop{SortItems: [%s], TaskTp: %s}", - strings.Join(names, ","), tableName, strings.Join(items, " "), prop.TaskTp) -} - -func isPointGetConvertableSchema(ds *DataSource) bool { - for _, col := range ds.Columns { - if col.Name.L == model.ExtraHandleName.L { - continue - } - - // Only handle tables that all columns are public. - if col.State != model.StatePublic { - return false - } - } - return true -} - -// exploreEnforcedPlan determines whether to explore enforced plans for this DataSource if it has already found an unenforced plan. -// See #46177 for more information. -func exploreEnforcedPlan(ds *DataSource) bool { - // default value is false to keep it compatible with previous versions. - return fixcontrol.GetBoolWithDefault(ds.SCtx().GetSessionVars().GetOptimizerFixControlMap(), fixcontrol.Fix46177, false) -} - -func findBestTask4DS(ds *DataSource, prop *property.PhysicalProperty, planCounter *base.PlanCounterTp, opt *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) { - // If ds is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself, - // and set inner child prop nil, so here we do nothing. - if prop == nil { - planCounter.Dec(1) - return nil, 1, nil - } - if ds.IsForUpdateRead && ds.SCtx().GetSessionVars().TxnCtx.IsExplicit { - hasPointGetPath := false - for _, path := range ds.PossibleAccessPaths { - if isPointGetPath(ds, path) { - hasPointGetPath = true - break - } - } - tblName := ds.TableInfo.Name - ds.PossibleAccessPaths, err = filterPathByIsolationRead(ds.SCtx(), ds.PossibleAccessPaths, tblName, ds.DBName) - if err != nil { - return nil, 1, err - } - if hasPointGetPath { - newPaths := make([]*util.AccessPath, 0) - for _, path := range ds.PossibleAccessPaths { - // if the path is the point get range path with for update lock, we should forbid tiflash as it's store path (#39543) - if path.StoreType != kv.TiFlash { - newPaths = append(newPaths, path) - } - } - ds.PossibleAccessPaths = newPaths - } - } - t = ds.GetTask(prop) - if t != nil { - cntPlan = 1 - planCounter.Dec(1) - return - } - var cnt int64 - var unenforcedTask base.Task - // If prop.CanAddEnforcer is true, the prop.SortItems need to be set nil for ds.findBestTask. - // Before function return, reset it for enforcing task prop and storing map. 
- oldProp := prop.CloneEssentialFields() - if prop.CanAddEnforcer { - // First, get the bestTask without enforced prop - prop.CanAddEnforcer = false - unenforcedTask, cnt, err = ds.FindBestTask(prop, planCounter, opt) - if err != nil { - return nil, 0, err - } - if !unenforcedTask.Invalid() && !exploreEnforcedPlan(ds) { - ds.StoreTask(prop, unenforcedTask) - return unenforcedTask, cnt, nil - } - - // Then, explore the bestTask with enforced prop - prop.CanAddEnforcer = true - cntPlan += cnt - prop.SortItems = []property.SortItem{} - prop.MPPPartitionTp = property.AnyType - } else if prop.MPPPartitionTp != property.AnyType { - return base.InvalidTask, 0, nil - } - defer func() { - if err != nil { - return - } - if prop.CanAddEnforcer { - *prop = *oldProp - t = enforceProperty(prop, t, ds.Plan.SCtx()) - prop.CanAddEnforcer = true - } - - if unenforcedTask != nil && !unenforcedTask.Invalid() { - curIsBest, cerr := compareTaskCost(unenforcedTask, t, opt) - if cerr != nil { - err = cerr - return - } - if curIsBest { - t = unenforcedTask - } - } - - ds.StoreTask(prop, t) - err = validateTableSamplePlan(ds, t, err) - }() - - t, err = tryToGetDualTask(ds) - if err != nil || t != nil { - planCounter.Dec(1) - if t != nil { - appendCandidate(ds, t, prop, opt) - } - return t, 1, err - } - - t = base.InvalidTask - candidates := skylinePruning(ds, prop) - pruningInfo := getPruningInfo(ds, candidates, prop) - defer func() { - if err == nil && t != nil && !t.Invalid() && pruningInfo != "" { - warnErr := errors.NewNoStackError(pruningInfo) - if ds.SCtx().GetSessionVars().StmtCtx.InVerboseExplain { - ds.SCtx().GetSessionVars().StmtCtx.AppendNote(warnErr) - } else { - ds.SCtx().GetSessionVars().StmtCtx.AppendExtraNote(warnErr) - } - } - }() - - cntPlan = 0 - for _, candidate := range candidates { - path := candidate.path - if path.PartialIndexPaths != nil { - // prefer tiflash, while current table path is tikv, skip it. - if ds.PreferStoreType&h.PreferTiFlash != 0 && path.StoreType == kv.TiKV { - continue - } - idxMergeTask, err := convertToIndexMergeScan(ds, prop, candidate, opt) - if err != nil { - return nil, 0, err - } - if !idxMergeTask.Invalid() { - cntPlan++ - planCounter.Dec(1) - } - appendCandidate(ds, idxMergeTask, prop, opt) - - curIsBetter, err := compareTaskCost(idxMergeTask, t, opt) - if err != nil { - return nil, 0, err - } - if curIsBetter || planCounter.Empty() { - t = idxMergeTask - } - if planCounter.Empty() { - return t, cntPlan, nil - } - continue - } - // if we already know the range of the scan is empty, just return a TableDual - if len(path.Ranges) == 0 { - // We should uncache the tableDual plan. - if expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), path.AccessConds) { - ds.SCtx().GetSessionVars().StmtCtx.SetSkipPlanCache("get a TableDual plan") - } - dual := PhysicalTableDual{}.Init(ds.SCtx(), ds.StatsInfo(), ds.QueryBlockOffset()) - dual.SetSchema(ds.Schema()) - cntPlan++ - planCounter.Dec(1) - t := &RootTask{} - t.SetPlan(dual) - appendCandidate(ds, t, prop, opt) - return t, cntPlan, nil - } - - canConvertPointGet := len(path.Ranges) > 0 && path.StoreType == kv.TiKV && isPointGetConvertableSchema(ds) - - if canConvertPointGet && path.Index != nil && path.Index.MVIndex { - canConvertPointGet = false // cannot use PointGet upon MVIndex - } - - if canConvertPointGet && !path.IsIntHandlePath { - // We simply do not build [batch] point get for prefix indexes. This can be optimized. 
-        canConvertPointGet = path.Index.Unique && !path.Index.HasPrefixIndex()
-        // If any range cannot cover all columns of the index, we cannot build [batch] point get.
-        idxColsLen := len(path.Index.Columns)
-        for _, ran := range path.Ranges {
-            if len(ran.LowVal) != idxColsLen {
-                canConvertPointGet = false
-                break
-            }
-        }
-    }
-    if canConvertPointGet && ds.table.Meta().GetPartitionInfo() != nil {
-        // A partition table with dynamic pruning does not support BatchPointGet.
-        // Due to sorting?
-        // Please make sure `where _tidb_rowid in (xx, xx)` is handled correctly when deleting this if statement.
-        if canConvertPointGet && len(path.Ranges) > 1 && ds.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
-            canConvertPointGet = false
-        }
-        if canConvertPointGet && len(path.Ranges) > 1 {
-            // TODO: This is now implemented, but to decrease
-            // the impact of supporting plan cache for partitioning,
-            // this is not yet enabled.
-            // TODO: just remove this if block and update/add tests...
-            // We can only build batch point get for hash partitions on a simple column now. This is
-            // decided by the current implementation of `BatchPointGetExec::initialize()`, specifically,
-            // the `getPhysID()` function. Once we optimize that part, we can come back and enable
-            // BatchPointGet plan for more cases.
-            hashPartColName := getHashOrKeyPartitionColumnName(ds.SCtx(), ds.table.Meta())
-            if hashPartColName == nil {
-                canConvertPointGet = false
-            }
-        }
-        // A partition table can't use `_tidb_rowid` to generate a PointGet plan unless one partition is explicitly specified.
-        if canConvertPointGet && path.IsIntHandlePath && !ds.table.Meta().PKIsHandle && len(ds.PartitionNames) != 1 {
-            canConvertPointGet = false
-        }
-        if canConvertPointGet {
-            if path != nil && path.Index != nil && path.Index.Global {
-                // Don't convert to point get during DDL.
-                // TODO: Revisit truncate partition and global index
-                if len(ds.TableInfo.GetPartitionInfo().DroppingDefinitions) > 0 ||
-                    len(ds.TableInfo.GetPartitionInfo().AddingDefinitions) > 0 {
-                    canConvertPointGet = false
-                }
-            }
-        }
-    }
-    if canConvertPointGet {
-        allRangeIsPoint := true
-        tc := ds.SCtx().GetSessionVars().StmtCtx.TypeCtx()
-        for _, ran := range path.Ranges {
-            if !ran.IsPointNonNullable(tc) {
-                // unique indexes can have duplicated NULL rows, so we cannot use PointGet if there is a NULL
-                allRangeIsPoint = false
-                break
-            }
-        }
-        if allRangeIsPoint {
-            var pointGetTask base.Task
-            if len(path.Ranges) == 1 {
-                pointGetTask = convertToPointGet(ds, prop, candidate)
-            } else {
-                pointGetTask = convertToBatchPointGet(ds, prop, candidate)
-            }
-
-            // Batch/PointGet plans may be over-optimized, like `a>=1(?) and a<=1(?)` --> `a=1` --> PointGet(a=1).
-            // For safety, keep these plans out of the plan cache here.
-            if !pointGetTask.Invalid() && expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), candidate.path.AccessConds) && !isSafePointGetPath4PlanCache(ds.SCtx(), candidate.path) {
-                ds.SCtx().GetSessionVars().StmtCtx.SetSkipPlanCache("Batch/PointGet plans may be over-optimized")
-            }
-
-            appendCandidate(ds, pointGetTask, prop, opt)
-            if !pointGetTask.Invalid() {
-                cntPlan++
-                planCounter.Dec(1)
-            }
-            curIsBetter, cerr := compareTaskCost(pointGetTask, t, opt)
-            if cerr != nil {
-                return nil, 0, cerr
-            }
-            if curIsBetter || planCounter.Empty() {
-                t = pointGetTask
-                if planCounter.Empty() {
-                    return
-                }
-                continue
-            }
-        }
-    }
-    if path.IsTablePath() {
-        // prefer TiFlash: if the current table path is TiKV, skip it.
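A standalone sketch of the dispatch above, with assumed simplified ranges rather than TiDB's ranger types: only when every range is a single non-null point can the access become PointGet (one range) or BatchPointGet (several ranges); anything else falls back to a scan.

package main

import "fmt"

type rng struct {
	low, high string
	hasNull   bool
}

// isPointNonNullable is a stand-in for ranger's point check: the range selects
// exactly one non-null value.
func (r rng) isPointNonNullable() bool { return r.low == r.high && !r.hasNull }

func accessKind(ranges []rng) string {
	for _, r := range ranges {
		if !r.isPointNonNullable() {
			return "Index/TableScan" // not all ranges are points: no point get
		}
	}
	if len(ranges) == 1 {
		return "PointGet"
	}
	return "BatchPointGet"
}

func main() {
	fmt.Println(accessKind([]rng{{"1", "1", false}}))                    // PointGet
	fmt.Println(accessKind([]rng{{"1", "1", false}, {"2", "2", false}})) // BatchPointGet
	fmt.Println(accessKind([]rng{{"1", "9", false}}))                    // Index/TableScan
}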
-        if ds.PreferStoreType&h.PreferTiFlash != 0 && path.StoreType == kv.TiKV {
-            continue
-        }
-        // prefer TiKV: if the current table path is TiFlash, skip it.
-        if ds.PreferStoreType&h.PreferTiKV != 0 && path.StoreType == kv.TiFlash {
-            continue
-        }
-        var tblTask base.Task
-        if ds.SampleInfo != nil {
-            tblTask, err = convertToSampleTable(ds, prop, candidate, opt)
-        } else {
-            tblTask, err = convertToTableScan(ds, prop, candidate, opt)
-        }
-        if err != nil {
-            return nil, 0, err
-        }
-        if !tblTask.Invalid() {
-            cntPlan++
-            planCounter.Dec(1)
-        }
-        appendCandidate(ds, tblTask, prop, opt)
-        curIsBetter, err := compareTaskCost(tblTask, t, opt)
-        if err != nil {
-            return nil, 0, err
-        }
-        if curIsBetter || planCounter.Empty() {
-            t = tblTask
-        }
-        if planCounter.Empty() {
-            return t, cntPlan, nil
-        }
-        continue
-    }
-    // TiFlash storage does not support index scan.
-    if ds.PreferStoreType&h.PreferTiFlash != 0 {
-        continue
-    }
-    // TableSample does not support index scan.
-    if ds.SampleInfo != nil {
-        continue
-    }
-    idxTask, err := convertToIndexScan(ds, prop, candidate, opt)
-    if err != nil {
-        return nil, 0, err
-    }
-    if !idxTask.Invalid() {
-        cntPlan++
-        planCounter.Dec(1)
-    }
-    appendCandidate(ds, idxTask, prop, opt)
-    curIsBetter, err := compareTaskCost(idxTask, t, opt)
-    if err != nil {
-        return nil, 0, err
-    }
-    if curIsBetter || planCounter.Empty() {
-        t = idxTask
-    }
-    if planCounter.Empty() {
-        return t, cntPlan, nil
-    }
-    }
-
-    return
-}
-
-// convertToIndexMergeScan builds the index merge scan for intersection or union cases.
-func convertToIndexMergeScan(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (task base.Task, err error) {
-    if prop.IsFlashProp() || prop.TaskTp == property.CopSingleReadTaskType {
-        return base.InvalidTask, nil
-    }
-    // Lift the limitation that double read cannot build an index merge **COP** task with intersection.
-    // That means we can output a cop task here without encapsulating it as a root task, for the convenience of
-    // attaching a limit to its table side.
-
-    if !prop.IsSortItemEmpty() && !candidate.isMatchProp {
-        return base.InvalidTask, nil
-    }
-    // For now, we still cannot push the sort prop down to the intersection index plan side; it is temporarily banned here.
-    if !prop.IsSortItemEmpty() && candidate.path.IndexMergeIsIntersection {
-        return base.InvalidTask, nil
-    }
-    failpoint.Inject("forceIndexMergeKeepOrder", func(_ failpoint.Value) {
-        if len(candidate.path.PartialIndexPaths) > 0 && !candidate.path.IndexMergeIsIntersection {
-            if prop.IsSortItemEmpty() {
-                failpoint.Return(base.InvalidTask, nil)
-            }
-        }
-    })
-    path := candidate.path
-    scans := make([]base.PhysicalPlan, 0, len(path.PartialIndexPaths))
-    cop := &CopTask{
-        indexPlanFinished: false,
-        tblColHists:       ds.TblColHists,
-    }
-    cop.physPlanPartInfo = &PhysPlanPartInfo{
-        PruningConds:   pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds),
-        PartitionNames: ds.PartitionNames,
-        Columns:        ds.TblCols,
-        ColumnNames:    ds.OutputNames(),
-    }
-    // Add sort items for index scan for merge-sort operation between partitions.
- byItems := make([]*util.ByItems, 0, len(prop.SortItems)) - for _, si := range prop.SortItems { - byItems = append(byItems, &util.ByItems{ - Expr: si.Col, - Desc: si.Desc, - }) - } - globalRemainingFilters := make([]expression.Expression, 0, 3) - for _, partPath := range path.PartialIndexPaths { - var scan base.PhysicalPlan - if partPath.IsTablePath() { - scan = convertToPartialTableScan(ds, prop, partPath, candidate.isMatchProp, byItems) - } else { - var remainingFilters []expression.Expression - scan, remainingFilters, err = convertToPartialIndexScan(ds, cop.physPlanPartInfo, prop, partPath, candidate.isMatchProp, byItems) - if err != nil { - return base.InvalidTask, err - } - if prop.TaskTp != property.RootTaskType && len(remainingFilters) > 0 { - return base.InvalidTask, nil - } - globalRemainingFilters = append(globalRemainingFilters, remainingFilters...) - } - scans = append(scans, scan) - } - totalRowCount := path.CountAfterAccess - if prop.ExpectedCnt < ds.StatsInfo().RowCount { - totalRowCount *= prop.ExpectedCnt / ds.StatsInfo().RowCount - } - ts, remainingFilters2, moreColumn, err := buildIndexMergeTableScan(ds, path.TableFilters, totalRowCount, candidate.isMatchProp) - if err != nil { - return base.InvalidTask, err - } - if prop.TaskTp != property.RootTaskType && len(remainingFilters2) > 0 { - return base.InvalidTask, nil - } - globalRemainingFilters = append(globalRemainingFilters, remainingFilters2...) - cop.keepOrder = candidate.isMatchProp - cop.tablePlan = ts - cop.idxMergePartPlans = scans - cop.idxMergeIsIntersection = path.IndexMergeIsIntersection - cop.idxMergeAccessMVIndex = path.IndexMergeAccessMVIndex - if moreColumn { - cop.needExtraProj = true - cop.originSchema = ds.Schema() - } - if len(globalRemainingFilters) != 0 { - cop.rootTaskConds = globalRemainingFilters - } - // after we lift the limitation of intersection and cop-type task in the code in this - // function above, we could set its index plan finished as true once we found its table - // plan is pure table scan below. - // And this will cause cost underestimation when we estimate the cost of the entire cop - // task plan in function `getTaskPlanCost`. - if prop.TaskTp == property.RootTaskType { - cop.indexPlanFinished = true - task = cop.ConvertToRootTask(ds.SCtx()) - } else { - _, pureTableScan := ts.(*PhysicalTableScan) - if !pureTableScan { - cop.indexPlanFinished = true - } - task = cop - } - return task, nil -} - -func convertToPartialIndexScan(ds *DataSource, physPlanPartInfo *PhysPlanPartInfo, prop *property.PhysicalProperty, path *util.AccessPath, matchProp bool, byItems []*util.ByItems) (base.PhysicalPlan, []expression.Expression, error) { - is := getOriginalPhysicalIndexScan(ds, prop, path, matchProp, false) - // TODO: Consider using isIndexCoveringColumns() to avoid another TableRead - indexConds := path.IndexFilters - if matchProp { - if is.Table.GetPartitionInfo() != nil && !is.Index.Global && is.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() { - is.Columns, is.schema, _ = AddExtraPhysTblIDColumn(is.SCtx(), is.Columns, is.schema) - } - // Add sort items for index scan for merge-sort operation between partitions. - is.ByItems = byItems - } - - // Add a `Selection` for `IndexScan` with global index. - // It should pushdown to TiKV, DataSource schema doesn't contain partition id column. 
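A one-function standalone sketch of the row-count scaling applied above: when a parent operator (e.g. a Limit) expects fewer rows than the data source produces, the estimated scan row count is scaled down proportionally.

package main

import "fmt"

func scaleRowCount(countAfterAccess, dsRowCount, expectedCnt float64) float64 {
	if expectedCnt < dsRowCount {
		return countAfterAccess * expectedCnt / dsRowCount
	}
	return countAfterAccess
}

func main() {
	// 10000 rows estimated after access, the data source outputs 8000, LIMIT 100:
	fmt.Println(scaleRowCount(10000, 8000, 100)) // 125
}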
-    indexConds, err := is.addSelectionConditionForGlobalIndex(ds, physPlanPartInfo, indexConds)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    if len(indexConds) > 0 {
-        pushedFilters, remainingFilter := extractFiltersForIndexMerge(util.GetPushDownCtx(ds.SCtx()), indexConds)
-        var selectivity float64
-        if path.CountAfterAccess > 0 {
-            selectivity = path.CountAfterIndex / path.CountAfterAccess
-        }
-        rowCount := is.StatsInfo().RowCount * selectivity
-        stats := &property.StatsInfo{RowCount: rowCount}
-        stats.StatsVersion = ds.StatisticTable.Version
-        if ds.StatisticTable.Pseudo {
-            stats.StatsVersion = statistics.PseudoVersion
-        }
-        indexPlan := PhysicalSelection{Conditions: pushedFilters}.Init(is.SCtx(), stats, ds.QueryBlockOffset())
-        indexPlan.SetChildren(is)
-        return indexPlan, remainingFilter, nil
-    }
-    return is, nil, nil
-}
-
-func checkColinSchema(cols []*expression.Column, schema *expression.Schema) bool {
-    for _, col := range cols {
-        if schema.ColumnIndex(col) == -1 {
-            return false
-        }
-    }
-    return true
-}
-
-func convertToPartialTableScan(ds *DataSource, prop *property.PhysicalProperty, path *util.AccessPath, matchProp bool, byItems []*util.ByItems) (tablePlan base.PhysicalPlan) {
-    ts, rowCount := getOriginalPhysicalTableScan(ds, prop, path, matchProp)
-    overwritePartialTableScanSchema(ds, ts)
-    // remove ineffective filter conditions after overwriting the PhysicalTableScan schema
-    newFilterConds := make([]expression.Expression, 0, len(path.TableFilters))
-    for _, cond := range ts.filterCondition {
-        cols := expression.ExtractColumns(cond)
-        if checkColinSchema(cols, ts.schema) {
-            newFilterConds = append(newFilterConds, cond)
-        }
-    }
-    ts.filterCondition = newFilterConds
-    if matchProp {
-        if ts.Table.GetPartitionInfo() != nil && ts.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
-            ts.Columns, ts.schema, _ = AddExtraPhysTblIDColumn(ts.SCtx(), ts.Columns, ts.schema)
-        }
-        ts.ByItems = byItems
-    }
-    if len(ts.filterCondition) > 0 {
-        selectivity, _, err := cardinality.Selectivity(ds.SCtx(), ds.TableStats.HistColl, ts.filterCondition, nil)
-        if err != nil {
-            logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
-            selectivity = cost.SelectionFactor
-        }
-        tablePlan = PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.SCtx(), ts.StatsInfo().ScaleByExpectCnt(selectivity*rowCount), ds.QueryBlockOffset())
-        tablePlan.SetChildren(ts)
-        return tablePlan
-    }
-    tablePlan = ts
-    return tablePlan
-}
-
-// overwritePartialTableScanSchema changes the schema of the partial table scan to the handle columns.
-func overwritePartialTableScanSchema(ds *DataSource, ts *PhysicalTableScan) {
-    handleCols := ds.HandleCols
-    if handleCols == nil {
-        handleCols = util.NewIntHandleCols(ds.newExtraHandleSchemaCol())
-    }
-    hdColNum := handleCols.NumCols()
-    exprCols := make([]*expression.Column, 0, hdColNum)
-    infoCols := make([]*model.ColumnInfo, 0, hdColNum)
-    for i := 0; i < hdColNum; i++ {
-        col := handleCols.GetCol(i)
-        exprCols = append(exprCols, col)
-        if c := model.FindColumnInfoByID(ds.TableInfo.Columns, col.ID); c != nil {
-            infoCols = append(infoCols, c)
-        } else {
-            infoCols = append(infoCols, col.ToInfo())
-        }
-    }
-    ts.schema = expression.NewSchema(exprCols...)
-    ts.Columns = infoCols
-}
-
-// setIndexMergeTableScanHandleCols sets the handle columns of the table scan.
-func setIndexMergeTableScanHandleCols(ds *DataSource, ts *PhysicalTableScan) (err error) { - handleCols := ds.HandleCols - if handleCols == nil { - handleCols = util.NewIntHandleCols(ds.newExtraHandleSchemaCol()) - } - hdColNum := handleCols.NumCols() - exprCols := make([]*expression.Column, 0, hdColNum) - for i := 0; i < hdColNum; i++ { - col := handleCols.GetCol(i) - exprCols = append(exprCols, col) - } - ts.HandleCols, err = handleCols.ResolveIndices(expression.NewSchema(exprCols...)) - return -} - -// buildIndexMergeTableScan() returns Selection that will be pushed to TiKV. -// Filters that cannot be pushed to TiKV are also returned, and an extra Selection above IndexMergeReader will be constructed later. -func buildIndexMergeTableScan(ds *DataSource, tableFilters []expression.Expression, - totalRowCount float64, matchProp bool) (base.PhysicalPlan, []expression.Expression, bool, error) { - ts := PhysicalTableScan{ - Table: ds.TableInfo, - Columns: slices.Clone(ds.Columns), - TableAsName: ds.TableAsName, - DBName: ds.DBName, - isPartition: ds.PartitionDefIdx != nil, - physicalTableID: ds.PhysicalTableID, - HandleCols: ds.HandleCols, - tblCols: ds.TblCols, - tblColHists: ds.TblColHists, - }.Init(ds.SCtx(), ds.QueryBlockOffset()) - ts.SetSchema(ds.Schema().Clone()) - err := setIndexMergeTableScanHandleCols(ds, ts) - if err != nil { - return nil, nil, false, err - } - ts.SetStats(ds.TableStats.ScaleByExpectCnt(totalRowCount)) - usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false) - if usedStats != nil && usedStats.GetUsedInfo(ts.physicalTableID) != nil { - ts.usedStatsInfo = usedStats.GetUsedInfo(ts.physicalTableID) - } - if ds.StatisticTable.Pseudo { - ts.StatsInfo().StatsVersion = statistics.PseudoVersion - } - var currentTopPlan base.PhysicalPlan = ts - if len(tableFilters) > 0 { - pushedFilters, remainingFilters := extractFiltersForIndexMerge(util.GetPushDownCtx(ds.SCtx()), tableFilters) - pushedFilters1, remainingFilters1 := SplitSelCondsWithVirtualColumn(pushedFilters) - pushedFilters = pushedFilters1 - remainingFilters = append(remainingFilters, remainingFilters1...) - if len(pushedFilters) != 0 { - selectivity, _, err := cardinality.Selectivity(ds.SCtx(), ds.TableStats.HistColl, pushedFilters, nil) - if err != nil { - logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) - selectivity = cost.SelectionFactor - } - sel := PhysicalSelection{Conditions: pushedFilters}.Init(ts.SCtx(), ts.StatsInfo().ScaleByExpectCnt(selectivity*totalRowCount), ts.QueryBlockOffset()) - sel.SetChildren(ts) - currentTopPlan = sel - } - if len(remainingFilters) > 0 { - return currentTopPlan, remainingFilters, false, nil - } - } - // If we don't need to use ordered scan, we don't need do the following codes for adding new columns. - if !matchProp { - return currentTopPlan, nil, false, nil - } - - // Add the row handle into the schema. 
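A minimal standalone model of the filter split performed in buildIndexMergeTableScan above. Predicate "pushability" is mocked by a callback here; the real check is expression.CanExprsPushDown. Pushable filters stay in the pushed-down Selection, while the rest bubble up to a Selection above the reader.

package main

import "fmt"

func splitFilters(filters []string, canPush func(string) bool) (pushed, remaining []string) {
	for _, f := range filters {
		if canPush(f) {
			pushed = append(pushed, f)
		} else {
			remaining = append(remaining, f)
		}
	}
	return
}

func main() {
	// assumed rule: filters on virtual columns cannot be pushed down
	canPush := func(f string) bool { return f != "vcol = 3" }
	pushed, remaining := splitFilters([]string{"b > 1", "vcol = 3"}, canPush)
	fmt.Println(pushed, remaining) // [b > 1] [vcol = 3]
}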
-    columnAdded := false
-    if ts.Table.PKIsHandle {
-        pk := ts.Table.GetPkColInfo()
-        pkCol := expression.ColInfo2Col(ts.tblCols, pk)
-        if !ts.schema.Contains(pkCol) {
-            ts.schema.Append(pkCol)
-            ts.Columns = append(ts.Columns, pk)
-            columnAdded = true
-        }
-    } else if ts.Table.IsCommonHandle {
-        idxInfo := ts.Table.GetPrimaryKey()
-        for _, idxCol := range idxInfo.Columns {
-            col := ts.tblCols[idxCol.Offset]
-            if !ts.schema.Contains(col) {
-                columnAdded = true
-                ts.schema.Append(col)
-                ts.Columns = append(ts.Columns, col.ToInfo())
-            }
-        }
-    } else if !ts.schema.Contains(ts.HandleCols.GetCol(0)) {
-        ts.schema.Append(ts.HandleCols.GetCol(0))
-        ts.Columns = append(ts.Columns, model.NewExtraHandleColInfo())
-        columnAdded = true
-    }
-
-    // For the global index of the partitioned table, we also need the PhysicalTblID to identify the rows from each partition.
-    if ts.Table.GetPartitionInfo() != nil && ts.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
-        var newColAdded bool
-        ts.Columns, ts.schema, newColAdded = AddExtraPhysTblIDColumn(ts.SCtx(), ts.Columns, ts.schema)
-        columnAdded = columnAdded || newColAdded
-    }
-    return currentTopPlan, nil, columnAdded, nil
-}
-
-// extractFiltersForIndexMerge returns:
-// `pushed`: exprs that can be pushed to TiKV.
-// `remaining`: exprs that can NOT be pushed to TiKV but can be pushed to other storage engines.
-// Why do we need this func?
-// IndexMerge only works on TiKV, so we need to find all exprs that cannot be pushed to TiKV, and add a new Selection above IndexMergeReader.
-//
-// But the new Selection should exclude the exprs that can NOT be pushed to ALL the storage engines.
-// Because these exprs have already been put in another Selection (see rule_predicate_push_down).
-func extractFiltersForIndexMerge(ctx expression.PushDownContext, filters []expression.Expression) (pushed []expression.Expression, remaining []expression.Expression) {
-    for _, expr := range filters {
-        if expression.CanExprsPushDown(ctx, []expression.Expression{expr}, kv.TiKV) {
-            pushed = append(pushed, expr)
-            continue
-        }
-        if expression.CanExprsPushDown(ctx, []expression.Expression{expr}, kv.UnSpecified) {
-            remaining = append(remaining, expr)
-        }
-    }
-    return
-}
-
-func isIndexColsCoveringCol(sctx expression.EvalContext, col *expression.Column, indexCols []*expression.Column, idxColLens []int, ignoreLen bool) bool {
-    for i, indexCol := range indexCols {
-        if indexCol == nil || !col.EqualByExprAndID(sctx, indexCol) {
-            continue
-        }
-        if ignoreLen || idxColLens[i] == types.UnspecifiedLength || idxColLens[i] == col.RetType.GetFlen() {
-            return true
-        }
-    }
-    return false
-}
-
-func indexCoveringColumn(ds *DataSource, column *expression.Column, indexColumns []*expression.Column, idxColLens []int, ignoreLen bool) bool {
-    if ds.TableInfo.PKIsHandle && mysql.HasPriKeyFlag(column.RetType.GetFlag()) {
-        return true
-    }
-    if column.ID == model.ExtraHandleID || column.ID == model.ExtraPhysTblID {
-        return true
-    }
-    evalCtx := ds.SCtx().GetExprCtx().GetEvalCtx()
-    coveredByPlainIndex := isIndexColsCoveringCol(evalCtx, column, indexColumns, idxColLens, ignoreLen)
-    coveredByClusteredIndex := isIndexColsCoveringCol(evalCtx, column, ds.CommonHandleCols, ds.CommonHandleLens, ignoreLen)
-    if !coveredByPlainIndex && !coveredByClusteredIndex {
-        return false
-    }
-    isClusteredNewCollationIdx := collate.NewCollationEnabled() &&
-        column.GetType(evalCtx).EvalType() == types.ETString &&
-        !mysql.HasBinaryFlag(column.GetType(evalCtx).GetFlag())
-    if !coveredByPlainIndex &&
-        coveredByClusteredIndex && isClusteredNewCollationIdx && ds.table.Meta().CommonHandleVersion == 0 {
-        return false
-    }
-    return true
-}
-
-func isIndexCoveringColumns(ds *DataSource, columns, indexColumns []*expression.Column, idxColLens []int) bool {
-    for _, col := range columns {
-        if !indexCoveringColumn(ds, col, indexColumns, idxColLens, false) {
-            return false
-        }
-    }
-    return true
-}
-
-func isIndexCoveringCondition(ds *DataSource, condition expression.Expression, indexColumns []*expression.Column, idxColLens []int) bool {
-    switch v := condition.(type) {
-    case *expression.Column:
-        return indexCoveringColumn(ds, v, indexColumns, idxColLens, false)
-    case *expression.ScalarFunction:
-        // Even if the index only contains the prefix of `col`, the index can cover `col is null`.
-        if v.FuncName.L == ast.IsNull {
-            if col, ok := v.GetArgs()[0].(*expression.Column); ok {
-                return indexCoveringColumn(ds, col, indexColumns, idxColLens, true)
-            }
-        }
-        for _, arg := range v.GetArgs() {
-            if !isIndexCoveringCondition(ds, arg, indexColumns, idxColLens) {
-                return false
-            }
-        }
-        return true
-    }
-    return true
-}
-
-func isSingleScan(ds *DataSource, indexColumns []*expression.Column, idxColLens []int) bool {
-    if !ds.SCtx().GetSessionVars().OptPrefixIndexSingleScan || ds.ColsRequiringFullLen == nil {
-        // ds.ColsRequiringFullLen is set at (*DataSource).PruneColumns. In some cases we don't reach (*DataSource).PruneColumns
-        // and ds.ColsRequiringFullLen is nil, so we fall back to ds.isIndexCoveringColumns(ds.schema.Columns, indexColumns, idxColLens).
-        return isIndexCoveringColumns(ds, ds.Schema().Columns, indexColumns, idxColLens)
-    }
-    if !isIndexCoveringColumns(ds, ds.ColsRequiringFullLen, indexColumns, idxColLens) {
-        return false
-    }
-    for _, cond := range ds.AllConds {
-        if !isIndexCoveringCondition(ds, cond, indexColumns, idxColLens) {
-            return false
-        }
-    }
-    return true
-}
-
-// If there is a table reader which needs to keep order, we should append a pk to the table scan.
-func (ts *PhysicalTableScan) appendExtraHandleCol(ds *DataSource) (*expression.Column, bool) {
-    handleCols := ds.HandleCols
-    if handleCols != nil {
-        return handleCols.GetCol(0), false
-    }
-    handleCol := ds.newExtraHandleSchemaCol()
-    ts.schema.Append(handleCol)
-    ts.Columns = append(ts.Columns, model.NewExtraHandleColInfo())
-    return handleCol, true
-}
-
-// convertToIndexScan converts the DataSource to an index scan with idx.
-func convertToIndexScan(ds *DataSource, prop *property.PhysicalProperty,
-    candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (task base.Task, err error) {
-    if candidate.path.Index.MVIndex {
-        // MVIndex is special since different index rows may return the same _row_id and this can break some assumptions of IndexReader.
-        // Currently we only support accessing MVIndex through IndexMerge instead of IndexReader.
-        // TODO: make IndexReader support accessing MVIndex directly.
-        return base.InvalidTask, nil
-    }
-    if !candidate.path.IsSingleScan {
-        // If its parent requires a single-read task, this double-read path is invalid.
-        if prop.TaskTp == property.CopSingleReadTaskType {
-            return base.InvalidTask, nil
-        }
-    } else if prop.TaskTp == property.CopMultiReadTaskType {
-        // If its parent requires a double-read task, this single-read path is invalid.
-        return base.InvalidTask, nil
-    }
-    if !prop.IsSortItemEmpty() && !candidate.isMatchProp {
-        return base.InvalidTask, nil
-    }
-    // If we don't need to keep order for the index scan, we should forbid the index scan that is forced (by hint) to keep order when we try to generate the path.
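[Editor's note] The two hint checks around this point form a small truth table: a path forced to keep order is invalid when no order is required, and a path forced not to keep order is invalid when order is required. A self-contained sketch of that gate, with plain booleans standing in for the real property/path types:

    package main

    import "fmt"

    // orderHintValid reports whether a path survives the ORDER_INDEX /
    // NO_ORDER_INDEX hints, given whether the parent requires ordered output.
    func orderHintValid(needOrder, forceKeepOrder, forceNoKeepOrder bool) bool {
        if !needOrder && forceKeepOrder {
            return false // keep-order forced, but order is useless here
        }
        if needOrder && forceNoKeepOrder {
            return false // order required, but the hint forbids keeping it
        }
        return true
    }

    func main() {
        fmt.Println(orderHintValid(false, true, false)) // false
        fmt.Println(orderHintValid(true, false, true))  // false
        fmt.Println(orderHintValid(true, true, false))  // true
    }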
-    if prop.IsSortItemEmpty() && candidate.path.ForceKeepOrder {
-        return base.InvalidTask, nil
-    }
-    // If we need to keep order for the index scan, we should forbid the index scan that is forced (by hint) not to keep order when we try to generate the path.
-    if !prop.IsSortItemEmpty() && candidate.path.ForceNoKeepOrder {
-        return base.InvalidTask, nil
-    }
-    path := candidate.path
-    is := getOriginalPhysicalIndexScan(ds, prop, path, candidate.isMatchProp, candidate.path.IsSingleScan)
-    cop := &CopTask{
-        indexPlan: is,
-        tblColHists: ds.TblColHists,
-        tblCols: ds.TblCols,
-        expectCnt: uint64(prop.ExpectedCnt),
-    }
-    cop.physPlanPartInfo = &PhysPlanPartInfo{
-        PruningConds: pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds),
-        PartitionNames: ds.PartitionNames,
-        Columns: ds.TblCols,
-        ColumnNames: ds.OutputNames(),
-    }
-    if !candidate.path.IsSingleScan {
-        // In this case, it's a double-read task.
-        ts := PhysicalTableScan{
-            Columns: util.CloneColInfos(ds.Columns),
-            Table: is.Table,
-            TableAsName: ds.TableAsName,
-            DBName: ds.DBName,
-            isPartition: ds.PartitionDefIdx != nil,
-            physicalTableID: ds.PhysicalTableID,
-            tblCols: ds.TblCols,
-            tblColHists: ds.TblColHists,
-        }.Init(ds.SCtx(), is.QueryBlockOffset())
-        ts.SetSchema(ds.Schema().Clone())
-        // We set `StatsVersion` here and fill other fields in `(*copTask).finishIndexPlan`. Since `copTask.indexPlan` may
-        // change before calling `(*copTask).finishIndexPlan`, we don't know the stats information of `ts` currently and on
-        // the other hand, it may be hard to identify `StatsVersion` of `ts` in `(*copTask).finishIndexPlan`.
-        ts.SetStats(&property.StatsInfo{StatsVersion: ds.TableStats.StatsVersion})
-        usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false)
-        if usedStats != nil && usedStats.GetUsedInfo(ts.physicalTableID) != nil {
-            ts.usedStatsInfo = usedStats.GetUsedInfo(ts.physicalTableID)
-        }
-        cop.tablePlan = ts
-    }
-    task = cop
-    if cop.tablePlan != nil && ds.TableInfo.IsCommonHandle {
-        cop.commonHandleCols = ds.CommonHandleCols
-        commonHandle := ds.HandleCols.(*util.CommonHandleCols)
-        for _, col := range commonHandle.GetColumns() {
-            if ds.Schema().ColumnIndex(col) == -1 {
-                ts := cop.tablePlan.(*PhysicalTableScan)
-                ts.Schema().Append(col)
-                ts.Columns = append(ts.Columns, col.ToInfo())
-                cop.needExtraProj = true
-            }
-        }
-    }
-    if candidate.isMatchProp {
-        cop.keepOrder = true
-        if cop.tablePlan != nil && !ds.TableInfo.IsCommonHandle {
-            col, isNew := cop.tablePlan.(*PhysicalTableScan).appendExtraHandleCol(ds)
-            cop.extraHandleCol = col
-            cop.needExtraProj = cop.needExtraProj || isNew
-        }
-
-        if ds.TableInfo.GetPartitionInfo() != nil {
-            // Add sort items for the index scan for the merge-sort operation between partitions; only required for local indexes.
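[Editor's note] The ByItems built below exist so that each partition's scan returns rows in the required order, letting the reader merge the per-partition streams instead of re-sorting. A self-contained sketch of that merge step over two already-sorted streams (simplified to int slices):

    package main

    import "fmt"

    // mergeSorted merges two ascending slices into one ascending slice,
    // the same way a partition-aware reader merges ordered partition outputs.
    func mergeSorted(a, b []int) []int {
        out := make([]int, 0, len(a)+len(b))
        i, j := 0, 0
        for i < len(a) && j < len(b) {
            if a[i] <= b[j] {
                out = append(out, a[i])
                i++
            } else {
                out = append(out, b[j])
                j++
            }
        }
        out = append(out, a[i:]...)
        return append(out, b[j:]...)
    }

    func main() {
        p0 := []int{1, 4, 7} // partition 0, already ordered
        p1 := []int{2, 3, 9} // partition 1, already ordered
        fmt.Println(mergeSorted(p0, p1)) // [1 2 3 4 7 9]
    }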
-            if !is.Index.Global {
-                byItems := make([]*util.ByItems, 0, len(prop.SortItems))
-                for _, si := range prop.SortItems {
-                    byItems = append(byItems, &util.ByItems{
-                        Expr: si.Col,
-                        Desc: si.Desc,
-                    })
-                }
-                cop.indexPlan.(*PhysicalIndexScan).ByItems = byItems
-            }
-            if cop.tablePlan != nil && ds.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
-                if !is.Index.Global {
-                    is.Columns, is.schema, _ = AddExtraPhysTblIDColumn(is.SCtx(), is.Columns, is.Schema())
-                }
-                var succ bool
-                // A global index for a table scan with keepOrder also needs the PhysicalTblID.
-                ts := cop.tablePlan.(*PhysicalTableScan)
-                ts.Columns, ts.schema, succ = AddExtraPhysTblIDColumn(ts.SCtx(), ts.Columns, ts.Schema())
-                cop.needExtraProj = cop.needExtraProj || succ
-            }
-        }
-    }
-    if cop.needExtraProj {
-        cop.originSchema = ds.Schema()
-    }
-    // prop.IsSortItemEmpty() would always return true when we reach here,
-    // so we can just use prop.ExpectedCnt as the parameter of addPushedDownSelection.
-    finalStats := ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt)
-    if err = is.addPushedDownSelection(cop, ds, path, finalStats); err != nil {
-        return base.InvalidTask, err
-    }
-    if prop.TaskTp == property.RootTaskType {
-        task = task.ConvertToRootTask(ds.SCtx())
-    } else if _, ok := task.(*RootTask); ok {
-        return base.InvalidTask, nil
-    }
-    return task, nil
-}
-
-func (is *PhysicalIndexScan) getScanRowSize() float64 {
-    idx := is.Index
-    scanCols := make([]*expression.Column, 0, len(idx.Columns)+1)
-    // If `initSchema` has already appended the handle column to the schema, just use the schema columns; otherwise, add the extra handle column.
-    if len(idx.Columns) == len(is.schema.Columns) {
-        scanCols = append(scanCols, is.schema.Columns...)
-        handleCol := is.pkIsHandleCol
-        if handleCol != nil {
-            scanCols = append(scanCols, handleCol)
-        }
-    } else {
-        scanCols = is.schema.Columns
-    }
-    return cardinality.GetIndexAvgRowSize(is.SCtx(), is.tblColHists, scanCols, is.Index.Unique)
-}
-
-// initSchema is used to set the schema of PhysicalIndexScan. Before calling this,
-// make sure the following fields of PhysicalIndexScan are initialized:
-//
-//    PhysicalIndexScan.Table         *model.TableInfo
-//    PhysicalIndexScan.Index         *model.IndexInfo
-//    PhysicalIndexScan.Index.Columns []*IndexColumn
-//    PhysicalIndexScan.IdxCols       []*expression.Column
-//    PhysicalIndexScan.Columns       []*model.ColumnInfo
-func (is *PhysicalIndexScan) initSchema(idxExprCols []*expression.Column, isDoubleRead bool) {
-    indexCols := make([]*expression.Column, len(is.IdxCols), len(is.Index.Columns)+1)
-    copy(indexCols, is.IdxCols)
-
-    for i := len(is.IdxCols); i < len(is.Index.Columns); i++ {
-        if idxExprCols[i] != nil {
-            indexCols = append(indexCols, idxExprCols[i])
-        } else {
-            // TODO: try to reuse the col generated when building the DataSource.
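[Editor's note] The branch below materializes an index column that wasn't built earlier, so it must mint a fresh column with a process-unique ID. A self-contained sketch of that unique-ID allocation pattern, with an atomic counter standing in for the session's plan-column allocator:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    var planColumnID atomic.Int64

    type Column struct {
        ID       int64 // column ID from table metadata
        UniqueID int64 // per-plan unique ID, never reused
    }

    // newPlanColumn wraps a table column ID with a fresh plan-unique ID.
    func newPlanColumn(tableColID int64) *Column {
        return &Column{ID: tableColID, UniqueID: planColumnID.Add(1)}
    }

    func main() {
        c1 := newPlanColumn(10)
        c2 := newPlanColumn(10) // same table column, distinct plan identity
        fmt.Println(c1.UniqueID != c2.UniqueID) // true
    }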
-            indexCols = append(indexCols, &expression.Column{
-                ID: is.Table.Columns[is.Index.Columns[i].Offset].ID,
-                RetType: &is.Table.Columns[is.Index.Columns[i].Offset].FieldType,
-                UniqueID: is.SCtx().GetSessionVars().AllocPlanColumnID(),
-            })
-        }
-    }
-    is.NeedCommonHandle = is.Table.IsCommonHandle
-
-    if is.NeedCommonHandle {
-        for i := len(is.Index.Columns); i < len(idxExprCols); i++ {
-            indexCols = append(indexCols, idxExprCols[i])
-        }
-    }
-    setHandle := len(indexCols) > len(is.Index.Columns)
-    if !setHandle {
-        for i, col := range is.Columns {
-            if (mysql.HasPriKeyFlag(col.GetFlag()) && is.Table.PKIsHandle) || col.ID == model.ExtraHandleID {
-                indexCols = append(indexCols, is.dataSourceSchema.Columns[i])
-                setHandle = true
-                break
-            }
-        }
-    }
-
-    var extraPhysTblCol *expression.Column
-    // If `dataSourceSchema` contains `model.ExtraPhysTblID`, we should add it into `indexScan.schema`.
-    for _, col := range is.dataSourceSchema.Columns {
-        if col.ID == model.ExtraPhysTblID {
-            extraPhysTblCol = col.Clone().(*expression.Column)
-            break
-        }
-    }
-
-    if isDoubleRead || is.Index.Global {
-        // In the double-read case, the index scan must return the handle. So we should add an extra handle column
-        // if there isn't one already.
-        if !setHandle {
-            if !is.Table.IsCommonHandle {
-                indexCols = append(indexCols, &expression.Column{
-                    RetType: types.NewFieldType(mysql.TypeLonglong),
-                    ID: model.ExtraHandleID,
-                    UniqueID: is.SCtx().GetSessionVars().AllocPlanColumnID(),
-                    OrigName: model.ExtraHandleName.O,
-                })
-            }
-        }
-        // For a global index, the handle and PhysTblID columns have to be added, so that the needed partition IDs can be filtered.
-        if is.Index.Global && extraPhysTblCol == nil {
-            indexCols = append(indexCols, &expression.Column{
-                RetType: types.NewFieldType(mysql.TypeLonglong),
-                ID: model.ExtraPhysTblID,
-                UniqueID: is.SCtx().GetSessionVars().AllocPlanColumnID(),
-                OrigName: model.ExtraPhysTblIDName.O,
-            })
-        }
-    }
-
-    if extraPhysTblCol != nil {
-        indexCols = append(indexCols, extraPhysTblCol)
-    }
-
-    is.SetSchema(expression.NewSchema(indexCols...))
-}
-
-func (is *PhysicalIndexScan) addSelectionConditionForGlobalIndex(p *DataSource, physPlanPartInfo *PhysPlanPartInfo, conditions []expression.Expression) ([]expression.Expression, error) {
-    if !is.Index.Global {
-        return conditions, nil
-    }
-    args := make([]expression.Expression, 0, len(p.PartitionNames)+1)
-    for _, col := range is.schema.Columns {
-        if col.ID == model.ExtraPhysTblID {
-            args = append(args, col.Clone())
-            break
-        }
-    }
-
-    if len(args) != 1 {
-        return nil, errors.Errorf("Can't find column %s in schema %s", model.ExtraPhysTblIDName.O, is.schema)
-    }
-
-    // For SQL like 'select x from t partition(p0, p1) use index(idx)',
-    // we will add a `Selection` like `in(t._tidb_pid, p0, p1)` into the plan.
-    // For truncate/drop partitions, we should only return indexes where the partitions are still in public state.
-    idxArr, err := PartitionPruning(p.SCtx(), p.table.GetPartitionedTable(),
-        physPlanPartInfo.PruningConds,
-        physPlanPartInfo.PartitionNames,
-        physPlanPartInfo.Columns,
-        physPlanPartInfo.ColumnNames)
-    if err != nil {
-        return nil, err
-    }
-    needNot := false
-    pInfo := p.TableInfo.GetPartitionInfo()
-    if len(idxArr) == 1 && idxArr[0] == FullRange {
-        // Only filter adding and dropping partitions.
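[Editor's note] The code below turns the partition-pruning result into a single predicate over the hidden physical-table-ID column: either `IN (kept partition IDs)` or, when everything is kept except in-flight DDL partitions, `NOT IN (adding/dropping IDs)`. A self-contained sketch of that selection logic over plain int64 IDs:

    package main

    import "fmt"

    // buildPidFilter returns a predicate over a row's physical table ID.
    // keep == nil means "all partitions", excluding the in-flight DDL ones.
    func buildPidFilter(keep []int64, inFlightDDL []int64) func(int64) bool {
        if keep == nil {
            excluded := make(map[int64]bool, len(inFlightDDL))
            for _, id := range inFlightDDL {
                excluded[id] = true
            }
            return func(pid int64) bool { return !excluded[pid] } // NOT IN
        }
        kept := make(map[int64]bool, len(keep))
        for _, id := range keep {
            kept[id] = true
        }
        return func(pid int64) bool { return kept[pid] } // IN
    }

    func main() {
        f := buildPidFilter(nil, []int64{99}) // full range, partition 99 being dropped
        fmt.Println(f(1), f(99))              // true false
        g := buildPidFilter([]int64{1, 2}, nil)
        fmt.Println(g(2), g(3)) // true false
    }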
-        if len(pInfo.AddingDefinitions) == 0 && len(pInfo.DroppingDefinitions) == 0 {
-            return conditions, nil
-        }
-        needNot = true
-        for _, p := range pInfo.AddingDefinitions {
-            args = append(args, expression.NewInt64Const(p.ID))
-        }
-        for _, p := range pInfo.DroppingDefinitions {
-            args = append(args, expression.NewInt64Const(p.ID))
-        }
-    } else if len(idxArr) == 0 {
-        // Add an invalid pid as the param for the `IN` function.
-        args = append(args, expression.NewInt64Const(-1))
-    } else {
-        // The `PartitionPruning` func does not return adding and dropping partitions.
-        for _, idx := range idxArr {
-            args = append(args, expression.NewInt64Const(pInfo.Definitions[idx].ID))
-        }
-    }
-    condition, err := expression.NewFunction(p.SCtx().GetExprCtx(), ast.In, types.NewFieldType(mysql.TypeLonglong), args...)
-    if err != nil {
-        return nil, err
-    }
-    if needNot {
-        condition, err = expression.NewFunction(p.SCtx().GetExprCtx(), ast.UnaryNot, types.NewFieldType(mysql.TypeLonglong), condition)
-        if err != nil {
-            return nil, err
-        }
-    }
-    return append(conditions, condition), nil
-}
-
-func (is *PhysicalIndexScan) addPushedDownSelection(copTask *CopTask, p *DataSource, path *util.AccessPath, finalStats *property.StatsInfo) error {
-    // Add filter condition to table plan now.
-    indexConds, tableConds := path.IndexFilters, path.TableFilters
-    tableConds, copTask.rootTaskConds = SplitSelCondsWithVirtualColumn(tableConds)
-
-    var newRootConds []expression.Expression
-    pctx := util.GetPushDownCtx(is.SCtx())
-    indexConds, newRootConds = expression.PushDownExprs(pctx, indexConds, kv.TiKV)
-    copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...)
-
-    tableConds, newRootConds = expression.PushDownExprs(pctx, tableConds, kv.TiKV)
-    copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...)
-
-    // Add a `Selection` for the `IndexScan` with a global index.
-    // It should be pushed down to TiKV; the DataSource schema doesn't contain the partition ID column.
-    indexConds, err := is.addSelectionConditionForGlobalIndex(p, copTask.physPlanPartInfo, indexConds)
-    if err != nil {
-        return err
-    }
-
-    if indexConds != nil {
-        var selectivity float64
-        if path.CountAfterAccess > 0 {
-            selectivity = path.CountAfterIndex / path.CountAfterAccess
-        }
-        count := is.StatsInfo().RowCount * selectivity
-        stats := p.TableStats.ScaleByExpectCnt(count)
-        indexSel := PhysicalSelection{Conditions: indexConds}.Init(is.SCtx(), stats, is.QueryBlockOffset())
-        indexSel.SetChildren(is)
-        copTask.indexPlan = indexSel
-    }
-    if len(tableConds) > 0 {
-        copTask.finishIndexPlan()
-        tableSel := PhysicalSelection{Conditions: tableConds}.Init(is.SCtx(), finalStats, is.QueryBlockOffset())
-        if len(copTask.rootTaskConds) != 0 {
-            selectivity, _, err := cardinality.Selectivity(is.SCtx(), copTask.tblColHists, tableConds, nil)
-            if err != nil {
-                logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
-                selectivity = cost.SelectionFactor
-            }
-            tableSel.SetStats(copTask.Plan().StatsInfo().Scale(selectivity))
-        }
-        tableSel.SetChildren(copTask.tablePlan)
-        copTask.tablePlan = tableSel
-    }
-    return nil
-}
-
-// NeedExtraOutputCol is designed to check whether an extra column for the
-// pid or the physical table ID is needed when building the index request.
-func (is *PhysicalIndexScan) NeedExtraOutputCol() bool {
-    if is.Table.Partition == nil {
-        return false
-    }
-    // Has a global index: should return the pid.
-    if is.Index.Global {
-        return true
-    }
-    // Has an embedded limit: should return the physical table ID.
-    if len(is.ByItems) != 0 && is.SCtx().GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
-        return true
-    }
-    return false
-}
-
-// SplitSelCondsWithVirtualColumn filters the selection conditions that contain a virtual column.
-func SplitSelCondsWithVirtualColumn(conds []expression.Expression) (withoutVirt []expression.Expression, withVirt []expression.Expression) {
-    for i := range conds {
-        if expression.ContainVirtualColumn(conds[i : i+1]) {
-            withVirt = append(withVirt, conds[i])
-        } else {
-            withoutVirt = append(withoutVirt, conds[i])
-        }
-    }
-    return withoutVirt, withVirt
-}
-
-func matchIndicesProp(sctx base.PlanContext, idxCols []*expression.Column, colLens []int, propItems []property.SortItem) bool {
-    if len(idxCols) < len(propItems) {
-        return false
-    }
-    for i, item := range propItems {
-        if colLens[i] != types.UnspecifiedLength || !item.Col.EqualByExprAndID(sctx.GetExprCtx().GetEvalCtx(), idxCols[i]) {
-            return false
-        }
-    }
-    return true
-}
-
-func splitIndexFilterConditions(ds *DataSource, conditions []expression.Expression, indexColumns []*expression.Column,
-    idxColLens []int) (indexConds, tableConds []expression.Expression) {
-    var indexConditions, tableConditions []expression.Expression
-    for _, cond := range conditions {
-        var covered bool
-        if ds.SCtx().GetSessionVars().OptPrefixIndexSingleScan {
-            covered = isIndexCoveringCondition(ds, cond, indexColumns, idxColLens)
-        } else {
-            covered = isIndexCoveringColumns(ds, expression.ExtractColumns(cond), indexColumns, idxColLens)
-        }
-        if covered {
-            indexConditions = append(indexConditions, cond)
-        } else {
-            tableConditions = append(tableConditions, cond)
-        }
-    }
-    return indexConditions, tableConditions
-}
-
-// GetPhysicalScan4LogicalTableScan returns a PhysicalTableScan for the LogicalTableScan.
-func GetPhysicalScan4LogicalTableScan(s *LogicalTableScan, schema *expression.Schema, stats *property.StatsInfo) *PhysicalTableScan {
-    ds := s.Source
-    ts := PhysicalTableScan{
-        Table: ds.TableInfo,
-        Columns: ds.Columns,
-        TableAsName: ds.TableAsName,
-        DBName: ds.DBName,
-        isPartition: ds.PartitionDefIdx != nil,
-        physicalTableID: ds.PhysicalTableID,
-        Ranges: s.Ranges,
-        AccessCondition: s.AccessConds,
-        tblCols: ds.TblCols,
-        tblColHists: ds.TblColHists,
-    }.Init(s.SCtx(), s.QueryBlockOffset())
-    ts.SetStats(stats)
-    ts.SetSchema(schema.Clone())
-    return ts
-}
-
-// GetPhysicalIndexScan4LogicalIndexScan returns a PhysicalIndexScan for the logical IndexScan.
-func GetPhysicalIndexScan4LogicalIndexScan(s *LogicalIndexScan, _ *expression.Schema, stats *property.StatsInfo) *PhysicalIndexScan {
-    ds := s.Source
-    is := PhysicalIndexScan{
-        Table: ds.TableInfo,
-        TableAsName: ds.TableAsName,
-        DBName: ds.DBName,
-        Columns: s.Columns,
-        Index: s.Index,
-        IdxCols: s.IdxCols,
-        IdxColLens: s.IdxColLens,
-        AccessCondition: s.AccessConds,
-        Ranges: s.Ranges,
-        dataSourceSchema: ds.Schema(),
-        isPartition: ds.PartitionDefIdx != nil,
-        physicalTableID: ds.PhysicalTableID,
-        tblColHists: ds.TblColHists,
-        pkIsHandleCol: ds.getPKIsHandleCol(),
-    }.Init(ds.SCtx(), ds.QueryBlockOffset())
-    is.SetStats(stats)
-    is.initSchema(s.FullIdxCols, s.IsDoubleRead)
-    return is
-}
-
-// isPointGetPath indicates whether the conditions are point-get-able.
-// e.g. create table t(a int, b int, c int unique, primary key(a,b));
-// select * from t where a = 1 and b = 1 and c = 1;
-// The datasource can be accessed by primary key (a,b) or unique key c, both of which are point-get-able.
-func isPointGetPath(ds *DataSource, path *util.AccessPath) bool {
-    if len(path.Ranges) < 1 {
-        return false
-    }
-    if !path.IsIntHandlePath {
-        if path.Index == nil {
-            return false
-        }
-        if !path.Index.Unique || path.Index.HasPrefixIndex() {
-            return false
-        }
-        idxColsLen := len(path.Index.Columns)
-        for _, ran := range path.Ranges {
-            if len(ran.LowVal) != idxColsLen {
-                return false
-            }
-        }
-    }
-    tc := ds.SCtx().GetSessionVars().StmtCtx.TypeCtx()
-    for _, ran := range path.Ranges {
-        if !ran.IsPointNonNullable(tc) {
-            return false
-        }
-    }
-    return true
-}
-
-// convertToTableScan converts the DataSource to a table scan.
-func convertToTableScan(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, error) {
-    // It will be handled in convertToIndexScan.
-    if prop.TaskTp == property.CopMultiReadTaskType {
-        return base.InvalidTask, nil
-    }
-    if !prop.IsSortItemEmpty() && !candidate.isMatchProp {
-        return base.InvalidTask, nil
-    }
-    // If we don't need to keep order for the table scan, we should forbid the scan that is forced (by hint) to keep order when we try to generate the path.
-    if prop.IsSortItemEmpty() && candidate.path.ForceKeepOrder {
-        return base.InvalidTask, nil
-    }
-    // If we need to keep order for the table scan, we should forbid the scan that is forced (by hint) not to keep order when we try to generate the path.
-    if !prop.IsSortItemEmpty() && candidate.path.ForceNoKeepOrder {
-        return base.InvalidTask, nil
-    }
-    ts, _ := getOriginalPhysicalTableScan(ds, prop, candidate.path, candidate.isMatchProp)
-    if ts.KeepOrder && ts.StoreType == kv.TiFlash && (ts.Desc || ds.SCtx().GetSessionVars().TiFlashFastScan) {
-        // TiFlash fast mode (https://github.com/pingcap/tidb/pull/35851) does not keep order in TableScan.
-        return base.InvalidTask, nil
-    }
-
-    // In disaggregated TiFlash mode, only MPP is allowed; cop and batchCop are deprecated.
-    // So if prop.TaskTp is RootTaskType, we have to use a mppTask and then convert it to a rootTask.
-    isTiFlashPath := ts.StoreType == kv.TiFlash
-    canMppConvertToRoot := prop.TaskTp == property.RootTaskType && ds.SCtx().GetSessionVars().IsMPPAllowed() && isTiFlashPath
-    canMppConvertToRootForDisaggregatedTiFlash := config.GetGlobalConfig().DisaggregatedTiFlash && canMppConvertToRoot
-    canMppConvertToRootForWhenTiFlashCopIsBanned := ds.SCtx().GetSessionVars().IsTiFlashCopBanned() && canMppConvertToRoot
-    if prop.TaskTp == property.MppTaskType || canMppConvertToRootForDisaggregatedTiFlash || canMppConvertToRootForWhenTiFlashCopIsBanned {
-        if ts.KeepOrder {
-            return base.InvalidTask, nil
-        }
-        if prop.MPPPartitionTp != property.AnyType {
-            return base.InvalidTask, nil
-        }
-        // ********************************** future deprecated start **************************/
-        var hasVirtualColumn bool
-        for _, col := range ts.schema.Columns {
-            if col.VirtualExpr != nil {
-                ds.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because column `" + col.OrigName + "` is a virtual column which is not supported now.")
-                hasVirtualColumn = true
-                break
-            }
-        }
-        // In general, since MPP now supports the Gather operator to fill the virtual column, we could fully lift the restrictions here.
-        // We keep them here because of cases like:
-        //   parent-----+
-        //              V  (when the parent requires a root task type here, we need to convert the mpp task to a root task)
-        //   projection [mpp task]  [a]
-        //   table-scan [mpp task]  [a(virtual col as: b+1), b]
-        // In the process of converting the mpp task to a root task, the encapsulated table reader will use its first child's schema [a]
-        // as its schema, so when we resolve indices later, the virtual column 'a' couldn't be resolved anymore.
-        //
-        if hasVirtualColumn && !canMppConvertToRootForDisaggregatedTiFlash && !canMppConvertToRootForWhenTiFlashCopIsBanned {
-            return base.InvalidTask, nil
-        }
-        // ********************************** future deprecated end **************************/
-        mppTask := &MppTask{
-            p: ts,
-            partTp: property.AnyType,
-            tblColHists: ds.TblColHists,
-        }
-        ts.PlanPartInfo = &PhysPlanPartInfo{
-            PruningConds: pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds),
-            PartitionNames: ds.PartitionNames,
-            Columns: ds.TblCols,
-            ColumnNames: ds.OutputNames(),
-        }
-        mppTask = ts.addPushedDownSelectionToMppTask(mppTask, ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt))
-        var task base.Task = mppTask
-        if !mppTask.Invalid() {
-            if prop.TaskTp == property.MppTaskType && len(mppTask.rootTaskConds) > 0 {
-                // If some filters cannot be pushed down to TiFlash, we have to make sure they are executed in TiDB,
-                // so we have to return a rootTask; but prop requires a mppTask, so this requirement cannot be met.
-                task = base.InvalidTask
-            } else if prop.TaskTp == property.RootTaskType {
-                // When we get here, one of the canMppConvertToRoot* flags is true.
-                // This is for situations where a mppTask cannot be generated for some operators.
-                // For example, when the build side of a HashJoin is a Projection that cannot be pushed down to
-                // TiFlash (because TiFlash doesn't support some expr in the Projection), the HashJoin cannot be
-                // pushed down to TiFlash either. But we still want the TableScan to run on TiFlash.
-                task = mppTask
-                task = task.ConvertToRootTask(ds.SCtx())
-            }
-        }
-        return task, nil
-    }
-    if isTiFlashPath && config.GetGlobalConfig().DisaggregatedTiFlash || isTiFlashPath && ds.SCtx().GetSessionVars().IsTiFlashCopBanned() {
-        // prop.TaskTp is cop-related; just return base.InvalidTask.
-        return base.InvalidTask, nil
-    }
-    copTask := &CopTask{
-        tablePlan: ts,
-        indexPlanFinished: true,
-        tblColHists: ds.TblColHists,
-    }
-    copTask.physPlanPartInfo = &PhysPlanPartInfo{
-        PruningConds: pushDownNot(ds.SCtx().GetExprCtx(), ds.AllConds),
-        PartitionNames: ds.PartitionNames,
-        Columns: ds.TblCols,
-        ColumnNames: ds.OutputNames(),
-    }
-    ts.PlanPartInfo = copTask.physPlanPartInfo
-    var task base.Task = copTask
-    if candidate.isMatchProp {
-        copTask.keepOrder = true
-        if ds.TableInfo.GetPartitionInfo() != nil {
-            // A TableScan on a partitioned table on TiFlash can't keep order.
-            if ts.StoreType == kv.TiFlash {
-                return base.InvalidTask, nil
-            }
-            // Add sort items for the table scan for the merge-sort operation between partitions.
-            byItems := make([]*util.ByItems, 0, len(prop.SortItems))
-            for _, si := range prop.SortItems {
-                byItems = append(byItems, &util.ByItems{
-                    Expr: si.Col,
-                    Desc: si.Desc,
-                })
-            }
-            ts.ByItems = byItems
-        }
-    }
-    ts.addPushedDownSelection(copTask, ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt))
-    if prop.IsFlashProp() && len(copTask.rootTaskConds) != 0 {
-        return base.InvalidTask, nil
-    }
-    if prop.TaskTp == property.RootTaskType {
-        task = task.ConvertToRootTask(ds.SCtx())
-    } else if _, ok := task.(*RootTask); ok {
-        return base.InvalidTask, nil
-    }
-    return task, nil
-}
-
-func convertToSampleTable(ds *DataSource, prop *property.PhysicalProperty,
-    candidate *candidatePath, _ *optimizetrace.PhysicalOptimizeOp) (base.Task, error) {
-    if prop.TaskTp == property.CopMultiReadTaskType {
-        return base.InvalidTask, nil
-    }
-    if !prop.IsSortItemEmpty() && !candidate.isMatchProp {
-        return base.InvalidTask, nil
-    }
-    if candidate.isMatchProp {
-        // Disable the keep-order property for the sample table path.
-        return base.InvalidTask, nil
-    }
-    p := PhysicalTableSample{
-        TableSampleInfo: ds.SampleInfo,
-        TableInfo: ds.table,
-        PhysicalTableID: ds.PhysicalTableID,
-        Desc: candidate.isMatchProp && prop.SortItems[0].Desc,
-    }.Init(ds.SCtx(), ds.QueryBlockOffset())
-    p.schema = ds.Schema()
-    rt := &RootTask{}
-    rt.SetPlan(p)
-    return rt, nil
-}
-
-func convertToPointGet(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath) base.Task {
-    if !prop.IsSortItemEmpty() && !candidate.isMatchProp {
-        return base.InvalidTask
-    }
-    if prop.TaskTp == property.CopMultiReadTaskType && candidate.path.IsSingleScan ||
-        prop.TaskTp == property.CopSingleReadTaskType && !candidate.path.IsSingleScan {
-        return base.InvalidTask
-    }
-
-    if tidbutil.IsMemDB(ds.DBName.L) {
-        return base.InvalidTask
-    }
-
-    accessCnt := math.Min(candidate.path.CountAfterAccess, float64(1))
-    pointGetPlan := PointGetPlan{
-        ctx: ds.SCtx(),
-        AccessConditions: candidate.path.AccessConds,
-        schema: ds.Schema().Clone(),
-        dbName: ds.DBName.L,
-        TblInfo: ds.TableInfo,
-        outputNames: ds.OutputNames(),
-        LockWaitTime: ds.SCtx().GetSessionVars().LockWaitTimeout,
-        Columns: ds.Columns,
-    }.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.QueryBlockOffset())
-    if ds.PartitionDefIdx != nil {
-        pointGetPlan.PartitionIdx = ds.PartitionDefIdx
-    }
-    pointGetPlan.PartitionNames = ds.PartitionNames
-    rTsk := &RootTask{}
-    rTsk.SetPlan(pointGetPlan)
-    if candidate.path.IsIntHandlePath {
-        pointGetPlan.Handle = kv.IntHandle(candidate.path.Ranges[0].LowVal[0].GetInt64())
-        pointGetPlan.UnsignedHandle = mysql.HasUnsignedFlag(ds.HandleCols.GetCol(0).RetType.GetFlag())
-        pointGetPlan.accessCols = ds.TblCols
-        found := false
-        for i := range ds.Columns {
-            if ds.Columns[i].ID == ds.HandleCols.GetCol(0).ID {
-                pointGetPlan.HandleColOffset = ds.Columns[i].Offset
-                found = true
-                break
-            }
-        }
-        if !found {
-            return base.InvalidTask
-        }
-        // Add filter condition to table plan now.
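[Editor's note] Several conversion paths in this file finish the same way: when residual filters remain, the scan (or point get) is wrapped in a PhysicalSelection that becomes the new plan root. A self-contained sketch of that "new node, set child, make it the root" pattern, with simplified plan nodes:

    package main

    import "fmt"

    type Plan interface{ Name() string }

    type Scan struct{}

    func (*Scan) Name() string { return "Scan" }

    type Selection struct {
        Conditions []string
        Child      Plan
    }

    func (s *Selection) Name() string { return "Selection(" + s.Child.Name() + ")" }

    // wrapWithSelection puts a Selection on top of p when filters remain.
    func wrapWithSelection(p Plan, filters []string) Plan {
        if len(filters) == 0 {
            return p
        }
        return &Selection{Conditions: filters, Child: p}
    }

    func main() {
        var top Plan = &Scan{}
        top = wrapWithSelection(top, []string{"a > 1"})
        fmt.Println(top.Name()) // Selection(Scan)
    }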
-        if len(candidate.path.TableFilters) > 0 {
-            sel := PhysicalSelection{
-                Conditions: candidate.path.TableFilters,
-            }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset())
-            sel.SetChildren(pointGetPlan)
-            rTsk.SetPlan(sel)
-        }
-    } else {
-        pointGetPlan.IndexInfo = candidate.path.Index
-        pointGetPlan.IdxCols = candidate.path.IdxCols
-        pointGetPlan.IdxColLens = candidate.path.IdxColLens
-        pointGetPlan.IndexValues = candidate.path.Ranges[0].LowVal
-        if candidate.path.IsSingleScan {
-            pointGetPlan.accessCols = candidate.path.IdxCols
-        } else {
-            pointGetPlan.accessCols = ds.TblCols
-        }
-        // Add index condition to table plan now.
-        if len(candidate.path.IndexFilters)+len(candidate.path.TableFilters) > 0 {
-            sel := PhysicalSelection{
-                Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...),
-            }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset())
-            sel.SetChildren(pointGetPlan)
-            rTsk.SetPlan(sel)
-        }
-    }
-
-    return rTsk
-}
-
-func convertToBatchPointGet(ds *DataSource, prop *property.PhysicalProperty, candidate *candidatePath) base.Task {
-    if !prop.IsSortItemEmpty() && !candidate.isMatchProp {
-        return base.InvalidTask
-    }
-    if prop.TaskTp == property.CopMultiReadTaskType && candidate.path.IsSingleScan ||
-        prop.TaskTp == property.CopSingleReadTaskType && !candidate.path.IsSingleScan {
-        return base.InvalidTask
-    }
-
-    accessCnt := math.Min(candidate.path.CountAfterAccess, float64(len(candidate.path.Ranges)))
-    batchPointGetPlan := &BatchPointGetPlan{
-        ctx: ds.SCtx(),
-        dbName: ds.DBName.L,
-        AccessConditions: candidate.path.AccessConds,
-        TblInfo: ds.TableInfo,
-        KeepOrder: !prop.IsSortItemEmpty(),
-        Columns: ds.Columns,
-        PartitionNames: ds.PartitionNames,
-    }
-    if ds.PartitionDefIdx != nil {
-        batchPointGetPlan.SinglePartition = true
-        batchPointGetPlan.PartitionIdxs = []int{*ds.PartitionDefIdx}
-    }
-    if batchPointGetPlan.KeepOrder {
-        batchPointGetPlan.Desc = prop.SortItems[0].Desc
-    }
-    rTsk := &RootTask{}
-    if candidate.path.IsIntHandlePath {
-        for _, ran := range candidate.path.Ranges {
-            batchPointGetPlan.Handles = append(batchPointGetPlan.Handles, kv.IntHandle(ran.LowVal[0].GetInt64()))
-        }
-        batchPointGetPlan.accessCols = ds.TblCols
-        found := false
-        for i := range ds.Columns {
-            if ds.Columns[i].ID == ds.HandleCols.GetCol(0).ID {
-                batchPointGetPlan.HandleColOffset = ds.Columns[i].Offset
-                found = true
-                break
-            }
-        }
-        if !found {
-            return base.InvalidTask
-        }
-
-        // Add filter condition to table plan now.
-        if len(candidate.path.TableFilters) > 0 {
-            batchPointGetPlan.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.Schema().Clone(), ds.OutputNames(), ds.QueryBlockOffset())
-            sel := PhysicalSelection{
-                Conditions: candidate.path.TableFilters,
-            }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset())
-            sel.SetChildren(batchPointGetPlan)
-            rTsk.SetPlan(sel)
-        }
-    } else {
-        batchPointGetPlan.IndexInfo = candidate.path.Index
-        batchPointGetPlan.IdxCols = candidate.path.IdxCols
-        batchPointGetPlan.IdxColLens = candidate.path.IdxColLens
-        for _, ran := range candidate.path.Ranges {
-            batchPointGetPlan.IndexValues = append(batchPointGetPlan.IndexValues, ran.LowVal)
-        }
-        if !prop.IsSortItemEmpty() {
-            batchPointGetPlan.KeepOrder = true
-            batchPointGetPlan.Desc = prop.SortItems[0].Desc
-        }
-        if candidate.path.IsSingleScan {
-            batchPointGetPlan.accessCols = candidate.path.IdxCols
-        } else {
-            batchPointGetPlan.accessCols = ds.TblCols
-        }
-        // Add index condition to table plan now.
-        if len(candidate.path.IndexFilters)+len(candidate.path.TableFilters) > 0 {
-            batchPointGetPlan.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.Schema().Clone(), ds.OutputNames(), ds.QueryBlockOffset())
-            sel := PhysicalSelection{
-                Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...),
-            }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset())
-            sel.SetChildren(batchPointGetPlan)
-            rTsk.SetPlan(sel)
-        }
-    }
-    if rTsk.GetPlan() == nil {
-        tmpP := batchPointGetPlan.Init(ds.SCtx(), ds.TableStats.ScaleByExpectCnt(accessCnt), ds.Schema().Clone(), ds.OutputNames(), ds.QueryBlockOffset())
-        rTsk.SetPlan(tmpP)
-    }
-
-    return rTsk
-}
-
-func (ts *PhysicalTableScan) addPushedDownSelectionToMppTask(mpp *MppTask, stats *property.StatsInfo) *MppTask {
-    filterCondition, rootTaskConds := SplitSelCondsWithVirtualColumn(ts.filterCondition)
-    var newRootConds []expression.Expression
-    filterCondition, newRootConds = expression.PushDownExprs(util.GetPushDownCtx(ts.SCtx()), filterCondition, ts.StoreType)
-    mpp.rootTaskConds = append(rootTaskConds, newRootConds...)
-
-    ts.filterCondition = filterCondition
-    // Add filter condition to table plan now.
-    if len(ts.filterCondition) > 0 {
-        sel := PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.SCtx(), stats, ts.QueryBlockOffset())
-        sel.SetChildren(ts)
-        mpp.p = sel
-    }
-    return mpp
-}
-
-func (ts *PhysicalTableScan) addPushedDownSelection(copTask *CopTask, stats *property.StatsInfo) {
-    ts.filterCondition, copTask.rootTaskConds = SplitSelCondsWithVirtualColumn(ts.filterCondition)
-    var newRootConds []expression.Expression
-    ts.filterCondition, newRootConds = expression.PushDownExprs(util.GetPushDownCtx(ts.SCtx()), ts.filterCondition, ts.StoreType)
-    copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...)
-
-    // Add filter condition to table plan now.
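[Editor's note] The code right after this point repeats a pattern used throughout the file: try to estimate the selectivity of the pushed-down filters, and fall back to a fixed selection factor when the estimate fails, then scale the row count by it. A self-contained numeric sketch, with 0.8 standing in for the planner's default selection factor:

    package main

    import (
        "errors"
        "fmt"
    )

    const defaultSelectionFactor = 0.8 // fallback when estimation fails

    // estimate returns filter selectivity, or an error when stats are missing.
    func estimate(haveStats bool) (float64, error) {
        if !haveStats {
            return 0, errors.New("no statistics")
        }
        return 0.25, nil // pretend the histogram says 25% of rows survive
    }

    func scaledRowCount(rows float64, haveStats bool) float64 {
        sel, err := estimate(haveStats)
        if err != nil {
            sel = defaultSelectionFactor // hedge instead of failing the plan
        }
        return rows * sel
    }

    func main() {
        fmt.Println(scaledRowCount(10000, true))  // 2500
        fmt.Println(scaledRowCount(10000, false)) // 8000
    }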
-    if len(ts.filterCondition) > 0 {
-        sel := PhysicalSelection{Conditions: ts.filterCondition}.Init(ts.SCtx(), stats, ts.QueryBlockOffset())
-        if len(copTask.rootTaskConds) != 0 {
-            selectivity, _, err := cardinality.Selectivity(ts.SCtx(), copTask.tblColHists, ts.filterCondition, nil)
-            if err != nil {
-                logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
-                selectivity = cost.SelectionFactor
-            }
-            sel.SetStats(ts.StatsInfo().Scale(selectivity))
-        }
-        sel.SetChildren(ts)
-        copTask.tablePlan = sel
-    }
-}
-
-func (ts *PhysicalTableScan) getScanRowSize() float64 {
-    if ts.StoreType == kv.TiKV {
-        return cardinality.GetTableAvgRowSize(ts.SCtx(), ts.tblColHists, ts.tblCols, ts.StoreType, true)
-    }
-    // If `ts.HandleCols` is nil, then the schema of the table scan doesn't have a handle column.
-    // This is ensured by column pruning.
-    return cardinality.GetTableAvgRowSize(ts.SCtx(), ts.tblColHists, ts.Schema().Columns, ts.StoreType, ts.HandleCols != nil)
-}
-
-func getOriginalPhysicalTableScan(ds *DataSource, prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool) (*PhysicalTableScan, float64) {
-    ts := PhysicalTableScan{
-        Table: ds.TableInfo,
-        Columns: slices.Clone(ds.Columns),
-        TableAsName: ds.TableAsName,
-        DBName: ds.DBName,
-        isPartition: ds.PartitionDefIdx != nil,
-        physicalTableID: ds.PhysicalTableID,
-        Ranges: path.Ranges,
-        AccessCondition: path.AccessConds,
-        StoreType: path.StoreType,
-        HandleCols: ds.HandleCols,
-        tblCols: ds.TblCols,
-        tblColHists: ds.TblColHists,
-        constColsByCond: path.ConstCols,
-        prop: prop,
-        filterCondition: slices.Clone(path.TableFilters),
-    }.Init(ds.SCtx(), ds.QueryBlockOffset())
-    ts.SetSchema(ds.Schema().Clone())
-    rowCount := path.CountAfterAccess
-    if prop.ExpectedCnt < ds.StatsInfo().RowCount {
-        rowCount = cardinality.AdjustRowCountForTableScanByLimit(ds.SCtx(),
-            ds.StatsInfo(), ds.TableStats, ds.StatisticTable,
-            path, prop.ExpectedCnt, isMatchProp && prop.SortItems[0].Desc)
-    }
-    // We need the NDV of columns since it may be used in the cost estimation of a join. Precisely speaking,
-    // we should track the NDV of each histogram bucket, and sum up the NDV of the buckets we actually need
-    // to scan, but this would only help improve the accuracy of the NDV for one column; for other columns,
-    // we still need to assume values are uniformly distributed. For simplicity, we use the uniform assumption
-    // for all columns now, as we do in `deriveStatsByFilter`.
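[Editor's note] The comment above describes scaling row counts (and, implicitly, per-column NDV) under a uniformity assumption rather than per-bucket tracking. A self-contained sketch of that approximation; the linear scaling is exactly the simplification the comment admits to, not an exact formula:

    package main

    import "fmt"

    // scaleNDV scales a column's NDV when only `selected` of `total` rows
    // are scanned, assuming values are uniformly distributed across rows.
    func scaleNDV(ndv, total, selected float64) float64 {
        if total <= 0 || selected >= total {
            return ndv
        }
        return ndv * selected / total // uniform assumption
    }

    func main() {
        // 1000 distinct values over 100k rows; we expect to scan 10k rows.
        fmt.Println(scaleNDV(1000, 100000, 10000)) // 100
    }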
-    ts.SetStats(ds.TableStats.ScaleByExpectCnt(rowCount))
-    usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false)
-    if usedStats != nil && usedStats.GetUsedInfo(ts.physicalTableID) != nil {
-        ts.usedStatsInfo = usedStats.GetUsedInfo(ts.physicalTableID)
-    }
-    if isMatchProp {
-        ts.Desc = prop.SortItems[0].Desc
-        ts.KeepOrder = true
-    }
-    return ts, rowCount
-}
-
-func getOriginalPhysicalIndexScan(ds *DataSource, prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool, isSingleScan bool) *PhysicalIndexScan {
-    idx := path.Index
-    is := PhysicalIndexScan{
-        Table: ds.TableInfo,
-        TableAsName: ds.TableAsName,
-        DBName: ds.DBName,
-        Columns: util.CloneColInfos(ds.Columns),
-        Index: idx,
-        IdxCols: path.IdxCols,
-        IdxColLens: path.IdxColLens,
-        AccessCondition: path.AccessConds,
-        Ranges: path.Ranges,
-        dataSourceSchema: ds.Schema(),
-        isPartition: ds.PartitionDefIdx != nil,
-        physicalTableID: ds.PhysicalTableID,
-        tblColHists: ds.TblColHists,
-        pkIsHandleCol: ds.getPKIsHandleCol(),
-        constColsByCond: path.ConstCols,
-        prop: prop,
-    }.Init(ds.SCtx(), ds.QueryBlockOffset())
-    rowCount := path.CountAfterAccess
-    is.initSchema(append(path.FullIdxCols, ds.CommonHandleCols...), !isSingleScan)
-
-    // If (1) there exists an index whose selectivity is smaller than the threshold,
-    // and (2) there is a Selection on the IndexScan, we don't use the ExpectedCnt to
-    // adjust the estimated row count of the IndexScan.
-    ignoreExpectedCnt := ds.AccessPathMinSelectivity < ds.SCtx().GetSessionVars().OptOrderingIdxSelThresh &&
-        len(path.IndexFilters)+len(path.TableFilters) > 0
-
-    if (isMatchProp || prop.IsSortItemEmpty()) && prop.ExpectedCnt < ds.StatsInfo().RowCount && !ignoreExpectedCnt {
-        rowCount = cardinality.AdjustRowCountForIndexScanByLimit(ds.SCtx(),
-            ds.StatsInfo(), ds.TableStats, ds.StatisticTable,
-            path, prop.ExpectedCnt, isMatchProp && prop.SortItems[0].Desc)
-    }
-    // ScaleByExpectCnt only allows scaling the row count down to something smaller than the table's total row count.
-    // But for an MV index, it's possible that the IndexRangeScan row count is larger than the table's total row count.
-    // Please see Case 2 in CalcTotalSelectivityForMVIdxPath for an example.
-    if idx.MVIndex && rowCount > ds.TableStats.RowCount {
-        is.SetStats(ds.TableStats.Scale(rowCount / ds.TableStats.RowCount))
-    } else {
-        is.SetStats(ds.TableStats.ScaleByExpectCnt(rowCount))
-    }
-    usedStats := ds.SCtx().GetSessionVars().StmtCtx.GetUsedStatsInfo(false)
-    if usedStats != nil && usedStats.GetUsedInfo(is.physicalTableID) != nil {
-        is.usedStatsInfo = usedStats.GetUsedInfo(is.physicalTableID)
-    }
-    if isMatchProp {
-        is.Desc = prop.SortItems[0].Desc
-        is.KeepOrder = true
-    }
-    return is
-}
-
-func findBestTask4LogicalCTE(lp base.LogicalPlan, prop *property.PhysicalProperty, counter *base.PlanCounterTp, pop *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) {
-    p := lp.(*logicalop.LogicalCTE)
-    if p.ChildLen() > 0 {
-        return p.BaseLogicalPlan.FindBestTask(prop, counter, pop)
-    }
-    if !prop.IsSortItemEmpty() && !prop.CanAddEnforcer {
-        return base.InvalidTask, 1, nil
-    }
-    // The physical plan has been built when deriving stats.
-    pcte := PhysicalCTE{SeedPlan: p.Cte.SeedPartPhysicalPlan, RecurPlan: p.Cte.RecursivePartPhysicalPlan, CTE: p.Cte, cteAsName: p.CteAsName, cteName: p.CteName}.Init(p.SCtx(), p.StatsInfo())
-    pcte.SetSchema(p.Schema())
-    if prop.IsFlashProp() && prop.CTEProducerStatus == property.AllCTECanMpp {
-        pcte.readerReceiver = PhysicalExchangeReceiver{IsCTEReader: true}.Init(p.SCtx(), p.StatsInfo())
-        if prop.MPPPartitionTp != property.AnyType {
-            return base.InvalidTask, 1, nil
-        }
-        t = &MppTask{
-            p: pcte,
-            partTp: prop.MPPPartitionTp,
-            hashCols: prop.MPPPartitionCols,
-            tblColHists: p.StatsInfo().HistColl,
-        }
-    } else {
-        rt := &RootTask{}
-        rt.SetPlan(pcte)
-        rt.SetEmpty(false)
-        t = rt
-    }
-    if prop.CanAddEnforcer {
-        t = enforceProperty(prop, t, p.Plan.SCtx())
-    }
-    return t, 1, nil
-}
-
-func findBestTask4LogicalCTETable(lp base.LogicalPlan, prop *property.PhysicalProperty, _ *base.PlanCounterTp, _ *optimizetrace.PhysicalOptimizeOp) (t base.Task, cntPlan int64, err error) {
-    p := lp.(*logicalop.LogicalCTETable)
-    if !prop.IsSortItemEmpty() {
-        return base.InvalidTask, 0, nil
-    }
-
-    pcteTable := PhysicalCTETable{IDForStorage: p.IDForStorage}.Init(p.SCtx(), p.StatsInfo())
-    pcteTable.SetSchema(p.Schema())
-    rt := &RootTask{}
-    rt.SetPlan(pcteTable)
-    t = rt
-    return t, 1, nil
-}
-
-func appendCandidate(lp base.LogicalPlan, task base.Task, prop *property.PhysicalProperty, opt *optimizetrace.PhysicalOptimizeOp) {
-    if task == nil || task.Invalid() {
-        return
-    }
-    utilfuncp.AppendCandidate4PhysicalOptimizeOp(opt, lp, task.Plan(), prop)
-}
-
-// pushDownNot here can convert a condition like 'not (a != 1)' to 'a = 1'. When we build ranges from conds, a condition
-// like 'not (a != 1)' would not be handled, so we need to convert it to 'a = 1', which can be handled when building ranges.
-func pushDownNot(ctx expression.BuildContext, conds []expression.Expression) []expression.Expression {
-    for i, cond := range conds {
-        conds[i] = expression.PushDownNot(ctx, cond)
-    }
-    return conds
-}
-
-func validateTableSamplePlan(ds *DataSource, t base.Task, err error) error {
-    if err != nil {
-        return err
-    }
-    if ds.SampleInfo != nil && !t.Invalid() {
-        if _, ok := t.Plan().(*PhysicalTableSample); !ok {
-            return expression.ErrInvalidTableSample.GenWithStackByArgs("plan not supported")
-        }
-    }
-    return nil
-}
diff --git a/planner/core/casetest/testdata/plan_suite_out.json b/planner/core/casetest/testdata/plan_suite_out.json
index 214a24a6fb1c0..16e2507456f5b 100644
--- a/planner/core/casetest/testdata/plan_suite_out.json
+++ b/planner/core/casetest/testdata/plan_suite_out.json
@@ -406,11 +406,11 @@
       "└─ExchangeSender 4439.11 mpp[tiflash] ExchangeType: PassThrough",
       "  └─Projection 4439.11 mpp[tiflash] test.t.a, Column#5",
       "    └─Projection 4439.11 mpp[tiflash] Column#5, test.t.a",
-      "      └─HashAgg 4439.11 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#16)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
+      "      └─HashAgg 4439.11 mpp[tiflash] group by:test.t.a, test.t.c, funcs:sum(Column#10)->Column#5, funcs:firstrow(test.t.a)->test.t.a",
       "        └─ExchangeReceiver 4439.11 mpp[tiflash] ",
       "          └─ExchangeSender 4439.11 mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: test.t.a, collate: binary], [name: test.t.c, collate: binary]",
-      "            └─HashAgg 4439.11 mpp[tiflash] group by:Column#19, Column#20, funcs:sum(Column#18)->Column#16",
-      "              └─Projection 5548.89 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#18, test.t.a, test.t.c",
+      "            └─HashAgg 4439.11 mpp[tiflash] group by:Column#13, Column#14, funcs:sum(Column#12)->Column#10",
+      "              └─Projection 5548.89 mpp[tiflash] cast(test.t.b, decimal(10,0) BINARY)->Column#12, test.t.a, test.t.c",
       "                └─Selection 5548.89 mpp[tiflash] or(lt(test.t.b, 2), gt(test.t.a, 2))",
       "                  └─TableFullScan 10000.00 mpp[tiflash] table:t pushed down filter:empty, keep order:false, stats:pseudo"
     ],
diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go
index 673d1463e5374..7430318b6e772 100644
--- a/planner/core/find_best_task.go
+++ b/planner/core/find_best_task.go
@@ -963,6 +963,10 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter
     for _, candidate := range candidates {
         path := candidate.path
         if path.PartialIndexPaths != nil {
+            // If TiFlash is preferred but the current table path is TiKV, skip it.
+            if ds.preferStoreType&preferTiFlash != 0 && path.StoreType == kv.TiKV {
+                continue
+            }
             idxMergeTask, err := ds.convertToIndexMergeScan(prop, candidate, opt)
             if err != nil {
                 return nil, 0, err
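[Editor's note] The four added lines above are the core of this cherry-pick: index-merge candidates rooted on TiKV are now skipped when the READ_FROM_STORAGE(TIFLASH[...]) hint prefers TiFlash, so path selection converges on the TiFlash path. A self-contained sketch of that candidate filter; the bitmask and store types are simplified stand-ins for the planner's real ones:

    package main

    import "fmt"

    type StoreType int

    const (
        TiKV StoreType = iota
        TiFlash
    )

    const preferTiFlash uint64 = 1 << 0 // simplified hint bitmask

    type path struct {
        store        StoreType
        partialPaths bool // true for index-merge candidates
    }

    // keep reports whether a candidate path survives the engine-preference hint.
    func keep(p path, prefer uint64) bool {
        if p.partialPaths && prefer&preferTiFlash != 0 && p.store == TiKV {
            return false // index merge is TiKV-only; it conflicts with the hint
        }
        return true
    }

    func main() {
        fmt.Println(keep(path{TiKV, true}, preferTiFlash))     // false: skipped
        fmt.Println(keep(path{TiFlash, false}, preferTiFlash)) // true
    }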