From 544a76778f49bb8a96172171a63b6b6ab848e929 Mon Sep 17 00:00:00 2001
From: Arenatlx <314806019@qq.com>
Date: Mon, 20 Nov 2023 08:53:42 +0800
Subject: [PATCH] planner: fix potential modification of shared immutable
 aggDesc (#48672)

close pingcap/tidb#48643
---
 planner/core/physical_plans.go                   | 16 +++++++++++++---
 planner/core/testdata/enforce_mpp_suite_out.json | 12 ++++++------
 2 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go
index 195f746976860..ee4a226e6c2e5 100644
--- a/planner/core/physical_plans.go
+++ b/planner/core/physical_plans.go
@@ -1163,10 +1163,20 @@ func (p *PhysicalHashAgg) Clone() (PhysicalPlan, error) {
 
 // NewPhysicalHashAgg creates a new PhysicalHashAgg from a LogicalAggregation.
 func NewPhysicalHashAgg(la *LogicalAggregation, newStats *property.StatsInfo, prop *property.PhysicalProperty) *PhysicalHashAgg {
+	newGbyItems := make([]expression.Expression, len(la.GroupByItems))
+	copy(newGbyItems, la.GroupByItems)
+	newAggFuncs := make([]*aggregation.AggFuncDesc, len(la.AggFuncs))
+	// There's some places that rewrites the aggFunc in-place.
+	// I clone it first.
+	// It needs a well refactor to make sure that the physical optimize should not change the things of logical plan.
+	// It's bad for cascades
+	for i, aggFunc := range la.AggFuncs {
+		newAggFuncs[i] = aggFunc.Clone()
+	}
 	agg := basePhysicalAgg{
-		GroupByItems: la.GroupByItems,
-		AggFuncs:     la.AggFuncs,
-	}.initForHash(la.ctx, newStats, la.blockOffset, prop)
+		GroupByItems: newGbyItems,
+		AggFuncs:     newAggFuncs,
+	}.initForHash(la.SCtx(), newStats, la.SelectBlockOffset(), prop)
 	return agg
 }
diff --git a/planner/core/testdata/enforce_mpp_suite_out.json b/planner/core/testdata/enforce_mpp_suite_out.json
index 9d30b25248207..2702fa00572af 100644
--- a/planner/core/testdata/enforce_mpp_suite_out.json
+++ b/planner/core/testdata/enforce_mpp_suite_out.json
@@ -676,10 +676,10 @@
     {
       "SQL": "EXPLAIN select o.o_id, count(*) from c, o where c.c_id=o.c_id group by o.o_id; -- 2. test agg push down, group by non-join column",
       "Plan": [
-        "TableReader_84 8000.00 root  data:ExchangeSender_83",
-        "└─ExchangeSender_83 8000.00 mpp[tiflash]  ExchangeType: PassThrough",
+        "TableReader_78 8000.00 root  data:ExchangeSender_77",
+        "└─ExchangeSender_77 8000.00 mpp[tiflash]  ExchangeType: PassThrough",
         "  └─Projection_10 8000.00 mpp[tiflash]  test.o.o_id, Column#6",
-        "    └─Projection_77 8000.00 mpp[tiflash]  Column#6, test.o.o_id",
+        "    └─Projection_76 8000.00 mpp[tiflash]  Column#6, test.o.o_id",
         "      └─HashAgg_75 8000.00 mpp[tiflash]  group by:test.o.o_id, funcs:sum(Column#7)->Column#6, funcs:firstrow(Column#8)->test.o.o_id",
         "        └─ExchangeReceiver_71 9990.00 mpp[tiflash]  ",
         "          └─ExchangeSender_70 9990.00 mpp[tiflash]  ExchangeType: HashPartition, Hash Cols: [name: test.o.o_id, collate: binary]",
@@ -699,10 +699,10 @@
     {
       "SQL": "EXPLAIN select o.c_id, count(*) from c, o where c.c_id=o.c_id group by o.c_id; -- 3. test agg push down, group by join column",
       "Plan": [
-        "TableReader_84 8000.00 root  data:ExchangeSender_83",
-        "└─ExchangeSender_83 8000.00 mpp[tiflash]  ExchangeType: PassThrough",
+        "TableReader_78 8000.00 root  data:ExchangeSender_77",
+        "└─ExchangeSender_77 8000.00 mpp[tiflash]  ExchangeType: PassThrough",
         "  └─Projection_10 8000.00 mpp[tiflash]  test.o.c_id, Column#6",
-        "    └─Projection_77 8000.00 mpp[tiflash]  Column#6, test.o.c_id",
+        "    └─Projection_76 8000.00 mpp[tiflash]  Column#6, test.o.c_id",
         "      └─HashAgg_75 8000.00 mpp[tiflash]  group by:test.o.c_id, funcs:sum(Column#7)->Column#6, funcs:firstrow(Column#8)->test.o.c_id",
         "        └─ExchangeReceiver_71 9990.00 mpp[tiflash]  ",
         "          └─ExchangeSender_70 9990.00 mpp[tiflash]  ExchangeType: HashPartition, Hash Cols: [name: test.o.c_id, collate: binary]",