From 7318d99614ce13ab2be1ad93ad8b4069d11aa378 Mon Sep 17 00:00:00 2001 From: Arenatlx Date: Fri, 10 Apr 2020 15:57:51 +0800 Subject: [PATCH] cherry pick #15409 to release-2.1 Signed-off-by: sre-bot --- ddl/db_integration_test.go | 105 ++++++++++++++++ ddl/ddl_api.go | 33 +++++ ddl/ddl_worker.go | 2 + ddl/rollingback.go | 4 + ddl/table.go | 21 ++++ executor/show.go | 11 ++ executor/show_test.go | 36 ++++++ go.mod | 36 ++++++ go.sum | 36 ++++++ infoschema/builder.go | 5 + meta/autoid/autoid.go | 146 ++++++++++++++++++++--- sessionctx/binloginfo/binloginfo_test.go | 20 ++++ types/parser_driver/special_cmt_ctrl.go | 64 ++++++++++ util/admin/admin.go | 5 + 14 files changed, 510 insertions(+), 14 deletions(-) create mode 100644 types/parser_driver/special_cmt_ctrl.go diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 331dd57036e76..9787e0c0fb812 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -16,6 +16,7 @@ package ddl_test import ( "context" "fmt" + "strconv" "strings" . "github.com/pingcap/check" @@ -997,3 +998,107 @@ func (s *testIntegrationSuite) TestParserIssue284(c *C) { tk.MustExec("drop table test.t_parser_issue_284") tk.MustExec("drop table test.t_parser_issue_284_2") } + +// TestCreateTableWithAutoIdCache test the auto_id_cache table option. +// `auto_id_cache` take effects on handle too when `PKIshandle` is false, +// or even there is no auto_increment column at all. +func (s *testIntegrationSuite3) TestCreateTableWithAutoIdCache(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("USE test;") + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + + // Test primary key is handle. + tk.MustExec("create table t(a int auto_increment key) auto_id_cache 100") + tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(100)) + tk.MustExec("insert into t values()") + tk.MustQuery("select * from t").Check(testkit.Rows("1")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1 values()") + tk.MustQuery("select * from t1").Check(testkit.Rows("101")) + + // Test primary key is not handle. + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t(a int) auto_id_cache 100") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + + tk.MustExec("insert into t values()") + tk.MustQuery("select _tidb_rowid from t").Check(testkit.Rows("1")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1 values()") + tk.MustQuery("select _tidb_rowid from t1").Check(testkit.Rows("101")) + + // Test both auto_increment and rowid exist. + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t(a int null, b int auto_increment unique) auto_id_cache 100") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + + tk.MustExec("insert into t(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("1 2")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache. 
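+ // The auto_increment column and _tidb_rowid draw from the same row-id allocator, so the
+ // first insert above consumed IDs 1 and 2 from the cached range [1, 100]; renaming the
+ // table discards that cached range, which is why the next insert starts at 101.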
+ tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("101 102")) + tk.MustExec("delete from t1") + + // Test alter auto_id_cache. + tk.MustExec("alter table t1 auto_id_cache 200") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(200)) + + tk.MustExec("insert into t1(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("201 202")) + tk.MustExec("delete from t1") + + // Invalid the allocator cache, insert will trigger a new cache. + tk.MustExec("rename table t1 to t;") + tk.MustExec("insert into t(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("401 402")) + tk.MustExec("delete from t") + + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t(a int auto_increment key) auto_id_cache 3") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(3)) + + // Test insert batch size(4 here) greater than the customized autoid step(3 here). + tk.MustExec("insert into t(a) values(NULL),(NULL),(NULL),(NULL)") + tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache. + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1(a) values(NULL)") + next := tk.MustQuery("select a from t1").Rows()[0][0].(string) + nextInt, err := strconv.Atoi(next) + c.Assert(err, IsNil) + c.Assert(nextInt, Greater, 5) + + // Test auto_id_cache overflows int64. + tk.MustExec("drop table if exists t;") + _, err = tk.Exec("create table t(a int) auto_id_cache = 9223372036854775808") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64") + + tk.MustExec("create table t(a int) auto_id_cache = 9223372036854775807") + _, err = tk.Exec("alter table t auto_id_cache = 9223372036854775808") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64") +} diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 9c4e17d88ed67..36efedcb0fa5f 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1343,6 +1343,12 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err switch op.Tp { case ast.TableOptionAutoIncrement: tbInfo.AutoIncID = int64(op.UintValue) + case ast.TableOptionAutoIdCache: + if op.UintValue > uint64(math.MaxInt64) { + // TODO: Refine this error. + return errors.New("table option auto_id_cache overflows int64") + } + tbInfo.AutoIdCache = int64(op.UintValue) case ast.TableOptionComment: tbInfo.Comment = op.StrValue case ast.TableOptionCharset: @@ -1502,6 +1508,12 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A err = d.ShardRowID(ctx, ident, opt.UintValue) case ast.TableOptionAutoIncrement: err = d.RebaseAutoID(ctx, ident, int64(opt.UintValue)) + case ast.TableOptionAutoIdCache: + if opt.UintValue > uint64(math.MaxInt64) { + // TODO: Refine this error. 
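+ // opt.UintValue is a uint64 from the parser; tblInfo.AutoIdCache and the DDL job
+ // argument are int64, so values above math.MaxInt64 cannot be represented and are
+ // rejected before the job is queued.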
+ return errors.New("table option auto_id_cache overflows int64") + } + err = d.AlterTableAutoIDCache(ctx, ident, int64(opt.UintValue)) case ast.TableOptionComment: spec.Comment = opt.StrValue err = d.AlterTableComment(ctx, ident, spec) @@ -2302,6 +2314,27 @@ func (d *ddl) AlterTableComment(ctx sessionctx.Context, ident ast.Ident, spec *a return errors.Trace(err) } +// AlterTableAutoIDCache updates the table comment information. +func (d *ddl) AlterTableAutoIDCache(ctx sessionctx.Context, ident ast.Ident, newCache int64) error { + schema, tb, err := d.getSchemaAndTableByIdent(ctx, ident) + if err != nil { + return errors.Trace(err) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: tb.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionModifyTableAutoIdCache, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{newCache}, + } + + err = d.doDDLJob(ctx, job) + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + // AlterTableCharset changes the table charset and collate. func (d *ddl) AlterTableCharsetAndCollate(ctx sessionctx.Context, ident ast.Ident, toCharset, toCollate string) error { // use the last one. diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index baf374a15841f..e95eac801441d 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -527,6 +527,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = w.onShardRowID(d, t, job) case model.ActionModifyTableComment: ver, err = onModifyTableComment(t, job) + case model.ActionModifyTableAutoIdCache: + ver, err = onModifyTableAutoIDCache(t, job) case model.ActionAddTablePartition: ver, err = onAddTablePartition(t, job) case model.ActionModifyTableCharsetAndCollate: diff --git a/ddl/rollingback.go b/ddl/rollingback.go index 64fb47ed3d1e6..a25db920b3242 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -271,7 +271,11 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) model.ActionModifyColumn, model.ActionAddForeignKey, model.ActionDropForeignKey, model.ActionRenameTable, model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition, +<<<<<<< HEAD model.ActionModifySchemaCharsetAndCollate: +======= + model.ActionModifySchemaCharsetAndCollate, model.ActionRepairTable, model.ActionModifyTableAutoIdCache: +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. 
(#15409) ver, err = cancelOnlyNotHandledJob(job) case model.ActionDropTable, model.ActionDropSchema: job.State = model.JobStateRollingback diff --git a/ddl/table.go b/ddl/table.go index 1ae046940b54f..cd32c9ffd7956 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -320,6 +320,27 @@ func onRebaseAutoID(store kv.Storage, t *meta.Meta, job *model.Job) (ver int64, return ver, nil } +func onModifyTableAutoIDCache(t *meta.Meta, job *model.Job) (int64, error) { + var cache int64 + if err := job.DecodeArgs(&cache); err != nil { + job.State = model.JobStateCancelled + return 0, errors.Trace(err) + } + + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + if err != nil { + return 0, errors.Trace(err) + } + + tblInfo.AutoIdCache = cache + ver, err := updateVersionAndTableInfo(t, job, tblInfo, true) + if err != nil { + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + return ver, nil +} + func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { var shardRowIDBits uint64 err := job.DecodeArgs(&shardRowIDBits) diff --git a/executor/show.go b/executor/show.go index 6859c5c80a764..18a2f44588e82 100644 --- a/executor/show.go +++ b/executor/show.go @@ -684,10 +684,21 @@ func (e *ShowExec) fetchShowCreateTable() error { } } +<<<<<<< HEAD if tb.Meta().ShardRowIDBits > 0 { fmt.Fprintf(&buf, "/*!90000 SHARD_ROW_ID_BITS=%d ", tb.Meta().ShardRowIDBits) if tb.Meta().PreSplitRegions > 0 { fmt.Fprintf(&buf, "PRE_SPLIT_REGIONS=%d ", tb.Meta().PreSplitRegions) +======= + if tableInfo.AutoIdCache != 0 { + fmt.Fprintf(buf, " /*T![auto_id_cache] AUTO_ID_CACHE=%d */", tableInfo.AutoIdCache) + } + + if tableInfo.ShardRowIDBits > 0 { + fmt.Fprintf(buf, "/*!90000 SHARD_ROW_ID_BITS=%d ", tableInfo.ShardRowIDBits) + if tableInfo.PreSplitRegions > 0 { + fmt.Fprintf(buf, "PRE_SPLIT_REGIONS=%d ", tableInfo.PreSplitRegions) +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. (#15409) } buf.WriteString("*/") } diff --git a/executor/show_test.go b/executor/show_test.go index 49287a5912538..60141e2ce3dd8 100644 --- a/executor/show_test.go +++ b/executor/show_test.go @@ -463,6 +463,42 @@ func (s *testSuite5) TestShowCreateTable(c *C) { )) } +// Override testAutoRandomSuite to test auto id cache. 
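+// It verifies that SHOW CREATE TABLE prints the option as a /*T![auto_id_cache] AUTO_ID_CACHE=n */
+// special comment, so that MySQL and older TiDB versions can safely ignore it.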
+func (s *testAutoRandomSuite) TestAutoIdCache(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int auto_increment key) auto_id_cache = 10") + tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|", + ""+ + "t CREATE TABLE `t` (\n"+ + " `a` int(11) NOT NULL AUTO_INCREMENT,\n"+ + " PRIMARY KEY (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=10 */", + )) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int auto_increment unique, b int key) auto_id_cache 100") + tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|", + ""+ + "t CREATE TABLE `t` (\n"+ + " `a` int(11) NOT NULL AUTO_INCREMENT,\n"+ + " `b` int(11) NOT NULL,\n"+ + " PRIMARY KEY (`b`),\n"+ + " UNIQUE KEY `a` (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=100 */", + )) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int key) auto_id_cache 5") + tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|", + ""+ + "t CREATE TABLE `t` (\n"+ + " `a` int(11) NOT NULL,\n"+ + " PRIMARY KEY (`a`)\n"+ + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=5 */", + )) +} + func (s *testSuite5) TestShowEscape(c *C) { tk := testkit.NewTestKit(c, s.store) diff --git a/go.mod b/go.mod index ddd5cf650fb80..11a206efdaa81 100644 --- a/go.mod +++ b/go.mod @@ -45,6 +45,7 @@ require ( github.com/onsi/gomega v1.4.1 // indirect github.com/opentracing/basictracer-go v1.0.0 github.com/opentracing/opentracing-go v1.0.2 +<<<<<<< HEAD github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 github.com/pingcap/errors v0.11.4 github.com/pingcap/failpoint v0.0.0-20190430075617-bf45ab20bfc4 @@ -75,6 +76,41 @@ require ( golang.org/x/net v0.0.0-20180906233101-161cd47e91fd golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect golang.org/x/sys v0.0.0-20191010194322-b09406accb47 // indirect +======= + github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 + github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 + github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798 + github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d + github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 + github.com/pingcap/kvproto v0.0.0-20200409034505-a5af800ca2ef + github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd + github.com/pingcap/parser v0.0.0-20200410065024-81f3db8e6095 + github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 + github.com/pingcap/sysutil v0.0.0-20200408114249-ed3bd6f7fdb1 + github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible + github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 + github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 + github.com/prometheus/common v0.4.1 + github.com/shirou/gopsutil v2.19.10+incompatible + github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 // indirect + github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca // indirect + github.com/sirupsen/logrus v1.2.0 + github.com/soheilhy/cmux v0.1.4 + github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 + github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 + github.com/uber-go/atomic v1.3.2 + github.com/uber/jaeger-client-go v2.15.0+incompatible + 
github.com/uber/jaeger-lib v1.5.0 // indirect + go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 + go.uber.org/atomic v1.6.0 + go.uber.org/automaxprocs v1.2.0 + go.uber.org/zap v1.14.1 + golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/net v0.0.0-20200301022130-244492dfa37a + golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. (#15409) golang.org/x/text v0.3.2 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac diff --git a/go.sum b/go.sum index 50b70fa788127..18eeb870a20d7 100644 --- a/go.sum +++ b/go.sum @@ -99,6 +99,7 @@ github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuM github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +<<<<<<< HEAD github.com/pingcap/failpoint v0.0.0-20190430075617-bf45ab20bfc4 h1:4dCk6ysGubtlSc9hE/t5Ptl6mMVxSoWSsTvGSbFJwJ8= github.com/pingcap/failpoint v0.0.0-20190430075617-bf45ab20bfc4/go.mod h1:p2F6D0adua5g+596cw96U8hU8slkeJhboEV7ySGDeEg= github.com/pingcap/gofail v0.0.0-20181217135706-6a951c1e42c3 h1:04yuCf5NMvLU8rB2m4Qs3rynH7EYpMno3lHkewIOdMo= @@ -117,6 +118,41 @@ github.com/pingcap/tidb-tools v2.1.3-0.20190116051332-34c808eef588+incompatible github.com/pingcap/tidb-tools v2.1.3-0.20190116051332-34c808eef588+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20200401051341-79a721ff4a15 h1:lI0m0q/DWnqDnKBc5TAQFb3dILDQ6KlW+dbY8AwfeqU= github.com/pingcap/tipb v0.0.0-20200401051341-79a721ff4a15/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +======= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798 h1:6DMbRqPI1qzQ8N1xc3+nKY8IxSACd9VqQKkRVvbyoIg= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d h1:rCmRK0lCRrHMUbS99BKFYhK9YxJDNw0xB033cQbYo0s= +github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= +github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20200214064158-62d31900d88e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200221034943-a2aa1d1e20a8/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200409034505-a5af800ca2ef 
h1:t+bOucRUlIlzW+6S32qG8ufu4iC8F8LEld4Rdhhp1Aw= +github.com/pingcap/kvproto v0.0.0-20200409034505-a5af800ca2ef/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/parser v0.0.0-20200410065024-81f3db8e6095 h1:DyL/YbS4r89FmiZd3XbUrpMSsVFtpOZzh1busGKytiI= +github.com/pingcap/parser v0.0.0-20200410065024-81f3db8e6095/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= +github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3/go.mod h1:25GfNw6+Jcr9kca5rtmTb4gKCJ4jOpow2zV2S9Dgafs= +github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1 h1:YUnUZ914SHFMsOSe/xgH5DKK/thtRma8X8hcszRo3CA= +github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/sysutil v0.0.0-20200408114249-ed3bd6f7fdb1 h1:PI8YpTl45F8ilNkrPtT4IdbcZB1SCEa+gK/U5GJYl3E= +github.com/pingcap/sysutil v0.0.0-20200408114249-ed3bd6f7fdb1/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible h1:84F7MFMfdAYObrznvRslmVu43aoihrlL+7mMyMlOi0o= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 h1:aJPXrT1u4VfUSGFA2oQVwl4pOXzqe+YI6wed01cjDH4= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. (#15409) github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= diff --git a/infoschema/builder.go b/infoschema/builder.go index 91df02829545e..5e95a3f5a5ec2 100644 --- a/infoschema/builder.go +++ b/infoschema/builder.go @@ -78,8 +78,13 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) ([]int64, erro // We try to reuse the old allocator, so the cached auto ID can be reused. var alloc autoid.Allocator if tableIDIsValid(oldTableID) { +<<<<<<< HEAD if oldTableID == newTableID && diff.Type != model.ActionRenameTable && diff.Type != model.ActionRebaseAutoID { alloc, _ = b.is.AllocByID(oldTableID) +======= + if oldTableID == newTableID && diff.Type != model.ActionRenameTable && diff.Type != model.ActionRebaseAutoID && diff.Type != model.ActionModifyTableAutoIdCache { + allocs, _ = b.is.AllocByID(oldTableID) +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. 
(#15409) } if diff.Type == model.ActionRenameTable && diff.OldSchemaID != diff.SchemaID { oldRoDBInfo, ok := b.is.SchemaByID(diff.OldSchemaID) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index ee43efde60a94..4f2f05b3d4f06 100755 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -42,6 +42,20 @@ var step = int64(30000) var errInvalidTableID = terror.ClassAutoid.New(codeInvalidTableID, "invalid TableID") +// CustomAutoIncCacheOption is one kind of AllocOption to customize the allocator step length. +type CustomAutoIncCacheOption int64 + +// ApplyOn is implement the AllocOption interface. +func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) { + alloc.step = int64(step) + alloc.customStep = true +} + +// AllocOption is a interface to define allocator custom options coming in future. +type AllocOption interface { + ApplyOn(*allocator) +} + // Allocator is an auto increment id generator. // Just keep id unique actually. type Allocator interface { @@ -71,6 +85,12 @@ type allocator struct { isUnsigned bool lastAllocTime time.Time step int64 +<<<<<<< HEAD +======= + customStep bool + allocType AllocatorType + sequence *model.SequenceInfo +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. (#15409) } // GetStep is only used by tests @@ -218,6 +238,94 @@ func (alloc *allocator) Rebase(tableID, requiredBase int64, allocIDs bool) error return alloc.rebase4Signed(tableID, requiredBase, allocIDs) } +<<<<<<< HEAD +======= +// Rebase implements autoid.Allocator RebaseSeq interface. +// The return value is quite same as expression function, bool means whether it should be NULL, +// here it will be used in setval expression function (true meaning the set value has been satisfied, return NULL). +// case1:When requiredBase is satisfied with current value, it will return (0, true, nil), +// case2:When requiredBase is successfully set in, it will return (requiredBase, false, nil). +// If some error occurs in the process, return it immediately. +func (alloc *allocator) RebaseSeq(tableID, requiredBase int64) (int64, bool, error) { + if tableID == 0 { + return 0, false, errInvalidTableID.GenWithStack("Invalid tableID") + } + + alloc.mu.Lock() + defer alloc.mu.Unlock() + return alloc.rebase4Sequence(tableID, requiredBase) +} + +func (alloc *allocator) GetType() AllocatorType { + return alloc.allocType +} + +// NextStep return new auto id step according to previous step and consuming time. +func NextStep(curStep int64, consumeDur time.Duration) int64 { + failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(step) + } + }) + + consumeRate := defaultConsumeTime.Seconds() / consumeDur.Seconds() + res := int64(float64(curStep) * consumeRate) + if res < minStep { + return minStep + } else if res > maxStep { + return maxStep + } + return res +} + +// NewAllocator returns a new auto increment id generator on the store. +func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool, allocType AllocatorType, opts ...AllocOption) Allocator { + alloc := &allocator{ + store: store, + dbID: dbID, + isUnsigned: isUnsigned, + step: step, + lastAllocTime: time.Now(), + allocType: allocType, + } + for _, fn := range opts { + fn.ApplyOn(alloc) + } + return alloc +} + +// NewSequenceAllocator returns a new sequence value generator on the store. 
+func NewSequenceAllocator(store kv.Storage, dbID int64, info *model.SequenceInfo) Allocator { + return &allocator{ + store: store, + dbID: dbID, + // Sequence allocator is always signed. + isUnsigned: false, + lastAllocTime: time.Now(), + allocType: SequenceType, + sequence: info, + } +} + +// NewAllocatorsFromTblInfo creates an array of allocators of different types with the information of model.TableInfo. +func NewAllocatorsFromTblInfo(store kv.Storage, schemaID int64, tblInfo *model.TableInfo) Allocators { + var allocs []Allocator + dbID := tblInfo.GetDBID(schemaID) + if tblInfo.AutoIdCache > 0 { + allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType, CustomAutoIncCacheOption(tblInfo.AutoIdCache))) + } else { + allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType)) + } + if tblInfo.ContainsAutoRandomBits() { + allocs = append(allocs, NewAllocator(store, dbID, tblInfo.IsAutoRandomBitColUnsigned(), AutoRandomType)) + } + if tblInfo.IsSequence() { + allocs = append(allocs, NewSequenceAllocator(store, dbID, tblInfo.Sequence)) + } + return NewAllocators(allocs...) +} + +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. (#15409) // Alloc implements autoid.Allocator Alloc interface. func (alloc *allocator) Alloc(tableID int64, n uint64) (int64, int64, error) { if tableID == 0 { @@ -244,13 +352,18 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64) (int64, int64, err if alloc.base+n1 > alloc.end { var newBase, newEnd int64 startTime := time.Now() - // Although it may skip a segment here, we still think it is consumed. - consumeDur := startTime.Sub(alloc.lastAllocTime) - nextStep := NextStep(alloc.step, consumeDur) - // Make sure nextStep is big enough. + nextStep := alloc.step + if !alloc.customStep { + // Although it may skip a segment here, we still think it is consumed. + consumeDur := startTime.Sub(alloc.lastAllocTime) + nextStep = NextStep(alloc.step, consumeDur) + } + // Although the step is customized by user, we still need to make sure nextStep is big enough for insert batch. if nextStep <= n1 { - alloc.step = mathutil.MinInt64(n1*2, maxStep) - } else { + nextStep = mathutil.MinInt64(n1*2, maxStep) + } + // Store the step for non-customized-step allocator to calculate next dynamic step. + if !alloc.customStep { alloc.step = nextStep } err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { @@ -260,7 +373,7 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64) (int64, int64, err if err1 != nil { return errors.Trace(err1) } - tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step) + tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, nextStep) // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed @@ -298,13 +411,18 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) (int64, int64, e if uint64(alloc.base)+n > uint64(alloc.end) { var newBase, newEnd int64 startTime := time.Now() - // Although it may skip a segment here, we still treat it as consumed. - consumeDur := startTime.Sub(alloc.lastAllocTime) - nextStep := NextStep(alloc.step, consumeDur) - // Make sure nextStep is big enough. + nextStep := alloc.step + if !alloc.customStep { + // Although it may skip a segment here, we still treat it as consumed. 
+ consumeDur := startTime.Sub(alloc.lastAllocTime) + nextStep = NextStep(alloc.step, consumeDur) + } + // Although the step is customized by user, we still need to make sure nextStep is big enough for insert batch. if nextStep <= n1 { - alloc.step = mathutil.MinInt64(n1*2, maxStep) - } else { + nextStep = mathutil.MinInt64(n1*2, maxStep) + } + // Store the step for non-customized-step allocator to calculate next dynamic step. + if !alloc.customStep { alloc.step = nextStep } err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error { @@ -314,7 +432,7 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64) (int64, int64, e if err1 != nil { return errors.Trace(err1) } - tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step))) + tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(nextStep))) // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go index befccbb755fd7..8ca2169d51dde 100644 --- a/sessionctx/binloginfo/binloginfo_test.go +++ b/sessionctx/binloginfo/binloginfo_test.go @@ -467,6 +467,26 @@ func (s *testBinlogSuite) TestAddSpecialComment(c *C) { "alter table t shard_row_id_bits=2 ", "alter table t /*!90000 shard_row_id_bits=2 */", }, + { + "create table t1 (id int auto_increment key) auto_id_cache 100;", + "create table t1 (id int auto_increment key) /*T![auto_id_cache] auto_id_cache 100 */ ;", + }, + { + "create table t1 (id int auto_increment unique) auto_id_cache 10;", + "create table t1 (id int auto_increment unique) /*T![auto_id_cache] auto_id_cache 10 */ ;", + }, + { + "create table t1 (id int) auto_id_cache = 5;", + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache = 5 */ ;", + }, + { + "create table t1 (id int) auto_id_cache=5;", + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", + }, + { + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", + "create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;", + }, } for _, ca := range testCase { re := binloginfo.AddSpecialComment(ca.input) diff --git a/types/parser_driver/special_cmt_ctrl.go b/types/parser_driver/special_cmt_ctrl.go new file mode 100644 index 0000000000000..10a1fb6756eb6 --- /dev/null +++ b/types/parser_driver/special_cmt_ctrl.go @@ -0,0 +1,64 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "regexp" + + "github.com/pingcap/parser" +) + +// To add new features that needs to be downgrade-compatible, +// 1. Define a featureID below and make sure it is unique. +// For example, `const FeatureIDMyFea = "my_fea"`. +// 2. Register the new featureID in init(). +// Only the registered parser can parse the comment annotated with `my_fea`. +// Now, the parser treats `/*T![my_fea] what_ever */` and `what_ever` equivalent. +// In other word, the parser in old-version TiDB will ignores these comments. +// 3. [optional] Add a pattern into FeatureIDPatterns. 
+// This is only required if the new feature is contained in DDL, +// and we want to comment out this part of SQL in binlog. +func init() { + parser.SpecialCommentsController.Register(string(FeatureIDAutoRandom)) + parser.SpecialCommentsController.Register(string(FeatureIDAutoIDCache)) +} + +// SpecialCommentVersionPrefix is the prefix of TiDB executable comments. +const SpecialCommentVersionPrefix = `/*T!` + +// BuildSpecialCommentPrefix returns the prefix of `featureID` special comment. +// For some special feature in TiDB, we will refine ddl query with special comment, +// which may be useful when +// A: the downstream is directly MySQL instance (treat it as comment for compatibility). +// B: the downstream is lower version TiDB (ignore the unknown feature comment). +// C: the downstream is same/higher version TiDB (parse the feature syntax out). +func BuildSpecialCommentPrefix(featureID featureID) string { + return fmt.Sprintf("%s[%s]", SpecialCommentVersionPrefix, featureID) +} + +type featureID string + +const ( + // FeatureIDAutoRandom is the `auto_random` feature. + FeatureIDAutoRandom featureID = "auto_rand" + // FeatureIDAutoIDCache is the `auto_id_cache` feature. + FeatureIDAutoIDCache featureID = "auto_id_cache" +) + +// FeatureIDPatterns is used to record special comments patterns. +var FeatureIDPatterns = map[featureID]*regexp.Regexp{ + FeatureIDAutoRandom: regexp.MustCompile(`(?i)AUTO_RANDOM\s*(\(\s*\d+\s*\))?\s*`), + FeatureIDAutoIDCache: regexp.MustCompile(`(?i)AUTO_ID_CACHE\s*=?\s*\d+\s*`), +} diff --git a/util/admin/admin.go b/util/admin/admin.go index 0a9a695b5cc64..5f9b6930758fd 100644 --- a/util/admin/admin.go +++ b/util/admin/admin.go @@ -110,10 +110,15 @@ func isJobRollbackable(job *model.Job, id int64) error { model.ActionTruncateTable, model.ActionAddForeignKey, model.ActionDropForeignKey, model.ActionRenameTable, model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition, +<<<<<<< HEAD model.ActionModifySchemaCharsetAndCollate: if job.SchemaState != model.StateNone { return ErrCannotCancelDDLJob.GenWithStackByArgs(id) } +======= + model.ActionModifySchemaCharsetAndCollate, model.ActionRepairTable, model.ActionModifyTableAutoIdCache: + return job.SchemaState == model.StateNone +>>>>>>> 1c73dec... ddl: add syntax for setting the cache step of auto id explicitly. (#15409) } return nil }
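
For reference, a minimal usage sketch of the AUTO_ID_CACHE table option added by this patch, mirroring the statements exercised in ddl/db_integration_test.go and executor/show_test.go (table names are illustrative):

    CREATE TABLE t (a INT AUTO_INCREMENT PRIMARY KEY) AUTO_ID_CACHE 100;  -- allocator caches IDs in batches of 100
    ALTER TABLE t AUTO_ID_CACHE 200;   -- change the cache step; the cached range is discarded
    SHOW CREATE TABLE t;               -- option is rendered as /*T![auto_id_cache] AUTO_ID_CACHE=200 */
    CREATE TABLE t2 (a INT) AUTO_ID_CACHE = 9223372036854775808;  -- error: "table option auto_id_cache overflows int64"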