From 6f8c040c5538ea35cd90e03387a684111187771b Mon Sep 17 00:00:00 2001
From: WangXiangUSTC
Date: Tue, 3 Mar 2020 23:32:54 +0800
Subject: [PATCH 01/35] use Location to store position and gtid

---
 loader/loader.go              |   2 +-
 pkg/binlog/position.go        |  21 ++++
 pkg/utils/mydumper.go         |  39 ++++---
 syncer/checkpoint.go          | 200 ++++++++++++++++++++--------------
 syncer/job.go                 | 113 +++++++++----------
 syncer/relay.go               |   6 +-
 syncer/status.go              |   9 +-
 syncer/streamer_controller.go |  72 +++++++-----
 syncer/syncer.go              |  45 ++++----
 syncer/warning.go             |   9 +-
 10 files changed, 299 insertions(+), 217 deletions(-)

diff --git a/loader/loader.go b/loader/loader.go
index c8cdd62aa4..79141957b6 100644
--- a/loader/loader.go
+++ b/loader/loader.go
@@ -1194,7 +1194,7 @@ func (l *Loader) checkpointID() string {
 func (l *Loader) getMydumpMetadata() error {
 	metafile := filepath.Join(l.cfg.LoaderConfig.Dir, "metadata")
-	pos, err := utils.ParseMetaData(metafile)
+	pos, _, err := utils.ParseMetaData(metafile)
 	if err != nil {
 		l.logCtx.L().Error("fail to parse dump metadata", log.ShortError(err))
 		return err
diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go
index 0d29209e6e..88fcff0766 100644
--- a/pkg/binlog/position.go
+++ b/pkg/binlog/position.go
@@ -14,6 +14,7 @@
 package binlog
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
 
@@ -160,3 +161,23 @@ func ComparePosition(pos1, pos2 gmysql.Position) int {
 
 	return adjustedPos1.Compare(adjustedPos2)
 }
+
+// Location records a binlog position together with the GTID set reached at that position
+type Location struct {
+	Position gmysql.Position
+
+	GTID string
+}
+
+func (l Location) String() string {
+	return fmt.Sprintf("Position: %v, GTID: %s", l.Position, l.GTID)
+}
+
+// CompareLocation returns:
+//   1 if location1 is bigger than location2
+//   0 if location1 is equal to location2
+//  -1 if location1 is less than location2
+func CompareLocation(location1, location2 Location) int {
+	// TODO: take the GTID sets into account as well; for now order by position only
+	return ComparePosition(location1.Position, location2.Position)
+}
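Note: CompareLocation above orders locations only by file position for now. For reference, a GTID-aware comparison could later be layered on top of it; the sketch below is illustrative only — it assumes go-mysql's GTIDSet interface (ParseGTIDSet / Equal / Contain) and is not part of this patch. Because GTID sets are only partially ordered, disjoint sets have no meaningful order, and the sketch falls back to position comparison in that case:

	// compareLocationByGTID is a hypothetical helper, not part of this patch.
	// flavor is gmysql.MySQLFlavor or gmysql.MariaDBFlavor.
	func compareLocationByGTID(location1, location2 Location, flavor string) (int, error) {
		gs1, err := gmysql.ParseGTIDSet(flavor, location1.GTID)
		if err != nil {
			return 0, err
		}
		gs2, err := gmysql.ParseGTIDSet(flavor, location2.GTID)
		if err != nil {
			return 0, err
		}
		switch {
		case gs1.Equal(gs2):
			return 0, nil
		case gs1.Contain(gs2):
			return 1, nil
		case gs2.Contain(gs1):
			return -1, nil
		default:
			// disjoint sets have no total order; fall back to the file position
			return ComparePosition(location1.Position, location2.Position), nil
		}
	}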
diff --git a/pkg/utils/mydumper.go b/pkg/utils/mydumper.go
index d826d750e3..7df801f1fc 100644
--- a/pkg/utils/mydumper.go
+++ b/pkg/utils/mydumper.go
@@ -26,22 +26,24 @@ import (
 	"github.com/pingcap/dm/pkg/terror"
 )
 
-// ParseMetaData parses mydumper's output meta file and returns binlog position
-func ParseMetaData(filename string) (*mysql.Position, error) {
+// ParseMetaData parses mydumper's output meta file and returns the binlog position and GTID set
+func ParseMetaData(filename string) (*mysql.Position, string, error) {
 	fd, err := os.Open(filename)
 	if err != nil {
-		return nil, terror.ErrParseMydumperMeta.Generate(err)
+		return nil, "", terror.ErrParseMydumperMeta.Generate(err)
 	}
 	defer fd.Close()
 
-	var logName = ""
+	pos := new(mysql.Position)
+	gtid := ""
+
 	br := bufio.NewReader(fd)
 	for {
 		line, err := br.ReadString('\n')
 		if err == io.EOF {
 			break
 		} else if err != nil {
-			return nil, terror.ErrParseMydumperMeta.Generate(err)
+			return nil, "", terror.ErrParseMydumperMeta.Generate(err)
 		}
 		line = strings.TrimSpace(line[:len(line)-1])
 		if len(line) == 0 {
 			continue
 		}
@@ -52,23 +54,30 @@ func ParseMetaData(filename string) (*mysql.Position, error) {
 			// now, we only parse log / pos for `SHOW MASTER STATUS`
 			break
 		}
-		parts := strings.Split(line, ": ")
+		parts := strings.SplitN(line, ":", 2)
 		if len(parts) != 2 {
 			continue
 		}
-		if parts[0] == "Log" {
-			logName = parts[1]
-		} else if parts[0] == "Pos" {
+		key := strings.TrimSpace(parts[0])
+		value := strings.TrimSpace(parts[1])
+
+		switch key {
+		case "Log":
+			pos.Name = value
+		case "Pos":
+			pos64, err := strconv.ParseUint(value, 10, 32)
 			if err != nil {
-				return nil, terror.ErrParseMydumperMeta.Generate(err)
-			}
-			if len(logName) > 0 {
-				return &mysql.Position{Name: logName, Pos: uint32(pos64)}, nil
+				return nil, "", terror.ErrParseMydumperMeta.Generate(err)
 			}
-			break // Pos extracted, but no Log, error occurred
+			pos.Pos = uint32(pos64)
+		case "GTID":
+			gtid = value
 		}
 	}
 
-	return nil, terror.ErrParseMydumperMeta.Generate(fmt.Sprintf("file %s invalid format", filename))
+	if len(pos.Name) == 0 || pos.Pos == uint32(0) {
+		return nil, "", terror.ErrParseMydumperMeta.Generate(fmt.Sprintf("file %s invalid format", filename))
+	}
+
+	return pos, gtid, nil
 }
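Note: the switch from strings.Split(line, ": ") to strings.SplitN(line, ":", 2) matters because a GTID set itself contains colons, so only the first colon can be treated as the key/value separator (and, accordingly, ParseUint must receive the trimmed value, not the raw parts[1]). For orientation, a mydumper metadata file now looks roughly like this (values illustrative):

	Started dump at: 2020-03-03 23:32:54
	SHOW MASTER STATUS:
		Log: mysql-bin.000003
		Pos: 194
		GTID: 5c97a349-5dd8-11ea-b55a-0242ac110002:1-15

	Finished dump at: 2020-03-03 23:32:59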
diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go
index cdf83d9a3d..1cb1d0ad95 100644
--- a/syncer/checkpoint.go
+++ b/syncer/checkpoint.go
@@ -21,7 +21,7 @@ import (
 	"time"
 
 	"github.com/pingcap/dm/dm/config"
-	"github.com/pingcap/dm/pkg/binlog"
+	pbinlog "github.com/pingcap/dm/pkg/binlog"
 	"github.com/pingcap/dm/pkg/conn"
 	tcontext "github.com/pingcap/dm/pkg/context"
 	"github.com/pingcap/dm/pkg/log"
@@ -50,38 +50,53 @@ var (
 	globalCpSchema       = "" // global checkpoint's cp_schema
 	globalCpTable        = "" // global checkpoint's cp_table
 	maxCheckPointTimeout = "1m"
-	minCheckpoint        = mysql.Position{Pos: 4}
+	minPosition          = mysql.Position{Pos: 4}
+	minLocation          = pbinlog.Location{
+		Position: minPosition,
+	}
 
 	maxCheckPointSaveTime = 30 * time.Second
 )
 
-// NOTE: now we sync from relay log, so not add GTID support yet
 type binlogPoint struct {
 	sync.RWMutex
 
-	mysql.Position
-	ti *model.TableInfo
+	location pbinlog.Location
+	ti       *model.TableInfo
 
-	flushedPos mysql.Position // pos which flushed permanently
-	flushedTI  *model.TableInfo
+	flushedLocation pbinlog.Location // location which has been flushed permanently
+	flushedTI       *model.TableInfo
 }
 
-func newBinlogPoint(pos mysql.Position, ti *model.TableInfo, flushedPos mysql.Position, flushedTI *model.TableInfo) *binlogPoint {
+func newBinlogPoint(location, flushedLocation pbinlog.Location, ti, flushedTI *model.TableInfo) *binlogPoint {
 	return &binlogPoint{
-		Position:   pos,
-		ti:         ti,
-		flushedPos: flushedPos,
-		flushedTI:  flushedTI,
+		location:        location,
+		ti:              ti,
+		flushedLocation: flushedLocation,
+		flushedTI:       flushedTI,
 	}
 }
 
-func (b *binlogPoint) save(pos mysql.Position, ti *model.TableInfo) error {
+func (b *binlogPoint) save(location pbinlog.Location, ti *model.TableInfo) error {
 	b.Lock()
 	defer b.Unlock()
-	if binlog.ComparePosition(pos, b.Position) < 0 {
+
+	// TODO: also take the GTID set into account when comparing
+	if pbinlog.CompareLocation(location, b.location) < 0 {
 		// support to save equal pos, but not older pos
-		return terror.ErrCheckpointSaveInvalidPos.Generate(pos, b.Position)
+		return terror.ErrCheckpointSaveInvalidPos.Generate(location, b.location)
 	}
-	b.Position = pos
+
+	b.location = location
 	b.ti = ti
 	return nil
 }
 
 func (b *binlogPoint) flush() {
 	b.Lock()
 	defer b.Unlock()
-	b.flushedPos = b.Position
+	b.flushedLocation = b.location
 	b.flushedTI = b.ti
 }
 
 func (b *binlogPoint) rollback() (isSchemaChanged bool) {
 	b.Lock()
 	defer b.Unlock()
-	b.Position = b.flushedPos
+	b.location = b.flushedLocation
 	if isSchemaChanged = b.ti != b.flushedTI; isSchemaChanged {
 		b.ti = b.flushedTI
 	}
@@ -106,21 +121,23 @@ func (b *binlogPoint) rollback() (isSchemaChanged bool) {
 func (b *binlogPoint) outOfDate() bool {
 	b.RLock()
 	defer b.RUnlock()
-	return binlog.ComparePosition(b.Position, b.flushedPos) > 0
+	// TODO: also take the GTID set into account when comparing
+	return pbinlog.CompareLocation(b.location, b.flushedLocation) > 0
 }
 
-// MySQLPos returns point as mysql.Position
-func (b *binlogPoint) MySQLPos() mysql.Position {
+// MySQLLocation returns the point as a pbinlog.Location
+func (b *binlogPoint) MySQLLocation() pbinlog.Location {
 	b.RLock()
 	defer b.RUnlock()
-	return b.Position
+	return b.location
 }
 
-// FlushedMySQLPos returns flushed point as mysql.Position
-func (b *binlogPoint) FlushedMySQLPos() mysql.Position {
+// FlushedMySQLLocation returns the flushed point as a pbinlog.Location
+func (b *binlogPoint) FlushedMySQLLocation() pbinlog.Location {
 	b.RLock()
 	defer b.RUnlock()
-	return b.flushedPos
+	return b.flushedLocation
 }
 
 // TableInfo returns the table schema associated at the current binlog position.
@@ -134,7 +151,7 @@ func (b *binlogPoint) String() string {
 	b.RLock()
 	defer b.RUnlock()
 
-	return fmt.Sprintf("%v(flushed %v)", b.Position, b.flushedPos)
+	return fmt.Sprintf("%v(flushed %v)", b.location, b.flushedLocation)
 }
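Note: binlogPoint keeps two copies of a checkpoint — an in-memory one advanced by save and a durable one recorded by flush, with rollback reverting memory to the flushed copy after a failed batch (the actual DB write happens later, in FlushPointsExcept). A minimal illustration of the intended call pattern, using the types from this patch (hypothetical driver code, not from the repository; loc1/ti stand for a newer location and table schema):

	p := newBinlogPoint(minLocation, minLocation, nil, nil)
	if err := p.save(loc1, ti); err != nil {
		// a location older than the in-memory one is rejected
		return err
	}
	if p.outOfDate() { // in-memory point is ahead of the flushed point
		p.flush() // mark loc1 as the durably recorded point
	}
	p.rollback() // on failure: fall back to the flushed point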
 
 // CheckPoint represents checkpoints status for syncer
@@ -163,7 +180,7 @@ type CheckPoint interface {
 	LoadMeta() error
 
 	// SaveTablePoint saves checkpoint for specified table in memory
-	SaveTablePoint(sourceSchema, sourceTable string, pos mysql.Position, ti *model.TableInfo)
+	SaveTablePoint(sourceSchema, sourceTable string, location pbinlog.Location, ti *model.TableInfo)
 
 	// DeleteTablePoint deletes checkpoint for specified table in memory and storage
 	DeleteTablePoint(tctx *tcontext.Context, sourceSchema, sourceTable string) error
@@ -172,11 +189,11 @@ type CheckPoint interface {
 	DeleteSchemaPoint(tctx *tcontext.Context, sourceSchema string) error
 
 	// IsNewerTablePoint checks whether job's checkpoint is newer than previous saved checkpoint
-	IsNewerTablePoint(sourceSchema, sourceTable string, pos mysql.Position) bool
+	IsNewerTablePoint(sourceSchema, sourceTable string, location pbinlog.Location) bool
 
 	// SaveGlobalPoint saves the global binlog stream's checkpoint
 	// corresponding to Meta.Save
-	SaveGlobalPoint(pos mysql.Position)
+	SaveGlobalPoint(location pbinlog.Location)
 
 	// FlushGlobalPointsExcept flushes the global checkpoint and tables'
 	// checkpoints except exceptTables, it also flushes SQLs with Args providing
@@ -186,15 +203,15 @@ type CheckPoint interface {
 	FlushPointsExcept(tctx *tcontext.Context, exceptTables [][]string, extraSQLs []string, extraArgs [][]interface{}) error
 
 	// GlobalPoint returns the global binlog stream's checkpoint
-	// corresponding to to Meta.Pos
-	GlobalPoint() mysql.Position
+	// corresponding to Meta.Pos and Meta.GTID
+	GlobalPoint() pbinlog.Location
 
 	// TablePoint returns all table's stream checkpoint
-	TablePoint() map[string]map[string]mysql.Position
+	TablePoint() map[string]map[string]pbinlog.Location
 
 	// FlushedGlobalPoint returns the flushed global binlog stream's checkpoint
-	// corresponding to to Meta.Pos
-	FlushedGlobalPoint() mysql.Position
+	// corresponding to Meta.Pos and Meta.GTID
+	FlushedGlobalPoint() pbinlog.Location
 
 	// CheckGlobalPoint checks whether we should save global checkpoint
 	// corresponding to Meta.Check
@@ -244,7 +261,7 @@ func NewRemoteCheckPoint(tctx *tcontext.Context, cfg *config.SubTaskConfig, id s
 		tableName:   dbutil.TableName(cfg.MetaSchema, cfg.Name+"_syncer_checkpoint"),
 		id:          id,
 		points:      make(map[string]map[string]*binlogPoint),
-		globalPoint: newBinlogPoint(minCheckpoint, nil, minCheckpoint, nil),
+		globalPoint: newBinlogPoint(minLocation, minLocation, nil, nil),
 		logCtx:      tcontext.Background().WithLogger(tctx.L().WithFields(zap.String("component", "remote checkpoint"))),
 	}
 
@@ -290,7 +307,7 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error {
 		return err
 	}
 
-	cp.globalPoint = newBinlogPoint(minCheckpoint, nil, minCheckpoint, nil)
+	cp.globalPoint = newBinlogPoint(minLocation, minLocation, nil, nil)
 
 	cp.points = make(map[string]map[string]*binlogPoint)
 
@@ -298,20 +315,25 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error {
 }
 
 // SaveTablePoint implements CheckPoint.SaveTablePoint
-func (cp *RemoteCheckPoint) SaveTablePoint(sourceSchema, sourceTable string, pos mysql.Position, ti *model.TableInfo) {
+func (cp *RemoteCheckPoint) SaveTablePoint(sourceSchema, sourceTable string, location pbinlog.Location, ti *model.TableInfo) {
 	cp.Lock()
 	defer cp.Unlock()
-	cp.saveTablePoint(sourceSchema, sourceTable, pos, ti)
+	cp.saveTablePoint(sourceSchema, sourceTable, location, ti)
 }
 
 // saveTablePoint saves single table's checkpoint without mutex.Lock
-func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, pos mysql.Position, ti *model.TableInfo) {
-	if binlog.ComparePosition(cp.globalPoint.Position, pos) > 0 {
-		panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", pos, cp.globalPoint))
+func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, location pbinlog.Location, ti *model.TableInfo) {
+	if pbinlog.CompareLocation(cp.globalPoint.location, location) > 0 {
+		panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint))
 	}
 
 	// we save table checkpoint while we meet DDL or DML
-	cp.logCtx.L().Debug("save table checkpoint", zap.Stringer("position", pos), zap.String("schema", sourceSchema), zap.String("table", sourceTable))
+	cp.logCtx.L().Debug("save table checkpoint", zap.Stringer("location", location), zap.String("schema", sourceSchema), zap.String("table", sourceTable))
 	mSchema, ok := cp.points[sourceSchema]
 	if !ok {
 		mSchema = make(map[string]*binlogPoint)
@@ -319,8 +341,8 @@ func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, pos
 	}
 	point, ok := mSchema[sourceTable]
 	if !ok {
-		mSchema[sourceTable] = newBinlogPoint(pos, ti, minCheckpoint, nil)
-	} else if err := point.save(pos, ti); err != nil {
+		mSchema[sourceTable] = newBinlogPoint(location, minLocation, ti, nil)
+	} else if err := point.save(location, ti); err != nil {
 		cp.logCtx.L().Error("fail to save table point", zap.String("schema", sourceSchema), zap.String("table", sourceTable), log.ShortError(err))
 	}
 }
@@ -374,7 +396,7 @@ func (cp *RemoteCheckPoint) DeleteSchemaPoint(tctx *tcontext.Context, sourceSche
 }
 
 // IsNewerTablePoint implements CheckPoint.IsNewerTablePoint
-func (cp *RemoteCheckPoint) IsNewerTablePoint(sourceSchema, sourceTable string, pos mysql.Position) bool {
+func (cp *RemoteCheckPoint) IsNewerTablePoint(sourceSchema, sourceTable string, location pbinlog.Location) bool {
 	cp.RLock()
 	defer cp.RUnlock()
 	mSchema, ok := cp.points[sourceSchema]
@@ -385,13 +407,13 @@ func (cp *RemoteCheckPoint) IsNewerTablePoint(sourceSchema, sourceTable string,
 	if !ok {
 		return true
 	}
-	oldPos := point.MySQLPos()
+	
oldLocation := point.MySQLLocation() - return binlog.ComparePosition(pos, oldPos) > 0 + return pbinlog.CompareLocation(location, oldLocation) > 0 } // SaveGlobalPoint implements CheckPoint.SaveGlobalPoint -func (cp *RemoteCheckPoint) SaveGlobalPoint(pos mysql.Position) { +func (cp *RemoteCheckPoint) SaveGlobalPoint(pos pbinlog.Location) { cp.Lock() defer cp.Unlock() @@ -422,8 +444,8 @@ func (cp *RemoteCheckPoint) FlushPointsExcept(tctx *tcontext.Context, exceptTabl args := make([][]interface{}, 0, 100) if cp.globalPoint.outOfDate() { - posG := cp.GlobalPoint() - sqlG, argG := cp.genUpdateSQL(globalCpSchema, globalCpTable, posG.Name, posG.Pos, nil, true) + locationG := cp.GlobalPoint() + sqlG, argG := cp.genUpdateSQL(globalCpSchema, globalCpTable, locationG, nil, true) sqls = append(sqls, sqlG) args = append(args, argG) } @@ -443,8 +465,8 @@ func (cp *RemoteCheckPoint) FlushPointsExcept(tctx *tcontext.Context, exceptTabl return terror.ErrSchemaTrackerCannotSerialize.Delegate(err, schema, table) } - pos := point.MySQLPos() - sql2, arg := cp.genUpdateSQL(schema, table, pos.Name, pos.Pos, tiBytes, false) + location := point.MySQLLocation() + sql2, arg := cp.genUpdateSQL(schema, table, location, tiBytes, false) sqls = append(sqls, sql2) args = append(args, arg) @@ -472,28 +494,28 @@ func (cp *RemoteCheckPoint) FlushPointsExcept(tctx *tcontext.Context, exceptTabl } // GlobalPoint implements CheckPoint.GlobalPoint -func (cp *RemoteCheckPoint) GlobalPoint() mysql.Position { - return cp.globalPoint.MySQLPos() +func (cp *RemoteCheckPoint) GlobalPoint() pbinlog.Location { + return cp.globalPoint.MySQLLocation() } // TablePoint implements CheckPoint.TablePoint -func (cp *RemoteCheckPoint) TablePoint() map[string]map[string]mysql.Position { +func (cp *RemoteCheckPoint) TablePoint() map[string]map[string]pbinlog.Location { cp.RLock() defer cp.RUnlock() - tablePoint := make(map[string]map[string]mysql.Position) + tablePoint := make(map[string]map[string]pbinlog.Location) for schema, tables := range cp.points { - tablePoint[schema] = make(map[string]mysql.Position) + tablePoint[schema] = make(map[string]pbinlog.Location) for table, point := range tables { - tablePoint[schema][table] = point.MySQLPos() + tablePoint[schema][table] = point.MySQLLocation() } } return tablePoint } // FlushedGlobalPoint implements CheckPoint.FlushedGlobalPoint -func (cp *RemoteCheckPoint) FlushedGlobalPoint() mysql.Position { - return cp.globalPoint.FlushedMySQLPos() +func (cp *RemoteCheckPoint) FlushedGlobalPoint() pbinlog.Location { + return cp.globalPoint.FlushedMySQLLocation() } // String implements CheckPoint.String @@ -577,7 +599,7 @@ func (cp *RemoteCheckPoint) createTable(tctx *tcontext.Context) error { // Load implements CheckPoint.Load func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.Tracker) error { - query := `SELECT cp_schema, cp_table, binlog_name, binlog_pos, table_info, is_global FROM ` + cp.tableName + ` WHERE id = ?` + query := `SELECT cp_schema, cp_table, binlog_name, binlog_pos, binlog_gtid, table_info, is_global FROM ` + cp.tableName + ` WHERE id = ?` rows, err := cp.dbConn.querySQL(tctx, query, cp.id) defer func() { if rows != nil { @@ -601,21 +623,25 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T cpTable string binlogName string binlogPos uint32 + binlogGTID string tiBytes []byte isGlobal bool ) for rows.Next() { - err := rows.Scan(&cpSchema, &cpTable, &binlogName, &binlogPos, &tiBytes, &isGlobal) + err := rows.Scan(&cpSchema, &cpTable, 
&binlogName, &binlogPos, &binlogGTID, &tiBytes, &isGlobal)
 		if err != nil {
 			return terror.WithScope(terror.DBErrorAdapt(err, terror.ErrDBDriverError), terror.ScopeDownstream)
 		}
 
-		pos := mysql.Position{
-			Name: binlogName,
-			Pos:  binlogPos,
+		location := pbinlog.Location{
+			Position: mysql.Position{
+				Name: binlogName,
+				Pos:  binlogPos,
+			},
+			GTID: binlogGTID,
 		}
 		if isGlobal {
-			if binlog.ComparePosition(pos, minCheckpoint) > 0 {
-				cp.globalPoint = newBinlogPoint(pos, nil, pos, nil)
+			if pbinlog.CompareLocation(location, minLocation) > 0 {
+				cp.globalPoint = newBinlogPoint(location, location, nil, nil)
 				cp.logCtx.L().Info("fetch global checkpoint from DB", log.WrapStringerField("global checkpoint", cp.globalPoint))
 			}
 			continue // skip global checkpoint
@@ -640,7 +666,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T
 			mSchema = make(map[string]*binlogPoint)
 			cp.points[cpSchema] = mSchema
 		}
-		mSchema[cpTable] = newBinlogPoint(pos, &ti, pos, &ti)
+		mSchema[cpTable] = newBinlogPoint(location, location, &ti, &ti)
 	}
 
 	return terror.WithScope(terror.DBErrorAdapt(rows.Err(), terror.ErrDBDriverError), terror.ScopeDownstream)
@@ -649,14 +675,14 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T
 
 // LoadMeta implements CheckPoint.LoadMeta
 func (cp *RemoteCheckPoint) LoadMeta() error {
 	var (
-		pos *mysql.Position
-		err error
+		location *pbinlog.Location
+		err      error
 	)
 	switch cp.cfg.Mode {
 	case config.ModeAll:
 		// NOTE: syncer must continue the syncing follow loader's tail, so we parse mydumper's output
 		// refine when master / slave switching added and checkpoint mechanism refactored
-		pos, err = cp.parseMetaData()
+		location, err = cp.parseMetaData()
 		if err != nil {
 			return err
 		}
@@ -666,9 +692,12 @@ func (cp *RemoteCheckPoint) LoadMeta() error {
 			cp.logCtx.L().Warn("don't set meta in increment task-mode")
 			return nil
 		}
-		pos = &mysql.Position{
-			Name: cp.cfg.Meta.BinLogName,
-			Pos:  cp.cfg.Meta.BinLogPos,
+		location = &pbinlog.Location{
+			Position: mysql.Position{
+				Name: cp.cfg.Meta.BinLogName,
+				Pos:  cp.cfg.Meta.BinLogPos,
+			},
+			// TODO: fill GTID here once the task meta carries one
 		}
 	default:
 		// should not go here (syncer is only used in `all` or `incremental` mode)
@@ -676,8 +705,8 @@ func (cp *RemoteCheckPoint) LoadMeta() error {
 	}
 
 	// if meta loaded, we will start syncing from meta's pos
-	if pos != nil {
-		cp.globalPoint = newBinlogPoint(*pos, nil, *pos, nil)
+	if location != nil {
+		cp.globalPoint = newBinlogPoint(*location, *location, nil, nil)
 		cp.logCtx.L().Info("loaded checkpoints from meta", log.WrapStringerField("global checkpoint", cp.globalPoint))
 	}
 
@@ -685,15 +714,16 @@ func (cp *RemoteCheckPoint) LoadMeta() error {
 }
 
 // genUpdateSQL generates SQL and arguments for update checkpoint
-func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, binlogName string, binlogPos uint32, tiBytes []byte, isGlobal bool) (string, []interface{}) {
+func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location pbinlog.Location, tiBytes []byte, isGlobal bool) (string, []interface{}) {
 	// use `INSERT INTO ... ON DUPLICATE KEY UPDATE` rather than `REPLACE INTO`
 	// to keep `create_time`, `update_time` correctly
 	sql2 := `INSERT INTO ` + cp.tableName + `
-		(id, cp_schema, cp_table, binlog_name, binlog_pos, table_info, is_global) VALUES
-		(?, ?, ?, ?, ?, ?, ?)
+		(id, cp_schema, cp_table, binlog_name, binlog_pos, binlog_gtid, table_info, is_global) VALUES
+		(?, ?, ?, ?, ?, ?, ?, ?)
 		ON DUPLICATE KEY UPDATE
 			binlog_name = VALUES(binlog_name),
 			binlog_pos = VALUES(binlog_pos),
+			binlog_gtid = VALUES(binlog_gtid),
 			table_info = VALUES(table_info),
 			is_global = VALUES(is_global);
 	`
@@ -706,13 +736,21 @@ func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location pbin
 	if len(tiBytes) == 0 {
 		tiBytes = []byte("null")
 	}
-	args := []interface{}{cp.id, cpSchema, cpTable, binlogName, binlogPos, tiBytes, isGlobal}
+	args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, location.GTID, tiBytes, isGlobal}
 	return sql2, args
 }
 
-func (cp *RemoteCheckPoint) parseMetaData() (*mysql.Position, error) {
+func (cp *RemoteCheckPoint) parseMetaData() (*pbinlog.Location, error) {
 	// `metadata` is mydumper's output meta file name
 	filename := path.Join(cp.cfg.Dir, "metadata")
 	cp.logCtx.L().Info("parsing metadata from file", zap.String("file", filename))
-	return utils.ParseMetaData(filename)
+	pos, gtid, err := utils.ParseMetaData(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	return &pbinlog.Location{
+		Position: *pos,
+		GTID:     gtid,
+	}, nil
 }
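Note: the SELECT in Load and the INSERT ... ON DUPLICATE KEY UPDATE in genUpdateSQL now both reference a binlog_gtid column, so the checkpoint table DDL (createTable, not shown in this hunk) must grow the same column, and the VALUES list needs eight placeholders to match the eight-element args slice. The table shape implied by the column lists is roughly the following; column types and key layout here are assumptions for orientation, not the project's actual DDL:

	CREATE TABLE IF NOT EXISTS <meta_schema>.<task>_syncer_checkpoint (
		id          VARCHAR(32)  NOT NULL,
		cp_schema   VARCHAR(128) NOT NULL,
		cp_table    VARCHAR(128) NOT NULL,
		binlog_name VARCHAR(128),
		binlog_pos  INT UNSIGNED,
		binlog_gtid TEXT,
		table_info  JSON NOT NULL,
		is_global   BOOLEAN,
		create_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
		update_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
		UNIQUE KEY uk_id_schema_table (id, cp_schema, cp_table)
	)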
diff --git a/syncer/job.go b/syncer/job.go
index 34f38ff8eb..5daec6b2b1 100644
--- a/syncer/job.go
+++ b/syncer/job.go
@@ -16,9 +16,10 @@ package syncer
 
 import (
 	"fmt"
 
-	"github.com/siddontang/go-mysql/mysql"
-
-	"github.com/pingcap/dm/pkg/gtid"
+	"github.com/pingcap/dm/pkg/binlog"
 )
 
@@ -59,62 +60,58 @@ func (t opType) String() string {
 }
 
 type job struct {
-	tp           opType
-	sourceSchema string
-	sourceTable  string
-	targetSchema string
-	targetTable  string
-	sql          string
-	args         []interface{}
-	key          string
-	retry        bool
-	pos          mysql.Position
-	currentPos   mysql.Position // exactly binlog position of current SQL
-	gtidSet      gtid.Set
-	ddls         []string
-	traceID      string
-	traceGID     string
+	tp              opType
+	sourceSchema    string
+	sourceTable     string
+	targetSchema    string
+	targetTable     string
+	sql             string
+	args            []interface{}
+	key             string
+	retry           bool
+	location        binlog.Location
+	currentLocation binlog.Location // the exact binlog location of the current SQL
+	ddls            []string
+	traceID         string
+	traceGID        string
 }
 
 func (j *job) String() string {
 	// only output some important information, maybe useful in execution.
-	return fmt.Sprintf("tp: %s, sql: %s, args: %v, key: %s, ddls: %s, last_pos: %s, current_pos: %s, gtid:%v", j.tp, j.sql, j.args, j.key, j.ddls, j.pos, j.currentPos, j.gtidSet)
+	return fmt.Sprintf("tp: %s, sql: %s, args: %v, key: %s, ddls: %s, last_location: %s, current_location: %s", j.tp, j.sql, j.args, j.key, j.ddls, j.location, j.currentLocation)
 }
 
-func newJob(tp opType, sourceSchema, sourceTable, targetSchema, targetTable, sql string, args []interface{}, key string, pos, cmdPos mysql.Position, currentGtidSet gtid.Set, traceID string) *job {
-	var gs gtid.Set
-	if currentGtidSet != nil {
-		gs = currentGtidSet.Clone()
-	}
+func newJob(tp opType, sourceSchema, sourceTable, targetSchema, targetTable, sql string, args []interface{}, key string, location, cmdLocation binlog.Location, traceID string) *job {
 	return &job{
-		tp:           tp,
-		sourceSchema: sourceSchema,
-		sourceTable:  sourceTable,
-		targetSchema: targetSchema,
-		targetTable:  targetTable,
-		sql:          sql,
-		args:         args,
-		key:          key,
-		pos:          pos,
-		currentPos:   cmdPos,
-		gtidSet:      gs,
-		retry:        true,
-		traceID:      traceID,
+		tp:              tp,
+		sourceSchema:    sourceSchema,
+		sourceTable:     sourceTable,
+		targetSchema:    targetSchema,
+		targetTable:     targetTable,
+		sql:             sql,
+		args:            args,
+		key:             key,
+		location:        location,
+		currentLocation: cmdLocation,
+		retry:           true,
+		traceID:         traceID,
 	}
 }
 
-func newDDLJob(ddlInfo *shardingDDLInfo, ddls []string, pos, cmdPos mysql.Position, currentGtidSet gtid.Set, traceID string) *job {
-	var gs gtid.Set
-	if currentGtidSet != nil {
-		gs = currentGtidSet.Clone()
-	}
+func newDDLJob(ddlInfo *shardingDDLInfo, ddls []string, location, cmdLocation binlog.Location, traceID string) *job {
 	j := &job{
-		tp:         ddl,
-		ddls:       ddls,
-		pos:        pos,
-		currentPos: cmdPos,
-		gtidSet:    gs,
-		traceID:    traceID,
+		tp:              ddl,
+		ddls:            ddls,
+		location:        location,
+		currentLocation: cmdLocation,
+		traceID:         traceID,
 	}
 
 	if ddlInfo != nil {
@@ -127,17 +124,12 @@ func newDDLJob(ddlInfo *shardingDDLInfo, ddls []string, location, cmdLocation bi
 	return j
 }
 
-func newXIDJob(pos, cmdPos mysql.Position, currentGtidSet gtid.Set, traceID string) *job {
-	var gs gtid.Set
-	if currentGtidSet != nil {
-		gs = currentGtidSet.Clone()
-	}
+func newXIDJob(location, cmdLocation binlog.Location, traceID string) *job {
 	return &job{
-		tp:         xid,
-		pos:        pos,
-		currentPos: cmdPos,
-		gtidSet:    gs,
-		traceID:    traceID,
+		tp:              xid,
+		location:        location,
+		currentLocation: cmdLocation,
+		traceID:         traceID,
 	}
 }
 
@@ -147,11 +139,10 @@ func newFlushJob() *job {
 	}
 }
 
-func newSkipJob(pos mysql.Position, currentGtidSet gtid.Set) *job {
+func newSkipJob(location binlog.Location) *job {
 	return &job{
-		tp:      skip,
-		pos:     pos,
-		gtidSet: currentGtidSet,
+		tp:       skip,
+		location: location,
 	}
 }
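Note: a job now carries two binlog.Location values in place of (pos, currentPos, gtidSet). `location` is the checkpoint-safe restart point (e.g. the start of the enclosing transaction), while `currentLocation` is the location of the event that produced the job itself; the two coincide once the transaction's XID event is reached. A hypothetical call site under those assumptions (variable names are illustrative, not from this patch):

	// inside the event loop: a row change within a transaction
	j := newJob(insert, schema, table, schema, table, sql, args, key,
		lastLocation, currentLocation, traceID)

	// on the XID event the transaction is complete, so both locations agree
	xj := newXIDJob(currentLocation, currentLocation, traceID)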
diff --git a/syncer/relay.go b/syncer/relay.go
index df05f6fe16..cb78238478 100644
--- a/syncer/relay.go
+++ b/syncer/relay.go
@@ -47,10 +47,10 @@ func (s *Syncer) setInitActiveRelayLog() error {
 		return terror.ErrRelayNoValidRelaySubDir.Generate()
 	}
 
-	checkPos := s.checkpoint.GlobalPoint()
-	if binlog.ComparePosition(checkPos, minCheckpoint) > 0 {
+	checkLocation := s.checkpoint.GlobalPoint()
+	if binlog.ComparePosition(checkLocation.Position, minPosition) > 0 {
 		// continue from previous checkpoint
-		pos = checkPos
+		pos = checkLocation.Position
 		extractPos = true
 	} else if s.cfg.Mode == config.ModeIncrement {
 		// fresh start for task-mode increment
diff --git a/syncer/status.go b/syncer/status.go
index c399420453..5d87de58bb 100644
--- a/syncer/status.go
+++ b/syncer/status.go
@@ -39,7 +39,7 @@ func (s *Syncer) Status() interface{} {
 		s.tctx.L().Warn("fail to get master status", zap.Error(err))
 	}
 
-	syncerPos := s.checkpoint.FlushedGlobalPoint()
+	syncerLocation := s.checkpoint.FlushedGlobalPoint()
 	if err != nil {
 		s.tctx.L().Warn("fail to get flushed global point", zap.Error(err))
 	}
@@ -48,11 +48,12 @@ func (s *Syncer) Status() interface{} {
 		TotalTps:     totalTps,
 		RecentTps:    tps,
 		MasterBinlog: masterPos.String(),
-		SyncerBinlog: syncerPos.String(),
+		SyncerBinlog: syncerLocation.Position.String(),
 	}
 	if masterGTIDSet != nil { // masterGTIDSet maybe a nil interface
 		st.MasterBinlogGtid = masterGTIDSet.String()
 	}
+	st.SyncerBinlogGtid = syncerLocation.GTID
 
 	st.BinlogType = "unknown"
 	if s.streamerController != nil {
@@ -62,9 +63,9 @@ func (s *Syncer) Status() interface{} {
 	// If a syncer unit is waiting for relay log catch up, it has not executed
 	// LoadMeta and will return a parsed binlog name error. As we can find mysql
 	// position in syncer status, we record this error only in debug level.
-	realPos, err := binlog.RealMySQLPos(syncerPos)
+	realPos, err := binlog.RealMySQLPos(syncerLocation.Position)
 	if err != nil {
-		s.tctx.L().Debug("fail to parse real mysql position", zap.Stringer("position", syncerPos), log.ShortError(err))
+		s.tctx.L().Debug("fail to parse real mysql position", zap.Stringer("position", syncerLocation.Position), log.ShortError(err))
 	}
 	st.Synced = utils.CompareBinlogPos(masterPos, realPos, 0) == 0
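Note on the `flavor` field added to remoteBinlogReader below: MySQL and MariaDB write GTID sets in different syntaxes, so the reader must know which parser to call before it can start a GTID-based sync. Illustrative set strings (values made up):

	// MySQL flavor: server-UUID:interval[,...]
	const exampleMySQLGTIDSet = "3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5"
	// MariaDB flavor: domain-server_id-sequence[,...]
	const exampleMariaDBGTIDSet = "0-1-100,1-2-77"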
diff --git a/syncer/streamer_controller.go b/syncer/streamer_controller.go
index 92779e2f80..c45a6dd10b 100644
--- a/syncer/streamer_controller.go
+++ b/syncer/streamer_controller.go
@@ -43,7 +43,7 @@ const (
 // For other implementations who implement StreamerProducer and Streamer can easily take place of Syncer.streamProducer
 // For test is easy to mock
 type StreamerProducer interface {
-	generateStreamer(pos mysql.Position) (streamer.Streamer, error)
+	generateStreamer(location binlog.Location) (streamer.Streamer, error)
 }
 
 // Read local relay log
@@ -51,18 +51,19 @@ type localBinlogReader struct {
 	reader *streamer.BinlogReader
 }
 
-func (l *localBinlogReader) generateStreamer(pos mysql.Position) (streamer.Streamer, error) {
-	return l.reader.StartSync(pos)
+func (l *localBinlogReader) generateStreamer(location binlog.Location) (streamer.Streamer, error) {
+	return l.reader.StartSync(location.Position)
 }
 
 // Read remote binlog
 type remoteBinlogReader struct {
 	reader     *replication.BinlogSyncer
 	tctx       *tcontext.Context
+	flavor     string
 	EnableGTID bool
 }
 
-func (r *remoteBinlogReader) generateStreamer(pos mysql.Position) (streamer.Streamer, error) {
+func (r *remoteBinlogReader) generateStreamer(location binlog.Location) (streamer.Streamer, error) {
 	defer func() {
 		lastSlaveConnectionID := r.reader.LastConnectionID()
 		r.tctx.L().Info("last slave connection", zap.Uint32("connection ID", lastSlaveConnectionID))
@@ -71,11 +72,28 @@ func (r *remoteBinlogReader) generateStreamer(location binlog.Location) (streame
 	if r.EnableGTID {
 		// NOTE: our (per-table based) checkpoint does not support GTID yet
-		return nil, terror.ErrSyncerUnitRemoteSteamerWithGTID.Generate()
+		var gtid mysql.GTIDSet
+		var err error
+		if r.flavor == mysql.MySQLFlavor {
+			gtid, err = mysql.ParseMysqlGTIDSet(location.GTID)
+		} else {
+			gtid, err = mysql.ParseMariadbGTIDSet(location.GTID)
+		}
+		if err != nil {
+			// TODO: use terror
+			return nil, err
+		}
+		streamer, err := 
r.reader.StartSyncGTID(gtid) + if err != nil { + return nil, err + } + return streamer, nil } // position's name may contain uuid, so need remove it - adjustedPos := binlog.AdjustPosition(pos) + adjustedPos := binlog.AdjustPosition(location.Position) streamer, err := r.reader.StartSync(adjustedPos) return streamer, terror.ErrSyncerUnitRemoteSteamerStartSync.Delegate(err) } @@ -94,6 +112,7 @@ type StreamerController struct { currentBinlogType BinlogType syncCfg replication.BinlogSyncerConfig + enableGTID bool localBinlogDir string timezone *time.Location @@ -115,11 +134,12 @@ type StreamerController struct { } // NewStreamerController creates a new streamer controller -func NewStreamerController(tctx *tcontext.Context, syncCfg replication.BinlogSyncerConfig, fromDB *UpStreamConn, binlogType BinlogType, localBinlogDir string, timezone *time.Location) *StreamerController { +func NewStreamerController(tctx *tcontext.Context, syncCfg replication.BinlogSyncerConfig, enableGTID bool, fromDB *UpStreamConn, binlogType BinlogType, localBinlogDir string, timezone *time.Location) *StreamerController { streamerController := &StreamerController{ initBinlogType: binlogType, currentBinlogType: binlogType, syncCfg: syncCfg, + enableGTID: enableGTID, localBinlogDir: localBinlogDir, timezone: timezone, fromDB: fromDB, @@ -130,7 +150,7 @@ func NewStreamerController(tctx *tcontext.Context, syncCfg replication.BinlogSyn } // Start starts streamer controller -func (c *StreamerController) Start(tctx *tcontext.Context, pos mysql.Position) error { +func (c *StreamerController) Start(tctx *tcontext.Context, location binlog.Location) error { c.Lock() defer c.Unlock() @@ -140,9 +160,9 @@ func (c *StreamerController) Start(tctx *tcontext.Context, pos mysql.Position) e var err error if c.serverIDUpdated { - err = c.resetReplicationSyncer(tctx, pos) + err = c.resetReplicationSyncer(tctx, location) } else { - err = c.updateServerIDAndResetReplication(tctx, pos) + err = c.updateServerIDAndResetReplication(tctx, location) } if err != nil { c.close(tctx) @@ -153,14 +173,14 @@ func (c *StreamerController) Start(tctx *tcontext.Context, pos mysql.Position) e } // ResetReplicationSyncer reset the replication -func (c *StreamerController) ResetReplicationSyncer(tctx *tcontext.Context, pos mysql.Position) (err error) { +func (c *StreamerController) ResetReplicationSyncer(tctx *tcontext.Context, location binlog.Location) (err error) { c.Lock() defer c.Unlock() - return c.resetReplicationSyncer(tctx, pos) + return c.resetReplicationSyncer(tctx, location) } -func (c *StreamerController) resetReplicationSyncer(tctx *tcontext.Context, pos mysql.Position) (err error) { +func (c *StreamerController) resetReplicationSyncer(tctx *tcontext.Context, location binlog.Location) (err error) { uuidSameWithUpstream := true // close old streamerProducer @@ -170,14 +190,14 @@ func (c *StreamerController) resetReplicationSyncer(tctx *tcontext.Context, pos c.closeBinlogSyncer(tctx, t.reader) case *localBinlogReader: // check the uuid before close - uuidSameWithUpstream, err = c.checkUUIDSameWithUpstream(pos, t.reader.GetUUIDs()) + uuidSameWithUpstream, err = c.checkUUIDSameWithUpstream(location.Position, t.reader.GetUUIDs()) if err != nil { return err } t.reader.Close() default: // some other producers such as mockStreamerProducer, should not re-create - c.streamer, err = c.streamerProducer.generateStreamer(pos) + c.streamer, err = c.streamerProducer.generateStreamer(location) return err } } @@ -194,22 +214,22 @@ func (c *StreamerController) 
resetReplicationSyncer(tctx *tcontext.Context, pos } if c.currentBinlogType == RemoteBinlog { - c.streamerProducer = &remoteBinlogReader{replication.NewBinlogSyncer(c.syncCfg), tctx, false} + c.streamerProducer = &remoteBinlogReader{replication.NewBinlogSyncer(c.syncCfg), tctx, c.syncCfg.Flavor, c.enableGTID} } else { c.streamerProducer = &localBinlogReader{streamer.NewBinlogReader(tctx, &streamer.BinlogReaderConfig{RelayDir: c.localBinlogDir, Timezone: c.timezone})} } - c.streamer, err = c.streamerProducer.generateStreamer(pos) + c.streamer, err = c.streamerProducer.generateStreamer(location) return err } // RedirectStreamer redirects the streamer's begin position or gtid -func (c *StreamerController) RedirectStreamer(tctx *tcontext.Context, pos mysql.Position) error { +func (c *StreamerController) RedirectStreamer(tctx *tcontext.Context, location binlog.Location) error { c.Lock() defer c.Unlock() - tctx.L().Info("redirect streamer", zap.Stringer("position", pos)) - return c.resetReplicationSyncer(tctx, pos) + tctx.L().Info("redirect streamer", zap.Stringer("location", location)) + return c.resetReplicationSyncer(tctx, location) } // GetEvent returns binlog event, should only have one thread call this function. @@ -265,13 +285,13 @@ func (c *StreamerController) GetEvent(tctx *tcontext.Context) (event *replicatio } // ReopenWithRetry reopens streamer with retry -func (c *StreamerController) ReopenWithRetry(tctx *tcontext.Context, pos mysql.Position) error { +func (c *StreamerController) ReopenWithRetry(tctx *tcontext.Context, location binlog.Location) error { c.Lock() defer c.Unlock() var err error for i := 0; i < maxRetryCount; i++ { - err = c.resetReplicationSyncer(tctx, pos) + err = c.resetReplicationSyncer(tctx, location) if err == nil { return nil } @@ -407,20 +427,20 @@ func (c *StreamerController) updateServerID(tctx *tcontext.Context) error { } // UpdateServerIDAndResetReplication updates the server id and reset replication -func (c *StreamerController) UpdateServerIDAndResetReplication(tctx *tcontext.Context, pos mysql.Position) error { +func (c *StreamerController) UpdateServerIDAndResetReplication(tctx *tcontext.Context, location binlog.Location) error { c.Lock() defer c.Unlock() - return c.updateServerIDAndResetReplication(tctx, pos) + return c.updateServerIDAndResetReplication(tctx, location) } -func (c *StreamerController) updateServerIDAndResetReplication(tctx *tcontext.Context, pos mysql.Position) error { +func (c *StreamerController) updateServerIDAndResetReplication(tctx *tcontext.Context, location binlog.Location) error { err := c.updateServerID(tctx) if err != nil { return err } - err = c.resetReplicationSyncer(tctx, pos) + err = c.resetReplicationSyncer(tctx, location) if err != nil { return err } diff --git a/syncer/syncer.go b/syncer/syncer.go index f38b182341..5d300b3b7b 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -268,7 +268,7 @@ func (s *Syncer) Init(ctx context.Context) (err error) { } rollbackHolder.Add(fr.FuncRollback{Name: "close-DBs", Fn: s.closeDBs}) - s.streamerController = NewStreamerController(tctx, s.syncCfg, s.fromDB, s.binlogType, s.cfg.RelayDir, s.timezone) + s.streamerController = NewStreamerController(tctx, s.syncCfg, s.cfg.EnableGTID, s.fromDB, s.binlogType, s.cfg.RelayDir, s.timezone) s.bwList, err = filter.New(s.cfg.CaseSensitive, s.cfg.BWList) if err != nil { @@ -425,7 +425,7 @@ func (s *Syncer) initShardingGroups() error { func (s *Syncer) IsFreshTask(ctx context.Context) (bool, error) { globalPoint := s.checkpoint.GlobalPoint() 
tablePoint := s.checkpoint.TablePoint() - return binlog.ComparePosition(globalPoint, minCheckpoint) <= 0 && len(tablePoint) == 0, nil + return binlog.CompareLocation(globalPoint, minLocation) <= 0 && len(tablePoint) == 0, nil } func (s *Syncer) reset() { @@ -555,7 +555,7 @@ func (s *Syncer) Process(ctx context.Context, pr chan pb.ProcessResult) { prePos := s.checkpoint.GlobalPoint() s.checkpoint.Rollback(s.schemaTracker) currPos := s.checkpoint.GlobalPoint() - if binlog.ComparePosition(prePos, currPos) != 0 { + if binlog.CompareLocation(prePos, currPos) != 0 { s.tctx.L().Warn("something wrong with rollback global checkpoint", zap.Stringer("previous position", prePos), zap.Stringer("current position", currPos)) } @@ -678,16 +678,16 @@ func (s *Syncer) checkWait(job *job) bool { return false } -func (s *Syncer) saveTablePoint(db, table string, pos mysql.Position) { +func (s *Syncer) saveTablePoint(db, table string, location binlog.Location) { ti, err := s.schemaTracker.GetTable(db, table) if err != nil { s.tctx.L().DPanic("table info missing from schema tracker", zap.String("schema", db), zap.String("table", table), - zap.Stringer("pos", pos), + zap.Stringer("location", location), zap.Error(err)) } - s.checkpoint.SaveTablePoint(db, table, pos, ti) + s.checkpoint.SaveTablePoint(db, table, location, ti) } func (s *Syncer) addJob(job *job) error { @@ -696,7 +696,7 @@ func (s *Syncer) addJob(job *job) error { ) switch job.tp { case xid: - s.saveGlobalPoint(job.pos) + s.saveGlobalPoint(job.location) return nil case flush: addedJobsTotal.WithLabelValues("flush", s.cfg.Name, adminQueueName).Inc() @@ -730,16 +730,16 @@ func (s *Syncer) addJob(job *job) error { switch job.tp { case ddl: // only save checkpoint for DDL and XID (see above) - s.saveGlobalPoint(job.pos) + s.saveGlobalPoint(job.location) if len(job.sourceSchema) > 0 { - s.saveTablePoint(job.sourceSchema, job.sourceTable, job.pos) + s.saveTablePoint(job.sourceSchema, job.sourceTable, job.location) } // reset sharding group after checkpoint saved s.resetShardingGroup(job.targetSchema, job.targetTable) case insert, update, del: // save job's current pos for DML events if len(job.sourceSchema) > 0 { - s.saveTablePoint(job.sourceSchema, job.sourceTable, job.currentPos) + s.saveTablePoint(job.sourceSchema, job.sourceTable, job.currentLocation) } } @@ -750,11 +750,12 @@ func (s *Syncer) addJob(job *job) error { return nil } -func (s *Syncer) saveGlobalPoint(globalPoint mysql.Position) { +func (s *Syncer) saveGlobalPoint(globalLocation binlog.Location) { if s.cfg.IsSharding { - globalPoint = s.sgk.AdjustGlobalPoint(globalPoint) + // TODO: maybe need to compare GTID? 
+		globalLocation.Position = s.sgk.AdjustGlobalPoint(globalLocation.Position)
 	}
-	s.checkpoint.SaveGlobalPoint(globalPoint)
+	s.checkpoint.SaveGlobalPoint(globalLocation)
 }
 
 func (s *Syncer) resetShardingGroup(schema, table string) {
@@ -807,7 +808,7 @@ func (s *Syncer) flushCheckPoints() error {
 	s.tctx.L().Info("flushed checkpoint", zap.Stringer("checkpoint", s.checkpoint))
 
 	// update current active relay log after checkpoint flushed
-	err = s.updateActiveRelayLog(s.checkpoint.GlobalPoint())
+	err = s.updateActiveRelayLog(s.checkpoint.GlobalPoint().Position)
 	if err != nil {
 		return err
 	}
@@ -838,9 +839,9 @@ func (s *Syncer) syncDDL(tctx *tcontext.Context, queueBucket string, db *DBConn,
 		}
 		if err != nil {
 			s.appendExecErrors(&ExecErrorContext{
-				err:  err,
-				pos:  sqlJob.currentPos,
-				jobs: fmt.Sprintf("%v", sqlJob.ddls),
+				err:      err,
+				location: sqlJob.currentLocation,
+				jobs:     fmt.Sprintf("%v", sqlJob.ddls),
 			})
 		}
 
@@ -910,7 +911,7 @@ func (s *Syncer) sync(tctx *tcontext.Context, queueBucket string, db *DBConn, jo
 		}
 		affected, err := db.executeSQL(tctx, queries, args...)
 		if err != nil {
-			errCtx := &ExecErrorContext{err, jobs[affected].currentPos, fmt.Sprintf("%v", jobs)}
+			errCtx := &ExecErrorContext{err, jobs[affected].currentLocation, fmt.Sprintf("%v", jobs)}
 			s.appendExecErrors(errCtx)
 		}
 		return err
@@ -985,13 +986,13 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
-	// we use currentPos to replace and skip binlog event of specified position and update table checkpoint in sharding ddl
-	// we use lastPos to update global checkpoint and table checkpoint
+	// we use currentLocation to replace and skip binlog events at the specified location and to update the table checkpoint in sharding DDL
+	// we use lastLocation to update the global checkpoint and table checkpoint
 	var (
-		currentPos = s.checkpoint.GlobalPoint() // also init to global checkpoint
-		lastPos    = s.checkpoint.GlobalPoint()
+		currentLocation = s.checkpoint.GlobalPoint() // also init to global checkpoint
+		lastLocation    = s.checkpoint.GlobalPoint()
 	)
-	s.tctx.L().Info("replicate binlog from checkpoint", zap.Stringer("checkpoint", lastPos))
+	s.tctx.L().Info("replicate binlog from checkpoint", zap.Stringer("checkpoint", lastLocation))
 
 	if s.streamerController.IsClosed() {
-		err = s.streamerController.Start(tctx, lastPos)
+		err = s.streamerController.Start(tctx, lastLocation)
 		if err != nil {
 			return terror.Annotate(err, "fail to restart streamer controller")
 		}
diff --git a/syncer/warning.go b/syncer/warning.go
index 4a95ee1d81..8df932e77e 100644
--- a/syncer/warning.go
+++ b/syncer/warning.go
@@ -17,17 +17,18 @@ import (
 	"fmt"
 	"sort"
 
-	"github.com/siddontang/go-mysql/mysql"
-
 	"github.com/pingcap/dm/dm/pb"
+	"github.com/pingcap/dm/pkg/binlog"
 	"github.com/pingcap/dm/pkg/utils"
 )
 
 // ExecErrorContext records a failed exec SQL information
 type ExecErrorContext struct {
-	err  error
-	pos  mysql.Position
-	jobs string
+	err      error
+	location binlog.Location
+	jobs     string
 }
 
 // Error implements SubTaskUnit.Error

From dd80b84ed2791694e54491dfe47b70ae89e72e71 Mon Sep 17 00:00:00 2001
From: WangXiangUSTC
Date: Wed, 4 Mar 2020 23:53:58 +0800
Subject: [PATCH 02/35] update

---
 dm/pb/dmworker.pb.go              | 290 +++++++++++++++---------------
 dm/proto/dmworker.proto           |   2 +-
 syncer/inject_sql.go              |   8 +-
 syncer/job.go                     |   2 +-
 syncer/operator.go                |   7 +-
 syncer/sharding-meta/shardmeta.go |  20 +--
 syncer/sharding_group.go          | 116 ++++++------
 syncer/syncer.go                  | 235 ++++++++++++------------
 syncer/warning.go                 |   2 +-
 9 files changed, 351 insertions(+), 331 deletions(-)

diff --git a/dm/pb/dmworker.pb.go b/dm/pb/dmworker.pb.go
index bcda1f0fa3..efbd0db9a2 100644
--- a/dm/pb/dmworker.pb.go
+++ b/dm/pb/dmworker.pb.go
@@ -1130,11 +1130,11 @@ func (m 
*LoadStatus) GetMetaBinlog() string { // synced: synced source tables // unsynced: unsynced source tables type ShardingGroup struct { - Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` - DDLs []string `protobuf:"bytes,2,rep,name=DDLs,proto3" json:"DDLs,omitempty"` - FirstPos string `protobuf:"bytes,3,opt,name=firstPos,proto3" json:"firstPos,omitempty"` - Synced []string `protobuf:"bytes,4,rep,name=synced,proto3" json:"synced,omitempty"` - Unsynced []string `protobuf:"bytes,5,rep,name=unsynced,proto3" json:"unsynced,omitempty"` + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + DDLs []string `protobuf:"bytes,2,rep,name=DDLs,proto3" json:"DDLs,omitempty"` + FirstLocation string `protobuf:"bytes,3,opt,name=firstLocation,proto3" json:"firstLocation,omitempty"` + Synced []string `protobuf:"bytes,4,rep,name=synced,proto3" json:"synced,omitempty"` + Unsynced []string `protobuf:"bytes,5,rep,name=unsynced,proto3" json:"unsynced,omitempty"` } func (m *ShardingGroup) Reset() { *m = ShardingGroup{} } @@ -1184,9 +1184,9 @@ func (m *ShardingGroup) GetDDLs() []string { return nil } -func (m *ShardingGroup) GetFirstPos() string { +func (m *ShardingGroup) GetFirstLocation() string { if m != nil { - return m.FirstPos + return m.FirstLocation } return "" } @@ -2866,137 +2866,137 @@ func init() { func init() { proto.RegisterFile("dmworker.proto", fileDescriptor_51a1b9e17fd67b10) } var fileDescriptor_51a1b9e17fd67b10 = []byte{ - // 2077 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x18, 0x4d, 0x6f, 0x23, 0x49, - 0xd5, 0xdd, 0xfe, 0x88, 0xfd, 0x6c, 0x67, 0x3a, 0x95, 0x6c, 0xd6, 0x63, 0x76, 0x8d, 0xe9, 0x59, - 0xed, 0x66, 0x7d, 0x88, 0xd8, 0x00, 0x42, 0x02, 0x2d, 0xb0, 0xe3, 0x64, 0x66, 0x02, 0xce, 0x4c, - 0xd2, 0xce, 0xc0, 0x11, 0x75, 0xdc, 0x15, 0xa7, 0x15, 0xbb, 0xbb, 0xa7, 0x3f, 0x12, 0xf2, 0x1b, - 0x90, 0x80, 0x2b, 0x82, 0x1b, 0x42, 0x5c, 0x10, 0x7f, 0x03, 0x8e, 0x7b, 0xe4, 0x06, 0x9a, 0xf9, - 0x1b, 0x1c, 0xd0, 0x7b, 0x55, 0xdd, 0x5d, 0x1d, 0x7f, 0xcc, 0x8c, 0x04, 0x97, 0x56, 0xbf, 0x8f, - 0x7a, 0xf5, 0xea, 0x7d, 0x56, 0x3d, 0xd8, 0x74, 0xe6, 0xb7, 0x7e, 0x78, 0xcd, 0xc3, 0xfd, 0x20, - 0xf4, 0x63, 0x9f, 0xe9, 0xc1, 0x85, 0xf9, 0x39, 0x6c, 0x8f, 0x63, 0x3b, 0x8c, 0xc7, 0xc9, 0xc5, - 0xb9, 0x1d, 0x5d, 0x5b, 0xfc, 0x55, 0xc2, 0xa3, 0x98, 0x31, 0xa8, 0xc4, 0x76, 0x74, 0xdd, 0xd1, - 0xfa, 0xda, 0x5e, 0xc3, 0xa2, 0x7f, 0x73, 0x1f, 0xd8, 0xcb, 0xc0, 0xb1, 0x63, 0x6e, 0xf1, 0x99, - 0x7d, 0x97, 0x72, 0x76, 0x60, 0x63, 0xe2, 0x7b, 0x31, 0xf7, 0x62, 0xc9, 0x9c, 0x82, 0xe6, 0x18, - 0xb6, 0x4f, 0xdc, 0x69, 0x78, 0x7f, 0x41, 0x0f, 0xe0, 0xb1, 0xeb, 0xcd, 0xfc, 0xe9, 0x73, 0x7b, - 0xce, 0xe5, 0x1a, 0x05, 0xc3, 0x3e, 0x82, 0x86, 0x80, 0x4e, 0xfd, 0xa8, 0xa3, 0xf7, 0xb5, 0xbd, - 0xb6, 0x95, 0x23, 0xcc, 0xa7, 0xf0, 0xc1, 0x8b, 0x80, 0xa3, 0xd0, 0x7b, 0x1a, 0x77, 0x41, 0xf7, - 0x03, 0x12, 0xb7, 0x79, 0x00, 0xfb, 0xc1, 0xc5, 0x3e, 0x12, 0x5f, 0x04, 0x96, 0xee, 0x07, 0x78, - 0x1a, 0x0f, 0x37, 0xd3, 0xc5, 0x69, 0xf0, 0xdf, 0xbc, 0x81, 0xdd, 0xfb, 0x82, 0xa2, 0xc0, 0xf7, - 0x22, 0xbe, 0x56, 0xd2, 0x2e, 0xd4, 0x42, 0x1e, 0x25, 0xb3, 0x98, 0x64, 0xd5, 0x2d, 0x09, 0x21, - 0x3e, 0xf2, 0x93, 0x70, 0xc2, 0x3b, 0x65, 0xda, 0x43, 0x42, 0xcc, 0x80, 0xf2, 0x3c, 0x9a, 0x76, - 0x2a, 0x84, 0xc4, 0x5f, 0x73, 0x00, 0x3b, 0xc2, 0x8a, 0xef, 0x60, 0xf1, 0x3d, 0x60, 0x67, 0x09, - 0x0f, 0xef, 0xc6, 0xb1, 0x1d, 0x27, 0x91, 0xc2, 0xe9, 0xe5, 0xa6, 0x13, 0xa7, 0xf9, 0x0c, 0xb6, - 0x88, 0xf3, 0x28, 0x0c, 0xfd, 0x70, 0x1d, 
0xe3, 0x1f, 0x34, 0xe8, 0x3c, 0xb3, 0x3d, 0x67, 0x96, - 0xee, 0x3f, 0x3e, 0x1b, 0xad, 0x93, 0xcc, 0x1e, 0x92, 0x35, 0x74, 0xb2, 0x46, 0x03, 0xad, 0x31, - 0x3e, 0x1b, 0xe5, 0x66, 0xb5, 0xc3, 0x69, 0xd4, 0x29, 0xf7, 0xcb, 0xc8, 0x8e, 0xff, 0xe8, 0xbd, - 0x8b, 0xcc, 0x7b, 0xe2, 0xd8, 0x39, 0x02, 0x7d, 0x1f, 0xbd, 0x9a, 0x9d, 0xda, 0x71, 0xcc, 0x43, - 0xaf, 0x53, 0x15, 0xbe, 0xcf, 0x31, 0x66, 0x00, 0x3b, 0x43, 0x7f, 0x3e, 0xf7, 0xbd, 0x5f, 0x50, - 0x9c, 0x66, 0x2e, 0xc9, 0xcd, 0xae, 0x15, 0xcc, 0x2e, 0xcd, 0xab, 0x67, 0xe6, 0x5d, 0xe9, 0x88, - 0x5d, 0xa8, 0x89, 0xd8, 0x97, 0x4a, 0x49, 0xc8, 0xfc, 0x9b, 0x06, 0xdb, 0x05, 0x1b, 0xbf, 0xf7, - 0x8e, 0xdf, 0x85, 0x96, 0xd8, 0x43, 0x48, 0xa0, 0x7d, 0x9b, 0x07, 0x06, 0x99, 0x4a, 0xc1, 0x5b, - 0x05, 0x2e, 0xf6, 0x7d, 0x68, 0x47, 0xd2, 0x01, 0x62, 0x59, 0xa5, 0x5f, 0xde, 0x6b, 0x1e, 0x6c, - 0xd1, 0x32, 0x95, 0x60, 0x15, 0xf9, 0xcc, 0xbf, 0x68, 0x32, 0x28, 0xa4, 0xab, 0xdf, 0x5b, 0xdf, - 0x2f, 0xa0, 0x29, 0xf4, 0x22, 0x01, 0x52, 0xdd, 0x07, 0xb9, 0xba, 0x42, 0xae, 0xca, 0x43, 0x47, - 0x14, 0x4a, 0x88, 0x35, 0x42, 0x57, 0x43, 0xd1, 0x55, 0x2c, 0x2a, 0x70, 0x99, 0x7f, 0xd6, 0xa0, - 0x39, 0xbc, 0xe2, 0x13, 0xa9, 0x39, 0xaa, 0x18, 0xd8, 0x51, 0xc4, 0x9d, 0x54, 0x45, 0x01, 0xb1, - 0x1d, 0xa8, 0xc6, 0x7e, 0x6c, 0xcf, 0x48, 0xc9, 0xaa, 0x25, 0x00, 0x0a, 0x95, 0x64, 0x32, 0xe1, - 0x51, 0x74, 0x99, 0xcc, 0x48, 0xcb, 0xaa, 0xa5, 0x60, 0x50, 0xda, 0xa5, 0xed, 0xce, 0xb8, 0x43, - 0x0e, 0xad, 0x5a, 0x12, 0xc2, 0x7a, 0x74, 0x6b, 0x87, 0x9e, 0xeb, 0x4d, 0x29, 0xbe, 0xaa, 0x56, - 0x0a, 0xe2, 0x0a, 0x87, 0xc7, 0xb6, 0x3b, 0xeb, 0xd4, 0xfa, 0xda, 0x5e, 0xcb, 0x92, 0x90, 0xd9, - 0x02, 0x38, 0x4c, 0xe6, 0x81, 0xb4, 0xef, 0x6f, 0x34, 0x80, 0x91, 0x6f, 0x3b, 0x52, 0xe9, 0x4f, - 0xa0, 0x7d, 0xe9, 0x7a, 0x6e, 0x74, 0xc5, 0x9d, 0xc7, 0x77, 0x31, 0x8f, 0x48, 0xf7, 0xb2, 0x55, - 0x44, 0xa2, 0xb2, 0xa4, 0xb5, 0x60, 0xd1, 0x89, 0x45, 0xc1, 0xb0, 0x2e, 0xd4, 0x83, 0xd0, 0x9f, - 0x86, 0x3c, 0x8a, 0x64, 0x5c, 0x66, 0x30, 0xae, 0x9d, 0xf3, 0xd8, 0x16, 0x25, 0x4e, 0x46, 0xa7, - 0x82, 0x31, 0x7f, 0xad, 0x41, 0x7b, 0x7c, 0x65, 0x87, 0x8e, 0xeb, 0x4d, 0x9f, 0x86, 0x7e, 0x42, - 0x45, 0x28, 0xb6, 0xc3, 0x29, 0x4f, 0x2b, 0xae, 0x84, 0x30, 0x1f, 0x0f, 0x0f, 0x47, 0xb8, 0x3f, - 0xe5, 0x23, 0xfe, 0xe3, 0xce, 0x97, 0x6e, 0x18, 0xc5, 0x98, 0x8e, 0x72, 0xe7, 0x14, 0xa6, 0x5c, - 0xb9, 0xf3, 0x26, 0x64, 0xc2, 0x32, 0xe5, 0x0a, 0x41, 0xb8, 0x26, 0xf1, 0x24, 0xa5, 0x4a, 0x94, - 0x0c, 0x36, 0xff, 0x54, 0x06, 0x18, 0xdf, 0x79, 0x13, 0x69, 0x9e, 0x3e, 0x34, 0xe9, 0x98, 0x47, - 0x37, 0xdc, 0x8b, 0x53, 0xe3, 0xa8, 0x28, 0x14, 0x46, 0xe0, 0x79, 0x90, 0x1a, 0x26, 0x83, 0xb1, - 0x58, 0x84, 0x7c, 0xc2, 0xbd, 0x18, 0x89, 0x65, 0x22, 0xe6, 0x08, 0x66, 0x42, 0x6b, 0x6e, 0x47, - 0x31, 0x0f, 0x0b, 0xa6, 0x29, 0xe0, 0xd8, 0x00, 0x0c, 0x15, 0x7e, 0x1a, 0xbb, 0x8e, 0x2c, 0x2b, - 0x0b, 0x78, 0x94, 0x47, 0x87, 0x48, 0xe5, 0xd5, 0x84, 0x3c, 0x15, 0x87, 0xf2, 0x54, 0x98, 0xe4, - 0x6d, 0x08, 0x79, 0xf7, 0xf1, 0x28, 0xef, 0x62, 0xe6, 0x4f, 0xae, 0x5d, 0x6f, 0x4a, 0x66, 0xaf, - 0x93, 0xa9, 0x0a, 0x38, 0xf6, 0x25, 0x18, 0x89, 0x17, 0xf2, 0xc8, 0x9f, 0xdd, 0x70, 0x87, 0xbc, - 0x17, 0x75, 0x1a, 0x4a, 0xa6, 0xab, 0x7e, 0xb5, 0x16, 0x58, 0x15, 0x0f, 0x81, 0x48, 0x19, 0xe9, - 0xa1, 0x1e, 0x80, 0x28, 0xaa, 0xe7, 0x77, 0x01, 0xef, 0x34, 0x45, 0xcc, 0xe4, 0x18, 0xf3, 0x8f, - 0x1a, 0xb4, 0xd4, 0xe2, 0xa3, 0x94, 0x45, 0x6d, 0x45, 0x59, 0xd4, 0xd5, 0xb2, 0xc8, 0x3e, 0xcf, - 0xca, 0x89, 0xa8, 0x0f, 0xa4, 0xed, 0x69, 0xe8, 0x63, 0xf6, 0x59, 0x44, 0xc8, 0x2a, 0xcc, 0x17, - 0xd0, 0x0c, 0xb1, 0xbf, 0x67, 0x75, 0x2c, 0xab, 0x27, 0x56, 0x8e, 
0xb6, 0x54, 0x1e, 0xf3, 0xef, - 0x3a, 0x34, 0x15, 0xe2, 0x82, 0xa7, 0xb5, 0x77, 0xf4, 0xb4, 0xbe, 0xc2, 0xd3, 0xfd, 0x54, 0xa5, - 0xe4, 0xe2, 0xd0, 0x0d, 0x65, 0xdc, 0xab, 0xa8, 0x8c, 0xa3, 0x10, 0x5a, 0x2a, 0x8a, 0xed, 0xc1, - 0x03, 0x05, 0x54, 0x02, 0xeb, 0x3e, 0x9a, 0xed, 0x03, 0x23, 0xd4, 0xd0, 0x8e, 0x27, 0x57, 0x2f, - 0x83, 0x13, 0xd2, 0x86, 0xa2, 0xab, 0x6e, 0x2d, 0xa1, 0xb0, 0x6f, 0x42, 0x35, 0x8a, 0xed, 0x29, - 0xa7, 0xc0, 0x4a, 0x9b, 0x2a, 0x22, 0x2c, 0x81, 0x57, 0x8c, 0x5f, 0x7f, 0x8b, 0xf1, 0xcd, 0xff, - 0xe8, 0xd0, 0x2e, 0xb4, 0x8b, 0xa5, 0x3d, 0x3c, 0xdb, 0x51, 0x5f, 0xb1, 0x63, 0x1f, 0x2a, 0x89, - 0xe7, 0x0a, 0x67, 0x6f, 0x1e, 0xb4, 0x90, 0xfe, 0xd2, 0x73, 0x63, 0x8c, 0x25, 0x8b, 0x28, 0x8a, - 0x4e, 0x95, 0xb7, 0x05, 0xc4, 0xb7, 0x61, 0x3b, 0x0f, 0xe4, 0xc3, 0xc3, 0xd1, 0xc8, 0x9f, 0x5c, - 0x1f, 0x1f, 0x4a, 0xeb, 0x2d, 0x23, 0x31, 0x26, 0x9a, 0x14, 0x25, 0xe4, 0xb3, 0x92, 0x68, 0x53, - 0x9f, 0x41, 0x75, 0x82, 0xcd, 0x83, 0xac, 0x24, 0x03, 0x4a, 0xe9, 0x26, 0xcf, 0x4a, 0x96, 0xa0, - 0xb3, 0x4f, 0xa0, 0xe2, 0x24, 0xf3, 0x40, 0xda, 0x6a, 0x13, 0xf9, 0xf2, 0x72, 0xfe, 0xac, 0x64, - 0x11, 0x15, 0xb9, 0x66, 0xbe, 0xed, 0x74, 0x1a, 0x39, 0x57, 0x5e, 0xe5, 0x91, 0x0b, 0xa9, 0xc8, - 0x85, 0x19, 0x46, 0xd9, 0x26, 0xb9, 0xf2, 0x62, 0x87, 0x5c, 0x48, 0x7d, 0x5c, 0x87, 0x5a, 0x24, - 0x02, 0xf9, 0x47, 0xb0, 0x55, 0xb0, 0xfe, 0xc8, 0x8d, 0xc8, 0x54, 0x82, 0xdc, 0xd1, 0x56, 0xf5, - 0xf4, 0x74, 0x7d, 0x0f, 0x80, 0xce, 0x24, 0xda, 0xac, 0xec, 0xd5, 0x5a, 0x7e, 0x59, 0xfc, 0x18, - 0x1a, 0x78, 0x96, 0x35, 0x64, 0x3c, 0xc4, 0x2a, 0x72, 0x00, 0x2d, 0xd2, 0xfe, 0x6c, 0xb4, 0x82, - 0x83, 0x1d, 0xc0, 0x8e, 0x68, 0x9b, 0xd9, 0x05, 0xdb, 0x8d, 0x5d, 0xdf, 0x93, 0x89, 0xb5, 0x94, - 0x86, 0x05, 0x9d, 0xa3, 0xb8, 0xf1, 0xd9, 0x28, 0xed, 0x28, 0x29, 0x6c, 0x7e, 0x0f, 0x1a, 0xb8, - 0xa3, 0xd8, 0x6e, 0x0f, 0x6a, 0x44, 0x48, 0xed, 0x60, 0x64, 0xe6, 0x94, 0x0a, 0x59, 0x92, 0x6e, - 0xfe, 0x56, 0x2b, 0xdc, 0x49, 0xde, 0xbb, 0x5a, 0xf5, 0x17, 0xaf, 0x34, 0x8d, 0xe2, 0x0d, 0x66, - 0x1f, 0x80, 0x0a, 0x4e, 0x7a, 0x7f, 0xc9, 0xdc, 0x9b, 0x63, 0x2d, 0x85, 0x03, 0x1d, 0x93, 0x43, - 0x4b, 0x4c, 0xfb, 0x7b, 0x1d, 0x5a, 0xea, 0xd5, 0xe7, 0xff, 0x95, 0x76, 0x4c, 0x79, 0x3f, 0xa4, - 0x99, 0xf1, 0x69, 0x9a, 0x19, 0xd5, 0xfc, 0x18, 0x79, 0x14, 0xe5, 0x89, 0xf1, 0x48, 0x26, 0x46, - 0x8d, 0xd8, 0xda, 0x69, 0x62, 0xa4, 0x5c, 0x22, 0x2f, 0x1e, 0xc9, 0xbc, 0xd8, 0xc8, 0x99, 0xb2, - 0x90, 0xca, 0xd2, 0xe2, 0x91, 0x4c, 0x8b, 0x7a, 0xce, 0x94, 0xb9, 0x39, 0xcb, 0x8a, 0x0d, 0xa8, - 0x92, 0x3b, 0xcd, 0x1f, 0x80, 0xa1, 0x9a, 0x86, 0x72, 0xe2, 0x53, 0x49, 0x2c, 0x84, 0x82, 0x7a, - 0x75, 0x94, 0x6b, 0x5f, 0x41, 0xbb, 0x50, 0x54, 0xb0, 0xd3, 0xb9, 0xd1, 0xd0, 0xf6, 0x26, 0x7c, - 0x96, 0x5d, 0x1c, 0x15, 0x8c, 0x12, 0x64, 0x7a, 0x2e, 0x59, 0x8a, 0x28, 0x04, 0x99, 0x72, 0xfd, - 0x2b, 0x17, 0xae, 0x7f, 0x7f, 0xd5, 0xa0, 0x76, 0x2e, 0x9c, 0xd8, 0x81, 0x8d, 0xa3, 0x30, 0x1c, - 0xfa, 0x8e, 0xf0, 0x63, 0xd5, 0x4a, 0x41, 0x0c, 0x7a, 0xfc, 0x9d, 0xd9, 0x51, 0x24, 0xaf, 0xa9, - 0x19, 0x2c, 0x69, 0xe3, 0x89, 0x1f, 0x70, 0x79, 0x4f, 0xcd, 0x60, 0x49, 0x1b, 0xf1, 0x1b, 0x3e, - 0x93, 0xf7, 0xd4, 0x0c, 0xc6, 0xdd, 0x4e, 0x78, 0x14, 0x61, 0x80, 0x88, 0xda, 0x98, 0x82, 0xb8, - 0xca, 0xb2, 0x6f, 0x87, 0x76, 0x12, 0x71, 0x79, 0x4b, 0xc9, 0x60, 0x93, 0x43, 0x4b, 0x3d, 0x1e, - 0xfb, 0x16, 0x54, 0xe8, 0x12, 0x20, 0xde, 0xab, 0xe4, 0x1b, 0x22, 0x88, 0x20, 0xc2, 0xef, 0x92, - 0x37, 0x40, 0x3f, 0x75, 0x87, 0xe8, 0xee, 0xe2, 0x95, 0x5b, 0x70, 0x44, 0x17, 0x3a, 0xe3, 0x5b, - 0x37, 0x9e, 0x5c, 0x51, 0x1a, 0x88, 0xce, 0x25, 0x9f, 0x89, 0xe6, 0x01, 0x6c, 0xcb, 0xa7, 
-	// (remaining rows of the old gzipped fileDescriptorDmworker byte array elided; final row shown)
-	0x01, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x59, 0xfd, 0x9f, 0x47, 0x17, 0x00, 0x00,
+	// 2079 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped fileDescriptorDmworker byte array elided; final row shown)
+	0xef, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x6e, 0xa4, 0xad, 0x6b, 0x51, 0x17, 0x00, 0x00,
}

// Reference imports to suppress errors if they are not otherwise used.
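Aside: the hunks below funnel every position check through binlog.CompareLocation, whose GTID comparison is still a TODO in this change. A minimal sketch of what a GTID-aware ordering could look like, assuming the MySQL flavor and go-mysql's GTID-set parsing; compareGTIDSets and the gmysql alias (for github.com/siddontang/go-mysql/mysql) are illustrative names, not part of this patch:

	// compareGTIDSets orders two GTID-set strings by containment:
	// 1 if a strictly contains b, -1 if b strictly contains a,
	// and 0 otherwise (equal or incomparable sets both map to 0 here).
	func compareGTIDSets(a, b string) (int, error) {
		setA, err := gmysql.ParseGTIDSet(gmysql.MySQLFlavor, a)
		if err != nil {
			return 0, err
		}
		setB, err := gmysql.ParseGTIDSet(gmysql.MySQLFlavor, b)
		if err != nil {
			return 0, err
		}
		switch {
		case setA.Equal(setB):
			return 0, nil
		case setA.Contain(setB):
			return 1, nil
		case setB.Contain(setA):
			return -1, nil
		}
		return 0, nil
	}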
@@ -4167,10 +4167,10 @@ func (m *ShardingGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 			dAtA[i] = 0x22
 		}
 	}
-	if len(m.FirstPos) > 0 {
-		i -= len(m.FirstPos)
-		copy(dAtA[i:], m.FirstPos)
-		i = encodeVarintDmworker(dAtA, i, uint64(len(m.FirstPos)))
+	if len(m.FirstLocation) > 0 {
+		i -= len(m.FirstLocation)
+		copy(dAtA[i:], m.FirstLocation)
+		i = encodeVarintDmworker(dAtA, i, uint64(len(m.FirstLocation)))
 		i--
 		dAtA[i] = 0x1a
 	}
@@ -5792,7 +5792,7 @@ func (m *ShardingGroup) Size() (n int) {
 			n += 1 + l + sovDmworker(uint64(l))
 		}
 	}
-	l = len(m.FirstPos)
+	l = len(m.FirstLocation)
 	if l > 0 {
 		n += 1 + l + sovDmworker(uint64(l))
 	}
@@ -8410,7 +8410,7 @@ func (m *ShardingGroup) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field FirstPos", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field FirstLocation", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -8438,7 +8438,7 @@ func (m *ShardingGroup) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.FirstPos = string(dAtA[iNdEx:postIndex])
+			m.FirstLocation = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
diff --git a/dm/proto/dmworker.proto b/dm/proto/dmworker.proto
index 10cbcaebeb..3c4dc782f7 100644
--- a/dm/proto/dmworker.proto
+++ b/dm/proto/dmworker.proto
@@ -181,7 +181,7 @@ message LoadStatus {
 message ShardingGroup {
     string target = 1;
     repeated string DDLs = 2;
-    string firstPos = 3;
+    string firstLocation = 3;
     repeated string synced = 4;
     repeated string unsynced = 5;
 }
diff --git a/syncer/inject_sql.go b/syncer/inject_sql.go
index bdf5373535..3b21acbd8b 100644
--- a/syncer/inject_sql.go
+++ b/syncer/inject_sql.go
@@ -19,12 +19,13 @@ import (
 	"github.com/pingcap/parser"
 	"github.com/pingcap/parser/ast"
-	"github.com/siddontang/go-mysql/mysql"
+	//"github.com/siddontang/go-mysql/mysql"
 	"github.com/siddontang/go-mysql/replication"
 	"go.uber.org/zap"

 	parserpkg "github.com/pingcap/dm/pkg/parser"
 	"github.com/pingcap/dm/pkg/terror"
+	"github.com/pingcap/dm/pkg/binlog"
 )

 // InjectSQLs injects ddl into syncer as binlog events while meet xid/query event
@@ -70,7 +71,7 @@ func (s *Syncer) InjectSQLs(ctx context.Context, sqls []string) error {
 	return nil
 }

-func (s *Syncer) tryInject(op opType, pos mysql.Position) *replication.BinlogEvent {
+func (s *Syncer) tryInject(op opType, location binlog.Location) *replication.BinlogEvent {
 	if op != xid && op != ddl {
 		return nil
 	}
@@ -79,7 +80,8 @@ func (s *Syncer) tryInject(op opType, pos mysql.Position) *replication.BinlogEve
 	case e := <-s.injectEventCh: // try receive from extra binlog event chan
 		// NOTE: now we simply set EventSize to 0, make event's start / end pos are the same
-		e.Header.LogPos = pos.Pos
+		// TODO: support GTID
+		e.Header.LogPos = location.Position.Pos
 		e.Header.EventSize = 0
 		s.tctx.L().Info("inject binlog event from inject chan", zap.Reflect("header", e.Header), zap.Reflect("event", e.Event))
 		return e
diff --git a/syncer/job.go b/syncer/job.go
index 5daec6b2b1..df854911e7 100644
--- a/syncer/job.go
+++ b/syncer/job.go
@@ -32,7 +32,7 @@ const (
 	ddl
 	xid
 	flush
-	skip // used by Syncer.recordSkipSQLsPos to record global pos, but not execute SQL
+	skip // used by Syncer.recordSkipSQLsLocation to record global location, but not execute SQL
 	rotate
 )
diff --git a/syncer/operator.go b/syncer/operator.go
index 842aae98ca..260e828deb 100644
--- a/syncer/operator.go
+++ b/syncer/operator.go
@@ -14,9 +14,10 @@ package syncer

 import (
-	"github.com/siddontang/go-mysql/mysql"
+	//"github.com/siddontang/go-mysql/mysql"

 	"github.com/pingcap/dm/dm/pb"
+	"github.com/pingcap/dm/pkg/binlog"
 )

 // SetSQLOperator sets an SQL operator to syncer
@@ -26,6 +27,6 @@ func (s *Syncer) SetSQLOperator(req *pb.HandleSubTaskSQLsRequest) error {

 // tryApplySQLOperator tries to get SQLs by applying a possible operator
 // return whether applied, and the applied SQLs
-func (s *Syncer) tryApplySQLOperator(pos mysql.Position, sqls []string) (bool, []string, error) {
-	return s.sqlOperatorHolder.Apply(s.tctx, pos, sqls)
+func (s *Syncer) tryApplySQLOperator(location binlog.Location, sqls []string) (bool, []string, error) {
+	return s.sqlOperatorHolder.Apply(s.tctx, location, sqls)
 }
diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go
index 3fd5cea5ca..2a80dbda01 100644
--- a/syncer/sharding-meta/shardmeta.go
+++ b/syncer/sharding-meta/shardmeta.go
@@ -17,7 +17,7 @@ import (
 	"encoding/json"
 	"fmt"

-	"github.com/siddontang/go-mysql/mysql"
+	//"github.com/siddontang/go-mysql/mysql"
 	"go.uber.org/zap"

 	"github.com/pingcap/dm/pkg/binlog"
@@ -33,15 +33,15 @@ const (

 // DDLItem records ddl information used in sharding sequence organization
 type DDLItem struct {
-	FirstPos mysql.Position `json:"first-pos"` // first DDL's binlog Pos, not the End_log_pos of the event
+	FirstLocation binlog.Location `json:"first-location"` // first DDL's binlog location (its Pos, not the End_log_pos of the event)
 	DDLs   []string `json:"ddls"`   // DDLs, these ddls are in the same QueryEvent
 	Source string   `json:"source"` // source table ID
 }

 // NewDDLItem creates a new DDLItem
-func NewDDLItem(pos mysql.Position, ddls []string, source string) *DDLItem {
+func NewDDLItem(location binlog.Location, ddls []string, source string) *DDLItem {
 	return &DDLItem{
-		FirstPos: pos,
+		FirstLocation: location,
 		DDLs:   ddls,
 		Source: source,
 	}
@@ -49,7 +49,7 @@ func NewDDLItem(pos mysql.Position, ddls []string, source string) *DDLItem {

 // String returns the item's format string value
 func (item *DDLItem) String() string {
-	return fmt.Sprintf("first-pos: %s ddls: %+v source: %s", item.FirstPos, item.DDLs, item.Source)
+	return fmt.Sprintf("first-location: %s ddls: %+v source: %s", item.FirstLocation, item.DDLs, item.Source)
}

 // ShardingSequence records a list of DDLItem
@@ -136,7 +136,7 @@ func (meta *ShardingMeta) checkItemExists(item *DDLItem) (int, bool) {
 		return 0, false
 	}
 	for idx, ddlItem := range source.Items {
-		if binlog.ComparePosition(item.FirstPos, ddlItem.FirstPos) == 0 {
+		if binlog.CompareLocation(item.FirstLocation, ddlItem.FirstLocation) == 0 {
 			return idx, true
 		}
 	}
@@ -227,12 +227,12 @@ func (meta *ShardingMeta) ResolveShardingDDL() bool {
 	return false
 }

-// ActiveDDLFirstPos returns the first binlog position of active DDL
-func (meta *ShardingMeta) ActiveDDLFirstPos() (mysql.Position, error) {
+// ActiveDDLFirstLocation returns the first binlog location of the active DDL
+func (meta *ShardingMeta) ActiveDDLFirstLocation() (binlog.Location, error) {
 	if meta.activeIdx >= len(meta.global.Items) {
-		return mysql.Position{}, terror.ErrSyncUnitDDLActiveIndexLarger.Generate(meta.activeIdx, meta.global.Items)
+		return binlog.Location{}, terror.ErrSyncUnitDDLActiveIndexLarger.Generate(meta.activeIdx, meta.global.Items)
 	}
-	return meta.global.Items[meta.activeIdx].FirstPos, nil
+	return meta.global.Items[meta.activeIdx].FirstLocation, nil
 }

 // FlushData returns sharding meta flush SQL and args
diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go
index d5c1fdbc2e..facef1254d 100644
--- a/syncer/sharding_group.go
+++ b/syncer/sharding_group.go
@@ -102,8 +102,8 @@ type ShardingGroup struct {
 	sourceID string                  // associate dm-worker source ID
 	meta     *shardmeta.ShardingMeta // sharding sequence meta storage

-	firstPos    *mysql.Position // first DDL's binlog pos, used to restrain the global checkpoint when un-resolved
-	firstEndPos *mysql.Position // first DDL's binlog End_log_pos, used to re-direct binlog streamer after synced
+	firstLocation    *binlog.Location // first DDL's binlog pos and gtid, used to restrain the global checkpoint when un-resolved
+	firstEndLocation *binlog.Location // first DDL's binlog End_log_pos and gtid, used to re-direct binlog streamer after synced

 	ddls []string // DDLs currently being synced
 }
@@ -114,8 +114,8 @@ func NewShardingGroup(sourceID, shardMetaSchema, shardMetaTable string, sources
 		sources:      make(map[string]bool, len(sources)),
 		IsSchemaOnly: isSchemaOnly,
 		sourceID:     sourceID,
-		firstPos:    nil,
-		firstEndPos: nil,
+		firstLocation:    nil,
+		firstEndLocation: nil,
 	}
 	if meta != nil {
 		sg.meta = meta
@@ -188,8 +188,8 @@ func (sg *ShardingGroup) Reset() {
 	for source := range sg.sources {
 		sg.sources[source] = false
 	}
-	sg.firstPos = nil
-	sg.firstEndPos = nil
+	sg.firstLocation = nil
+	sg.firstEndLocation = nil
 	sg.ddls = nil
 }

@@ -198,11 +198,11 @@ func (sg *ShardingGroup) Reset() {
 // synced: whether the source table's sharding group synced
 // active: whether the DDL will be processed in this round
 // remain: remain un-synced source table's count
-func (sg *ShardingGroup) TrySync(source string, pos, endPos mysql.Position, ddls []string) (bool, bool, int, error) {
+func (sg *ShardingGroup) TrySync(source string, location, endLocation binlog.Location, ddls []string) (bool, bool, int, error) {
 	sg.Lock()
 	defer sg.Unlock()

-	ddlItem := shardmeta.NewDDLItem(pos, ddls, source)
+	ddlItem := shardmeta.NewDDLItem(location, ddls, source)
 	active, err := sg.meta.AddItem(ddlItem)
 	if err != nil {
 		return sg.remain <= 0, active, sg.remain, err
@@ -212,9 +212,9 @@ func (sg *ShardingGroup) TrySync(source string, pos, endPos mysql.Position, ddls
 		sg.remain--
 	}

-	if sg.firstPos == nil {
-		sg.firstPos = &pos // save first DDL's pos
-		sg.firstEndPos = &endPos
+	if sg.firstLocation == nil {
+		sg.firstLocation = &location // save first DDL's location
+		sg.firstEndLocation = &endLocation
 		sg.ddls = ddls
 	}
 	return sg.remain <= 0, active, sg.remain, nil
@@ -223,14 +223,14 @@ func (sg *ShardingGroup) TrySync(source string, pos, endPos mysql.Position, ddls
 // CheckSyncing checks the source table syncing status
 // returns
 // beforeActiveDDL: whether the position is before active DDL
-func (sg *ShardingGroup) CheckSyncing(source string, pos mysql.Position) (beforeActiveDDL bool) {
+func (sg *ShardingGroup) CheckSyncing(source string, location binlog.Location) (beforeActiveDDL bool) {
 	sg.RLock()
 	defer sg.RUnlock()
 	activeDDLItem := sg.meta.GetActiveDDLItem(source)
 	if activeDDLItem == nil {
 		return true
 	}
-	return binlog.ComparePosition(activeDDLItem.FirstPos, pos) > 0
+	return binlog.CompareLocation(activeDDLItem.FirstLocation, location) > 0
 }

 // UnresolvedGroupInfo returns pb.ShardingGroup if is unresolved, else returns nil
@@ -244,7 +244,7 @@ func (sg *ShardingGroup) UnresolvedGroupInfo() *pb.ShardingGroup {

 	group := &pb.ShardingGroup{
 		DDLs:          sg.ddls,
-		FirstPos:      sg.firstPos.String(),
+		FirstLocation: sg.firstLocation.String(),
 		Synced:        make([]string, 0, len(sg.sources)-sg.remain),
 		Unsynced:      make([]string, 0, sg.remain),
 	}
@@ -307,23 +307,29 @@ func (sg *ShardingGroup) UnresolvedTables() [][]string {
 	return tables
 }

-// FirstPosUnresolved returns the first DDL pos if un-resolved, else nil
-func (sg *ShardingGroup) FirstPosUnresolved() *mysql.Position {
+// FirstLocationUnresolved returns the first DDL location if un-resolved, else nil
+func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location {
 	sg.RLock()
 	defer sg.RUnlock()
-	if sg.remain < len(sg.sources) && sg.firstPos != nil {
+	if sg.remain < len(sg.sources) && sg.firstLocation != nil {
 		// create a new location to return
-		return &mysql.Position{
-			Name: sg.firstPos.Name,
-			Pos:  sg.firstPos.Pos,
+		return &binlog.Location{
+			Position: mysql.Position{
+				Name: sg.firstLocation.Position.Name,
+				Pos:  sg.firstLocation.Position.Pos,
+			},
+			GTID: sg.firstLocation.GTID,
 		}
 	}
 	item := sg.meta.GetGlobalActiveDDL()
 	if item != nil {
 		// make a new copy
-		return &mysql.Position{
-			Name: item.FirstPos.Name,
-			Pos:  item.FirstPos.Pos,
+		return &binlog.Location{
+			Position: mysql.Position{
+				Name: item.FirstLocation.Position.Name,
+				Pos:  item.FirstLocation.Position.Pos,
+			},
+			GTID: item.FirstLocation.GTID,
 		}
 	}
 	return nil
@@ -333,11 +339,11 @@ func (sg *ShardingGroup) FirstPosUnresolved() *mysql.Position {
 func (sg *ShardingGroup) FirstEndPosUnresolved() *mysql.Position {
 	sg.RLock()
 	defer sg.RUnlock()
-	if sg.remain < len(sg.sources) && sg.firstEndPos != nil {
+	if sg.remain < len(sg.sources) && sg.firstEndLocation != nil {
 		// create a new pos to return
 		return &mysql.Position{
-			Name: sg.firstEndPos.Name,
-			Pos:  sg.firstEndPos.Pos,
+			Name: sg.firstEndLocation.Position.Name,
+			Pos:  sg.firstEndLocation.Position.Pos,
 		}
 	}
 	return nil
@@ -364,12 +370,12 @@ func (sg *ShardingGroup) ResolveShardingDDL() bool {
 	return reset
 }

-// ActiveDDLFirstPos returns the first binlog position of active DDL
-func (sg *ShardingGroup) ActiveDDLFirstPos() (mysql.Position, error) {
+// ActiveDDLFirstLocation returns the first binlog location of the active DDL
+func (sg *ShardingGroup) ActiveDDLFirstLocation() (binlog.Location, error) {
 	sg.RLock()
 	defer sg.RUnlock()
-	pos, err := sg.meta.ActiveDDLFirstPos()
-	return pos, err
+	location, err := sg.meta.ActiveDDLFirstLocation()
+	return location, err
 }

 // FlushData returns sharding meta flush SQLs and args
@@ -512,7 +518,7 @@ func (k *ShardingGroupKeeper) LeaveGroup(targetSchema, targetTable string, sourc
 // active: whether is active DDL in sequence sharding DDL
 // remain: remain un-synced source table's count
 func (k *ShardingGroupKeeper) TrySync(
-	targetSchema, targetTable, source string, pos, endPos mysql.Position, ddls []string) (
+	targetSchema, targetTable, source string, location, endLocation binlog.Location, ddls []string) (
 	needShardingHandle bool, group *ShardingGroup, synced, active bool, remain int, err error) {

 	targetTableID, schemaOnly := GenTableID(targetSchema, targetTable)
@@ -528,17 +534,17 @@ func (k *ShardingGroupKeeper) TrySync(
 	if !ok {
 		return false, group, true, false, 0, nil
 	}
-	synced, active, remain, err = group.TrySync(source, pos, endPos, ddls)
+	synced, active, remain, err = group.TrySync(source, location, endLocation, ddls)
 	return true, group, synced, active, remain, err
 }

 // InSyncing checks whether the source is in sharding syncing
-func (k *ShardingGroupKeeper) InSyncing(targetSchema, targetTable, source string, pos mysql.Position) bool {
+func (k *ShardingGroupKeeper) InSyncing(targetSchema, targetTable, source string, location binlog.Location) bool {
 	group := k.Group(targetSchema, targetTable)
 	if group == nil {
 		return false
 	}
-	return !group.CheckSyncing(source, pos)
+	return !group.CheckSyncing(source, location)
 }

 // UnresolvedTables returns
@@ -570,32 +576,32 @@ func (k *ShardingGroupKeeper) Group(targetSchema, targetTable string) *ShardingG
 	return k.groups[targetTableID]
 }

-// lowestFirstPosInGroups returns the lowest pos in all groups which are unresolved
-func (k *ShardingGroupKeeper) lowestFirstPosInGroups() *mysql.Position {
+// lowestFirstLocationInGroups returns the lowest location among all unresolved groups
+func (k *ShardingGroupKeeper) lowestFirstLocationInGroups() *binlog.Location {
 	k.RLock()
 	defer k.RUnlock()
-	var lowest *mysql.Position
+	var lowest *binlog.Location
 	for _, group := range k.groups {
-		pos := group.FirstPosUnresolved()
-		if pos == nil {
+		location := group.FirstLocationUnresolved()
+		if location == nil {
 			continue
 		}
 		if lowest == nil {
-			lowest = pos
-		} else if binlog.ComparePosition(*lowest, *pos) > 0 {
-			lowest = pos
+			lowest = location
+		} else if binlog.CompareLocation(*lowest, *location) > 0 {
+			lowest = location
 		}
 	}
 	return lowest
 }

-// AdjustGlobalPoint adjusts globalPoint with sharding groups' lowest first point
-func (k *ShardingGroupKeeper) AdjustGlobalPoint(globalPoint mysql.Position) mysql.Position {
-	lowestFirstPos := k.lowestFirstPosInGroups()
-	if lowestFirstPos != nil && binlog.ComparePosition(*lowestFirstPos, globalPoint) < 0 {
-		return *lowestFirstPos
+// AdjustGlobalLocation adjusts globalLocation with sharding groups' lowest first point
+func (k *ShardingGroupKeeper) AdjustGlobalLocation(globalLocation binlog.Location) binlog.Location {
+	lowestFirstLocation := k.lowestFirstLocationInGroups()
+	if lowestFirstLocation != nil && binlog.CompareLocation(*lowestFirstLocation, globalLocation) < 0 {
+		return *lowestFirstLocation
 	}
-	return globalPoint
+	return globalLocation
 }

 // Groups returns all sharding groups, often used for debug
@@ -649,16 +655,16 @@ func (k *ShardingGroupKeeper) ResolveShardingDDL(targetSchema, targetTable strin
 	return false, terror.ErrSyncUnitShardingGroupNotFound.Generate(targetSchema, targetTable)
 }

-// ActiveDDLFirstPos returns the binlog position of active DDL
-func (k *ShardingGroupKeeper) ActiveDDLFirstPos(targetSchema, targetTable string) (mysql.Position, error) {
+// ActiveDDLFirstLocation returns the binlog location of the active DDL
+func (k *ShardingGroupKeeper) ActiveDDLFirstLocation(targetSchema, targetTable string) (binlog.Location, error) {
 	group := k.Group(targetSchema, targetTable)
 	k.Lock()
 	defer k.Unlock()
 	if group != nil {
-		pos, err := group.ActiveDDLFirstPos()
-		return pos, err
+		location, err := group.ActiveDDLFirstLocation()
+		return location, err
 	}
-	return mysql.Position{}, terror.ErrSyncUnitShardingGroupNotFound.Generate(targetSchema, targetTable)
+	return binlog.Location{}, terror.ErrSyncUnitShardingGroupNotFound.Generate(targetSchema, targetTable)
 }

 // PrepareFlushSQLs returns all sharding meta flushed SQLs except for given table IDs
@@ -762,8 +768,8 @@ func (k *ShardingGroupKeeper) LoadShardMeta() (map[string]*shardmeta.ShardingMet

 // ShardingReSync represents re-sync info for a sharding DDL group
 type ShardingReSync struct {
-	currPos   mysql.Position // current DDL's binlog pos, initialize to first DDL's pos
-	latestPos mysql.Position // latest DDL's binlog pos
+	currLocation   binlog.Location // current DDL's binlog location, initialize to first DDL's location
+	latestLocation binlog.Location // latest DDL's binlog location
 	targetSchema string
 	targetTable  string
 	allResolved  bool
diff --git a/syncer/syncer.go b/syncer/syncer.go
index 5d300b3b7b..75e5bcec1e 100644
--- a/syncer/syncer.go
+++ b/syncer/syncer.go
@@ -171,9 +171,9 @@ type Syncer struct {
 	// TODO: re-implement tracer flow for binlog event later.
 	tracer *tracing.Tracer

-	currentPosMu struct {
+	currentLocationMu struct {
 		sync.RWMutex
-		currentPos mysql.Position // use to calc remain binlog size
+		currentLocation binlog.Location // use to calc remain binlog size
 	}

 	addJobFunc func(*job) error
@@ -753,7 +753,7 @@ func (s *Syncer) addJob(job *job) error {
 func (s *Syncer) saveGlobalPoint(globalLocation binlog.Location) {
 	if s.cfg.IsSharding {
 		// TODO: maybe need to compare GTID?
-		globalLocation.Position = s.sgk.AdjustGlobalPoint(globalLocation.Position)
+		globalLocation = s.sgk.AdjustGlobalLocation(globalLocation)
 	}
 	s.checkpoint.SaveGlobalPoint(globalLocation)
 }
@@ -1069,7 +1069,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 	var (
 		shardingReSyncCh = make(chan *ShardingReSync, 10)
 		shardingReSync   *ShardingReSync
-		savedGlobalLastPos mysql.Position
+		savedGlobalLastLocation binlog.Location
 		latestOp            opType // latest job operation tp
 		eventTimeoutCounter time.Duration
 		traceSource         = fmt.Sprintf("%s.syncer.%s", s.cfg.SourceID, s.cfg.Name)
@@ -1083,7 +1083,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {

 		// if remaining DDLs in sequence, redirect global stream to the next sharding DDL position
 		if !shardingReSync.allResolved {
-			nextPos, err2 := s.sgk.ActiveDDLFirstPos(shardingReSync.targetSchema, shardingReSync.targetTable)
+			nextPos, err2 := s.sgk.ActiveDDLFirstLocation(shardingReSync.targetSchema, shardingReSync.targetTable)
 			if err2 != nil {
 				return err2
 			}
@@ -1094,24 +1094,24 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 			}
 		}
 		shardingReSync = nil
-		lastPos = savedGlobalLastPos // restore global last pos
+		lastLocation = savedGlobalLastLocation // restore global last location
 		return nil
 	}

 	for {
-		s.currentPosMu.Lock()
-		s.currentPosMu.currentPos = currentPos
-		s.currentPosMu.Unlock()
+		s.currentLocationMu.Lock()
+		s.currentLocationMu.currentLocation = currentLocation
+		s.currentLocationMu.Unlock()

 		// fetch from sharding resync channel if needed, and redirect global
 		// stream to current binlog position recorded by ShardingReSync
 		if shardingReSync == nil && len(shardingReSyncCh) > 0 {
 			// some sharding groups need to re-sync
 			shardingReSync = <-shardingReSyncCh
-			savedGlobalLastPos = lastPos // save global last pos
-			lastPos = shardingReSync.currPos
+			savedGlobalLastLocation = lastLocation // save global last location
+			lastLocation = shardingReSync.currLocation

-			err = s.streamerController.RedirectStreamer(s.tctx, shardingReSync.currPos)
+			err = s.streamerController.RedirectStreamer(s.tctx, shardingReSync.currLocation)
 			if err != nil {
 				return err
 			}
@@ -1126,7 +1126,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {

 		// we only inject sqls in global streaming to avoid DDL position confusion
 		if shardingReSync == nil {
-			e = s.tryInject(latestOp, currentPos)
+			e = s.tryInject(latestOp, currentLocation)
 			latestOp = null
 		}
 		if e == nil {
@@ -1135,7 +1135,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 		startTime := time.Now()

 		if err == context.Canceled {
-			s.tctx.L().Info("binlog replication main routine quit(context canceled)!", zap.Stringer("last position", lastPos))
+			s.tctx.L().Info("binlog replication main routine quit(context canceled)!", zap.Stringer("last location", lastLocation))
 			return nil
 		} else if err == context.DeadlineExceeded {
 			s.tctx.L().Info("deadline exceeded when fetching binlog event")
@@ -1151,7 +1151,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 			eventTimeoutCounter = 0
 			if s.needResync() {
 				s.tctx.L().Info("timeout when fetching binlog event, there must be some problems with replica connection, try to re-connect")
-				err = s.streamerController.ReopenWithRetry(tctx, lastPos)
+				err = s.streamerController.ReopenWithRetry(tctx, lastLocation)
 				if err != nil {
 					return err
 				}
@@ -1160,7 +1160,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 		} else if isDuplicateServerIDError(err) {
 			// if the server id is already used, need to use a new server id
 			tctx.L().Info("server id is already used by another slave, will change to a new server id and get event again")
-			err1 := s.streamerController.UpdateServerIDAndResetReplication(tctx, lastPos)
+			err1 := s.streamerController.UpdateServerIDAndResetReplication(tctx, lastLocation)
 			if err1 != nil {
 				return err1
 			}
@@ -1171,7 +1171,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 			s.tctx.L().Error("fail to fetch binlog", log.ShortError(err))

 			if s.streamerController.CanRetry() {
-				err = s.streamerController.ResetReplicationSyncer(s.tctx, lastPos)
+				err = s.streamerController.ResetReplicationSyncer(s.tctx, lastLocation)
 				if err != nil {
 					return err
 				}
@@ -1181,7 +1181,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 			// try to re-sync in gtid mode
 			if tryReSync && s.cfg.EnableGTID && isBinlogPurgedError(err) && s.cfg.AutoFixGTID {
 				time.Sleep(retryTimeout)
-				err = s.reSyncBinlog(*tctx, lastPos)
+				err = s.reSyncBinlog(*tctx, lastLocation)
 				if err != nil {
 					return err
 				}
@@ -1195,7 +1195,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 		// get binlog event, reset tryReSync, so we can re-sync binlog while syncer meets errors next time
 		tryReSync = true
 		binlogPosGauge.WithLabelValues("syncer", s.cfg.Name).Set(float64(e.Header.LogPos))
-		index, err := binlog.GetFilenameIndex(lastPos.Name)
+		index, err := binlog.GetFilenameIndex(lastLocation.Position.Name)
 		if err != nil {
 			s.tctx.L().Error("fail to get index number of binlog file", log.ShortError(err))
 		} else {
@@ -1209,8 +1209,8 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 		ec := eventContext{
 			tctx:   tctx,
 			header: e.Header,
-			currentPos: &currentPos,
-			lastPos:    &lastPos,
+			currentLocation: &currentLocation,
+			lastLocation:    &lastLocation,
 			shardingReSync:      shardingReSync,
 			latestOp:            &latestOp,
 			closeShardingResync: closeShardingResync,
@@ -1227,41 +1227,41 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 		case *replication.RotateEvent:
 			err = s.handleRotateEvent(ev, ec)
 			if err != nil {
-				return terror.Annotatef(err, "current pos %s", currentPos)
+				return terror.Annotatef(err, "current location %s", currentLocation)
 			}
 		case *replication.RowsEvent:
 			err = s.handleRowsEvent(ev, ec)
 			if err != nil {
-				return terror.Annotatef(err, "current pos %s", currentPos)
+				return terror.Annotatef(err, "current location %s", currentLocation)
 			}
 		case *replication.QueryEvent:
 			err = s.handleQueryEvent(ev, ec)
 			if err != nil {
-				return terror.Annotatef(err, "current pos %s", currentPos)
+				return terror.Annotatef(err, "current location %s", currentLocation)
 			}
 		case *replication.XIDEvent:
 			if shardingReSync != nil {
-				shardingReSync.currPos.Pos = e.Header.LogPos
-				lastPos = shardingReSync.currPos
-				if binlog.ComparePosition(shardingReSync.currPos, shardingReSync.latestPos) >= 0 {
+				shardingReSync.currLocation.Position.Pos = e.Header.LogPos
+				lastLocation = shardingReSync.currLocation
+				if binlog.CompareLocation(shardingReSync.currLocation, shardingReSync.latestLocation) >= 0 {
 					s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "XID"), zap.Reflect("re-shard", shardingReSync))
 					err = closeShardingResync()
 					if err != nil {
-						return terror.Annotatef(err, "shard group current pos %s", shardingReSync.currPos)
+						return terror.Annotatef(err, "shard group current location %s", shardingReSync.currLocation)
 					}
 					continue
 				}
 			}

 			latestOp = xid
-			currentPos.Pos = e.Header.LogPos
-			s.tctx.L().Debug("", zap.String("event", "XID"), zap.Stringer("last position", lastPos), log.WrapStringerField("position", currentPos), log.WrapStringerField("gtid set", ev.GSet))
-			lastPos.Pos = e.Header.LogPos // update lastPos
+			currentLocation.Position.Pos = e.Header.LogPos
+			s.tctx.L().Debug("", zap.String("event", "XID"), zap.Stringer("last location", lastLocation), log.WrapStringerField("location", currentLocation), log.WrapStringerField("gtid set", ev.GSet))
+			lastLocation.Position.Pos = e.Header.LogPos // update lastLocation

-			job := newXIDJob(currentPos, currentPos, nil, traceID)
+			job := newXIDJob(currentLocation, currentLocation, traceID)
 			err = s.addJobFunc(job)
 			if err != nil {
-				return terror.Annotatef(err, "current pos %s", currentPos)
+				return terror.Annotatef(err, "current location %s", currentLocation)
 			}
 		}
 	}
@@ -1270,8 +1270,8 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 type eventContext struct {
 	tctx   *tcontext.Context
 	header *replication.EventHeader
-	currentPos *mysql.Position
-	lastPos    *mysql.Position
+	currentLocation *binlog.Location
+	lastLocation    *binlog.Location
 	shardingReSync      *ShardingReSync
 	latestOp            *opType
 	closeShardingResync func() error
@@ -1287,47 +1287,53 @@ type eventContext struct {
 // TODO: Further split into smaller functions and group common arguments into
 // a context struct.
 func (s *Syncer) handleRotateEvent(ev *replication.RotateEvent, ec eventContext) error {
-	*ec.currentPos = mysql.Position{
-		Name: string(ev.NextLogName),
-		Pos:  uint32(ev.Position),
+	*ec.currentLocation = binlog.Location{
+		Position: mysql.Position{
+			Name: string(ev.NextLogName),
+			Pos:  uint32(ev.Position),
+		},
+		GTID: ec.currentLocation.GTID,
 	}

-	if binlog.ComparePosition(*ec.currentPos, *ec.lastPos) > 0 {
-		*ec.lastPos = *ec.currentPos
+	if binlog.CompareLocation(*ec.currentLocation, *ec.lastLocation) > 0 {
+		*ec.lastLocation = *ec.currentLocation
 	}

 	if ec.shardingReSync != nil {
-		if binlog.ComparePosition(*ec.currentPos, ec.shardingReSync.currPos) > 0 {
-			ec.shardingReSync.currPos = *ec.currentPos
+		if binlog.CompareLocation(*ec.currentLocation, ec.shardingReSync.currLocation) > 0 {
+			ec.shardingReSync.currLocation = *ec.currentLocation
 		}
-		if binlog.ComparePosition(ec.shardingReSync.currPos, ec.shardingReSync.latestPos) >= 0 {
+		if binlog.CompareLocation(ec.shardingReSync.currLocation, ec.shardingReSync.latestLocation) >= 0 {
 			s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "rotate"), zap.Reflect("re-shard", ec.shardingReSync))
 			err := ec.closeShardingResync()
 			if err != nil {
 				return err
 			}
 		} else {
-			s.tctx.L().Debug("re-replicate shard group", zap.String("event", "rotate"), log.WrapStringerField("position", ec.currentPos), zap.Reflect("re-shard", ec.shardingReSync))
+			s.tctx.L().Debug("re-replicate shard group", zap.String("event", "rotate"), log.WrapStringerField("location", ec.currentLocation), zap.Reflect("re-shard", ec.shardingReSync))
 		}
 		return nil
 	}

 	*ec.latestOp = rotate
-	s.tctx.L().Info("", zap.String("event", "rotate"), log.WrapStringerField("position", ec.currentPos))
+	s.tctx.L().Info("", zap.String("event", "rotate"), log.WrapStringerField("location", ec.currentLocation))
 	return nil
 }

 func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) error {
 	originSchema, originTable := string(ev.Table.Schema), string(ev.Table.Table)
 	schemaName, tableName := s.renameShardingSchema(originSchema, originTable)

-	*ec.currentPos = mysql.Position{
-		Name: ec.lastPos.Name,
-		Pos:  ec.header.LogPos,
+	*ec.currentLocation = binlog.Location{
+		Position: mysql.Position{
+			Name: ec.lastLocation.Position.Name,
+			Pos:  ec.header.LogPos,
+		},
+		GTID: ec.lastLocation.GTID,
 	}

 	if ec.shardingReSync != nil {
-		ec.shardingReSync.currPos.Pos = ec.header.LogPos
-		if binlog.ComparePosition(ec.shardingReSync.currPos, ec.shardingReSync.latestPos) >= 0 {
+		ec.shardingReSync.currLocation.Position.Pos = ec.header.LogPos
+		if binlog.CompareLocation(ec.shardingReSync.currLocation, ec.shardingReSync.latestLocation) >= 0 {
 			s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "row"), zap.Reflect("re-shard", ec.shardingReSync))
 			return ec.closeShardingResync()
 		}
@@ -1339,12 +1345,12 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err
 	}

 	// DML position before table checkpoint, ignore it
-	if !s.checkpoint.IsNewerTablePoint(originSchema, originTable, *ec.currentPos) {
-		s.tctx.L().Debug("ignore obsolete event that is old than table checkpoint", zap.String("event", "row"), log.WrapStringerField("position", ec.currentPos), zap.String("origin schema", originSchema), zap.String("origin table", originTable))
+	if !s.checkpoint.IsNewerTablePoint(originSchema, originTable, *ec.currentLocation) {
+		s.tctx.L().Debug("ignore obsolete event that is older than table checkpoint", zap.String("event", "row"), log.WrapStringerField("position", ec.currentLocation), zap.String("origin schema", originSchema), zap.String("origin table", originTable))
 		return nil
 	}

-	s.tctx.L().Debug("", zap.String("event", "row"), zap.String("origin schema", originSchema), zap.String("origin table", originTable), zap.String("target schema", schemaName), zap.String("target table", tableName), log.WrapStringerField("position", ec.currentPos), zap.Reflect("raw event data", ev.Rows))
+	s.tctx.L().Debug("", zap.String("event", "row"), zap.String("origin schema", originSchema), zap.String("origin table", originTable), zap.String("target schema", schemaName), zap.String("target table", tableName), log.WrapStringerField("location", ec.currentLocation), zap.Reflect("raw event data", ev.Rows))

 	if s.cfg.EnableHeartbeat {
 		s.heartbeat.TryUpdateTaskTs(s.cfg.Name, originSchema, originTable, ev.Rows)
@@ -1356,16 +1362,16 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err
 	}
 	if ignore {
 		binlogSkippedEventsTotal.WithLabelValues("rows", s.cfg.Name).Inc()
-		// for RowsEvent, we should record lastPos rather than currentPos
-		return s.recordSkipSQLsPos(*ec.lastPos, nil)
+		// for RowsEvent, we should record lastLocation rather than currentLocation
+		return s.recordSkipSQLsLocation(*ec.lastLocation)
 	}

 	if s.cfg.IsSharding {
 		source, _ := GenTableID(originSchema, originTable)
-		if s.sgk.InSyncing(schemaName, tableName, source, *ec.currentPos) {
+		if s.sgk.InSyncing(schemaName, tableName, source, *ec.currentLocation) {
 			// if in unsync stage and not before active DDL, ignore it
 			// if in sharding re-sync stage and not before active DDL (the next DDL to be synced), ignore it
-			s.tctx.L().Debug("replicate sharding DDL, ignore Rows event", zap.String("event", "row"), zap.String("source", source), log.WrapStringerField("position", ec.currentPos))
+			s.tctx.L().Debug("replicate sharding DDL, ignore Rows event", zap.String("event", "row"), zap.String("source", source), log.WrapStringerField("location", ec.currentLocation))
 			return nil
 		}
 	}
@@ -1393,7 +1399,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err
 	// for RowsEvent, one event may have multi SQLs and multi keys, (eg. INSERT INTO t1 VALUES (11, 12), (21, 22) )
 	// to cover them dispatched to different channels, we still apply operator here
 	// ugly, but I have no better solution yet.
-	applied, sqls, err = s.tryApplySQLOperator(*ec.currentPos, nil) // forbidden sql-pattern for DMLs
+	applied, sqls, err = s.tryApplySQLOperator(*ec.currentLocation, nil) // forbidden sql-pattern for DMLs
 	if err != nil {
 		return err
 	}
@@ -1453,7 +1459,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err
 		if keys != nil {
 			key = keys[i]
 		}
-		err = s.commitJob(*ec.latestOp, originSchema, originTable, schemaName, tableName, sqls[i], arg, key, true, *ec.lastPos, *ec.currentPos, nil, *ec.traceID)
+		err = s.commitJob(*ec.latestOp, originSchema, originTable, schemaName, tableName, sqls[i], arg, key, true, *ec.lastLocation, *ec.currentLocation, *ec.traceID)
 		if err != nil {
 			return err
 		}
@@ -1462,23 +1468,27 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err
 }

 func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) error {
-	*ec.currentPos = mysql.Position{
-		Name: ec.lastPos.Name,
-		Pos:  ec.header.LogPos,
+	*ec.currentLocation = binlog.Location{
+		Position: mysql.Position{
+			Name: ec.lastLocation.Position.Name,
+			Pos:  ec.header.LogPos,
+		},
+		// TODO: use ev.GSet?
+		GTID: ec.lastLocation.GTID,
 	}

 	sql := strings.TrimSpace(string(ev.Query))
 	usedSchema := string(ev.Schema)
 	parseResult, err := s.parseDDLSQL(sql, ec.parser2, usedSchema)
 	if err != nil {
-		s.tctx.L().Error("fail to parse statement", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last position", ec.lastPos), log.WrapStringerField("position", ec.currentPos), log.WrapStringerField("gtid set", ev.GSet), log.ShortError(err))
+		s.tctx.L().Error("fail to parse statement", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet), log.ShortError(err))
 		return err
 	}

 	if parseResult.ignore {
 		binlogSkippedEventsTotal.WithLabelValues("query", s.cfg.Name).Inc()
 		s.tctx.L().Warn("skip event", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema))
-		*ec.lastPos = *ec.currentPos // before record skip pos, update lastPos
-		return s.recordSkipSQLsPos(*ec.lastPos, nil)
+		*ec.lastLocation = *ec.currentLocation // before record skip location, update lastLocation
+		return s.recordSkipSQLsLocation(*ec.lastLocation)
 	}
 	if !parseResult.isDDL {
 		// skipped sql maybe not a DDL (like `BEGIN`)
@@ -1486,8 +1496,8 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e
 	}

 	if ec.shardingReSync != nil {
-		ec.shardingReSync.currPos.Pos = ec.header.LogPos
-		if binlog.ComparePosition(ec.shardingReSync.currPos, ec.shardingReSync.latestPos) >= 0 {
+		ec.shardingReSync.currLocation.Position.Pos = ec.header.LogPos
+		if binlog.CompareLocation(ec.shardingReSync.currLocation, ec.shardingReSync.latestLocation) >= 0 {
s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "query"), zap.String("statement", sql), zap.Reflect("re-shard", ec.shardingReSync)) err2 := ec.closeShardingResync() if err2 != nil { @@ -1497,14 +1507,14 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // in re-syncing, we can simply skip all DDLs, // as they have been added to sharding DDL sequence // only update lastPos when the query is a real DDL - *ec.lastPos = ec.shardingReSync.currPos + *ec.lastLocation = ec.shardingReSync.currLocation s.tctx.L().Debug("skip event in re-replicating sharding group", zap.String("event", "query"), zap.String("statement", sql), zap.Reflect("re-shard", ec.shardingReSync)) } return nil } - s.tctx.L().Info("", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last position", ec.lastPos), log.WrapStringerField("position", ec.currentPos), log.WrapStringerField("gtid set", ev.GSet)) - *ec.lastPos = *ec.currentPos // update lastPos, because we have checked `isDDL` + s.tctx.L().Info("", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet)) + *ec.lastLocation = *ec.currentLocation // update lastLocation, because we have checked `isDDL` *ec.latestOp = ddl var ( @@ -1516,10 +1526,10 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // so can handle sharding cases sqls, onlineDDLTableNames, err = s.resolveDDLSQL(ec.tctx, ec.parser2, parseResult.stmt, usedSchema) if err != nil { - s.tctx.L().Error("fail to resolve statement", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last position", ec.lastPos), log.WrapStringerField("position", ec.currentPos), log.WrapStringerField("gtid set", ev.GSet), log.ShortError(err)) + s.tctx.L().Error("fail to resolve statement", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet), log.ShortError(err)) return err } - s.tctx.L().Info("resolve sql", zap.String("event", "query"), zap.String("raw statement", sql), zap.Strings("statements", sqls), zap.String("schema", usedSchema), zap.Stringer("last position", ec.lastPos), zap.Stringer("position", ec.currentPos), log.WrapStringerField("gtid set", ev.GSet)) + s.tctx.L().Info("resolve sql", zap.String("event", "query"), zap.String("raw statement", sql), zap.Strings("statements", sqls), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), zap.Stringer("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet)) if len(onlineDDLTableNames) > 1 { return terror.ErrSyncerUnitOnlineDDLOnMultipleTable.Generate(string(ev.Query)) @@ -1563,8 +1573,8 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // for DDL, we wait it to be executed, so we can check if event is newer in this syncer's main process goroutine // ignore obsolete DDL here can avoid to try-sync again for already synced DDLs - if !s.checkpoint.IsNewerTablePoint(tableNames[0][0].Schema, tableNames[0][0].Name, *ec.currentPos) { - s.tctx.L().Info("ignore obsolete DDL", zap.String("event", "query"), zap.String("statement", sql), 
log.WrapStringerField("position", ec.currentPos)) + if !s.checkpoint.IsNewerTablePoint(tableNames[0][0].Schema, tableNames[0][0].Name, *ec.currentLocation) { + s.tctx.L().Info("ignore obsolete DDL", zap.String("event", "query"), zap.String("statement", sql), log.WrapStringerField("location", ec.currentLocation)) continue } @@ -1611,24 +1621,24 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e targetTbls[tableNames[1][0].String()] = tableNames[1][0] } - s.tctx.L().Info("prepare to handle ddls", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("position", ec.currentPos)) + s.tctx.L().Info("prepare to handle ddls", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation)) if len(needHandleDDLs) == 0 { - s.tctx.L().Info("skip event, need handled ddls is empty", zap.String("event", "query"), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("position", ec.currentPos)) - return s.recordSkipSQLsPos(*ec.lastPos, nil) + s.tctx.L().Info("skip event, need handled ddls is empty", zap.String("event", "query"), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation)) + return s.recordSkipSQLsLocation(*ec.lastLocation) } if !s.cfg.IsSharding { - s.tctx.L().Info("start to handle ddls in normal mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("position", ec.currentPos)) + s.tctx.L().Info("start to handle ddls in normal mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation)) // try apply SQL operator before addJob. now, one query event only has one DDL job, if updating to multi DDL jobs, refine this. 
- applied, appliedSQLs, applyErr := s.tryApplySQLOperator(*ec.currentPos, needHandleDDLs) + applied, appliedSQLs, applyErr := s.tryApplySQLOperator(*ec.currentLocation, needHandleDDLs) if applyErr != nil { - return terror.Annotatef(applyErr, "try apply SQL operator on binlog-pos %s with DDLs %v", ec.currentPos, needHandleDDLs) + return terror.Annotatef(applyErr, "try apply SQL operator on binlog-location %s with DDLs %v", ec.currentLocation, needHandleDDLs) } if applied { - s.tctx.L().Info("replace ddls to preset ddls by sql operator in normal mode", zap.String("event", "query"), zap.Strings("preset ddls", appliedSQLs), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("position", ec.currentPos)) + s.tctx.L().Info("replace ddls to preset ddls by sql operator in normal mode", zap.String("event", "query"), zap.Strings("preset ddls", appliedSQLs), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation)) needHandleDDLs = appliedSQLs // maybe nil } - job := newDDLJob(nil, needHandleDDLs, *ec.lastPos, *ec.currentPos, nil, *ec.traceID) + job := newDDLJob(nil, needHandleDDLs, *ec.lastLocation, *ec.currentLocation, *ec.traceID) err = s.addJobFunc(job) if err != nil { return err @@ -1640,7 +1650,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e return terror.ErrSyncerUnitHandleDDLFailed.Generate(ev.Query) } - s.tctx.L().Info("finish to handle ddls in normal mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("position", ec.currentPos)) + s.tctx.L().Info("finish to handle ddls in normal mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation)) for _, td := range needTrackDDLs { if err = s.trackDDL(usedSchema, td.rawSQL, td.tableNames, td.stmt, &ec); err != nil { @@ -1649,7 +1659,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } for _, tbl := range targetTbls { // save checkpoint of each table - s.saveTablePoint(tbl.Schema, tbl.Name, *ec.currentPos) + s.saveTablePoint(tbl.Schema, tbl.Name, *ec.currentLocation) } for _, table := range onlineDDLTableNames { @@ -1675,8 +1685,8 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // for sharding DDL, the firstPos should be the `Pos` of the binlog, not the `End_log_pos` // so when restarting before sharding DDLs synced, this binlog can be re-sync again to trigger the TrySync startPos := mysql.Position{ - Name: ec.currentPos.Name, - Pos: ec.currentPos.Pos - ec.header.EventSize, + Name: ec.currentLocation.Position.Name, + Pos: ec.currentLocation.Position.Pos - ec.header.EventSize, } source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name) @@ -1692,7 +1702,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } annotate = "add table to shard group" default: - needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, startPos, *ec.currentPos, needHandleDDLs) + needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, startPos, *ec.currentLocation, needHandleDDLs) if err != nil { return err } @@ -1723,14 
+1733,14 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // save checkpoint in memory, don't worry, if error occurred, we can rollback it // for non-last sharding DDL's table, this checkpoint will be used to skip binlog event when re-syncing // NOTE: when last sharding DDL executed, all this checkpoints will be flushed in the same txn - s.tctx.L().Info("save table checkpoint for source", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos)) - s.saveTablePoint(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name, *ec.currentPos) + s.tctx.L().Info("save table checkpoint for source", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) + s.saveTablePoint(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name, *ec.currentLocation) if !synced { - s.tctx.L().Info("source shard group is not synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos)) + s.tctx.L().Info("source shard group is not synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) return nil } - s.tctx.L().Info("source shard group is synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos)) + s.tctx.L().Info("source shard group is synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) err = ec.safeMode.DescForTable(s.tctx, ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name) // try disable safe-mode after sharding group synced if err != nil { return err @@ -1750,7 +1760,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } *ec.shardingReSyncCh <- &ShardingReSync{ currPos: *firstEndPos, - latestPos: *ec.currentPos, + latestLocation: *ec.currentLocation, targetSchema: ddlInfo.tableNames[1][0].Schema, targetTable: ddlInfo.tableNames[1][0].Name, allResolved: allResolved, @@ -1795,24 +1805,24 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } }) - s.tctx.L().Info("execute DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos), zap.Stringer("operation", shardOp)) + s.tctx.L().Info("execute DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation), zap.Stringer("operation", shardOp)) } else { - s.tctx.L().Info("ignore DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos), zap.Stringer("operation", shardOp)) + s.tctx.L().Info("ignore DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end 
location", ec.currentLocation), zap.Stringer("operation", shardOp)) } } - s.tctx.L().Info("start to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos)) + s.tctx.L().Info("start to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) // try apply SQL operator before addJob. now, one query event only has one DDL job, if updating to multi DDL jobs, refine this. - applied, appliedSQLs, err := s.tryApplySQLOperator(*ec.currentPos, needHandleDDLs) + applied, appliedSQLs, err := s.tryApplySQLOperator(*ec.currentLocation, needHandleDDLs) if err != nil { - return terror.Annotatef(err, "try apply SQL operator on binlog-pos %s with DDLs %v", ec.currentPos, needHandleDDLs) + return terror.Annotatef(err, "try apply SQL operator on binlog-location %s with DDLs %v", ec.currentLocation, needHandleDDLs) } if applied { - s.tctx.L().Info("replace ddls to preset ddls by sql operator in shard mode", zap.String("event", "query"), zap.Strings("preset ddls", appliedSQLs), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos)) + s.tctx.L().Info("replace ddls to preset ddls by sql operator in shard mode", zap.String("event", "query"), zap.Strings("preset ddls", appliedSQLs), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) needHandleDDLs = appliedSQLs // maybe nil } - job := newDDLJob(ddlInfo, needHandleDDLs, *ec.lastPos, *ec.currentPos, nil, *ec.traceID) + job := newDDLJob(ddlInfo, needHandleDDLs, *ec.lastLocation, *ec.currentLocation, *ec.traceID) err = s.addJobFunc(job) if err != nil { return err @@ -1829,7 +1839,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } } - s.tctx.L().Info("finish to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end position", ec.currentPos)) + s.tctx.L().Info("finish to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) return nil } @@ -1886,7 +1896,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. } if shouldExecDDLOnSchemaTracker { if err := s.schemaTracker.Exec(s.tctx.Ctx, usedSchema, sql); err != nil { - s.tctx.L().Error("cannot track DDL", zap.String("schema", usedSchema), zap.String("statement", sql), log.WrapStringerField("position", ec.currentPos), log.ShortError(err)) + s.tctx.L().Error("cannot track DDL", zap.String("schema", usedSchema), zap.String("statement", sql), log.WrapStringerField("location", ec.currentLocation), log.ShortError(err)) return terror.ErrSchemaTrackerCannotExecDDL.Delegate(err, sql) } } @@ -1894,12 +1904,12 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. 
return nil } -func (s *Syncer) commitJob(tp opType, sourceSchema, sourceTable, targetSchema, targetTable, sql string, args []interface{}, keys []string, retry bool, pos, cmdPos mysql.Position, gs gtid.Set, traceID string) error { +func (s *Syncer) commitJob(tp opType, sourceSchema, sourceTable, targetSchema, targetTable, sql string, args []interface{}, keys []string, retry bool, location, cmdLocation binlog.Location, traceID string) error { key, err := s.resolveCasuality(keys) if err != nil { return terror.ErrSyncerUnitResolveCasualityFail.Generate(err) } - job := newJob(tp, sourceSchema, sourceTable, targetSchema, targetTable, sql, args, key, pos, cmdPos, gs, traceID) + job := newJob(tp, sourceSchema, sourceTable, targetSchema, targetTable, sql, args, key, location, cmdLocation, traceID) return s.addJobFunc(job) } @@ -1980,11 +1990,11 @@ func (s *Syncer) printStatus(ctx context.Context) { tps = (total - last) / seconds totalTps = total / totalSeconds - s.currentPosMu.RLock() - currentPos := s.currentPosMu.currentPos - s.currentPosMu.RUnlock() + s.currentLocationMu.RLock() + currentLocation := s.currentLocationMu.currentLocation + s.currentLocationMu.RUnlock() - remainingSize, err2 := s.fromDB.countBinaryLogsSize(currentPos) + remainingSize, err2 := s.fromDB.countBinaryLogsSize(currentLocation.Position) if err2 != nil { // log the error, but still handle the rest operation s.tctx.L().Error("fail to estimate unreplicated binlog size", zap.Error(err2)) @@ -2080,8 +2090,8 @@ func (s *Syncer) closeDBs() { // record skip ddl/dml sqls' position // make newJob's sql argument empty to distinguish normal sql and skips sql -func (s *Syncer) recordSkipSQLsPos(pos mysql.Position, gtidSet gtid.Set) error { - job := newSkipJob(pos, gtidSet) +func (s *Syncer) recordSkipSQLsLocation(location binlog.Location) error { + job := newSkipJob(location) return s.addJobFunc(job) } @@ -2091,13 +2101,13 @@ func (s *Syncer) flushJobs() error { return s.addJobFunc(job) } -func (s *Syncer) reSyncBinlog(tctx tcontext.Context, pos mysql.Position) error { +func (s *Syncer) reSyncBinlog(tctx tcontext.Context, location binlog.Location) error { err := s.retrySyncGTIDs() if err != nil { return err } // close still running sync - return s.streamerController.ReopenWithRetry(&tctx, pos) + return s.streamerController.ReopenWithRetry(&tctx, location) } func (s *Syncer) renameShardingSchema(schema, table string) (string, string) { @@ -2320,13 +2330,14 @@ func (s *Syncer) needResync() bool { // Currently, syncer doesn't handle Format_desc and Previous_gtids events. When binlog rotate to new file with only two events like above, // syncer won't save pos to 194. Actually it save pos 4 to meta file. So We got a experience value of 194 - 4 = 190. // If (mpos.Pos - spos.Pos) > 190, we could say that syncer is not up-to-date. 
- return utils.CompareBinlogPos(masterPos, s.checkpoint.GlobalPoint(), 190) == 1 + return utils.CompareBinlogPos(masterPos, s.checkpoint.GlobalPoint().Position, 190) == 1 } // assume that reset master before switching to new master, and only the new master would write // it's a weak function to try best to fix gtid set while switching master/slave func (s *Syncer) retrySyncGTIDs() error { // NOTE: our (per-table based) checkpoint does not support GTID yet, implement it if needed + // TODO: support GTID s.tctx.L().Warn("our (per-table based) checkpoint does not support GTID yet") return nil } diff --git a/syncer/warning.go b/syncer/warning.go index 8df932e77e..89011320fe 100644 --- a/syncer/warning.go +++ b/syncer/warning.go @@ -45,7 +45,7 @@ func (s *Syncer) Error() interface{} { if !utils.IsContextCanceledError(ctx.err) { errors = append(errors, &pb.SyncSQLError{ Msg: ctx.err.Error(), - FailedBinlogPosition: fmt.Sprintf("%s:%d", ctx.pos.Name, ctx.pos.Pos), + FailedBinlogPosition: fmt.Sprintf("%s:%d", ctx.location.Position.Name, ctx.location.Position.Pos), ErrorSQL: ctx.jobs, }) } From 80619bfda49c893b08329d7a55872c372a80c5fc Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Thu, 5 Mar 2020 12:21:27 +0800 Subject: [PATCH 03/35] update --- dm/worker/server.go | 5 ++- syncer/inject_sql.go | 2 +- syncer/operator.go | 3 +- syncer/sharding-meta/shardmeta.go | 8 ++-- syncer/sharding_group.go | 45 ++++++++++--------- syncer/streamer_controller.go | 13 +++--- syncer/syncer.go | 74 ++++++++++++++++--------------- syncer/warning.go | 2 +- 8 files changed, 82 insertions(+), 70 deletions(-) diff --git a/dm/worker/server.go b/dm/worker/server.go index 7e8b47876b..018f8e9c51 100644 --- a/dm/worker/server.go +++ b/dm/worker/server.go @@ -761,6 +761,7 @@ func makeCommonWorkerResponse(reqErr error) *pb.CommonWorkerResponse { // all subTask in subTaskCfgs should have same source // this function return the min position in all subtasks, used for relay's position +// Notes: used for relay, so don't need to use GTID func getMinPosInAllSubTasks(ctx context.Context, subTaskCfgs []*config.SubTaskConfig) (minPos *mysql.Position, err error) { for _, subTaskCfg := range subTaskCfgs { pos, err := getMinPosForSubTaskFunc(ctx, subTaskCfg) @@ -806,6 +807,6 @@ func getMinPosForSubTask(ctx context.Context, subTaskCfg *config.SubTaskConfig) return nil, errors.Annotate(err, "get min position from checkpoint") } - pos := checkpoint.GlobalPoint() - return &pos, nil + location := checkpoint.GlobalPoint() + return &location.Position, nil } diff --git a/syncer/inject_sql.go b/syncer/inject_sql.go index 3b21acbd8b..1f79c768e8 100644 --- a/syncer/inject_sql.go +++ b/syncer/inject_sql.go @@ -23,9 +23,9 @@ import ( "github.com/siddontang/go-mysql/replication" "go.uber.org/zap" + "github.com/pingcap/dm/pkg/binlog" parserpkg "github.com/pingcap/dm/pkg/parser" "github.com/pingcap/dm/pkg/terror" - "github.com/pingcap/dm/pkg/binlog" ) // InjectSQLs injects ddl into syncer as binlog events while meet xid/query event diff --git a/syncer/operator.go b/syncer/operator.go index 260e828deb..40fa479336 100644 --- a/syncer/operator.go +++ b/syncer/operator.go @@ -28,5 +28,6 @@ func (s *Syncer) SetSQLOperator(req *pb.HandleSubTaskSQLsRequest) error { // tryApplySQLOperator tries to get SQLs by applying an possible operator // return whether applied, and the applied SQLs func (s *Syncer) tryApplySQLOperator(location binlog.Location, sqls []string) (bool, []string, error) { - return s.sqlOperatorHolder.Apply(s.tctx, location, sqls) + // TODO: 
support GTID + return s.sqlOperatorHolder.Apply(s.tctx, location.Position, sqls) } diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go index 2a80dbda01..e62115b0e2 100644 --- a/syncer/sharding-meta/shardmeta.go +++ b/syncer/sharding-meta/shardmeta.go @@ -34,16 +34,16 @@ const ( // DDLItem records ddl information used in sharding sequence organization type DDLItem struct { FirstLocation binlog.Location `json:"first-location"` // first DDL's binlog Pos, not the End_log_pos of the event - DDLs []string `json:"ddls"` // DDLs, these ddls are in the same QueryEvent - Source string `json:"source"` // source table ID + DDLs []string `json:"ddls"` // DDLs, these ddls are in the same QueryEvent + Source string `json:"source"` // source table ID } // NewDDLItem creates a new DDLItem func NewDDLItem(location binlog.Location, ddls []string, source string) *DDLItem { return &DDLItem{ FirstLocation: location, - DDLs: ddls, - Source: source, + DDLs: ddls, + Source: source, } } diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index facef1254d..071a06aa29 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -104,18 +104,18 @@ type ShardingGroup struct { firstLocation *binlog.Location // first DDL's binlog pos and gtid, used to restrain the global checkpoint when un-resolved firstEndLocation *binlog.Location // first DDL's binlog End_log_pos and gtid, used to re-direct binlog streamer after synced - ddls []string // DDL which current in syncing + ddls []string // DDL which current in syncing } // NewShardingGroup creates a new ShardingGroup func NewShardingGroup(sourceID, shardMetaSchema, shardMetaTable string, sources []string, meta *shardmeta.ShardingMeta, isSchemaOnly bool) *ShardingGroup { sg := &ShardingGroup{ - remain: len(sources), - sources: make(map[string]bool, len(sources)), - IsSchemaOnly: isSchemaOnly, - sourceID: sourceID, - firstLocation: nil, - firstEndLocation: nil, + remain: len(sources), + sources: make(map[string]bool, len(sources)), + IsSchemaOnly: isSchemaOnly, + sourceID: sourceID, + firstLocation: nil, + firstEndLocation: nil, } if meta != nil { sg.meta = meta @@ -243,10 +243,10 @@ func (sg *ShardingGroup) UnresolvedGroupInfo() *pb.ShardingGroup { } group := &pb.ShardingGroup{ - DDLs: sg.ddls, + DDLs: sg.ddls, FirstLocation: sg.firstLocation.String(), - Synced: make([]string, 0, len(sg.sources)-sg.remain), - Unsynced: make([]string, 0, sg.remain), + Synced: make([]string, 0, len(sg.sources)-sg.remain), + Unsynced: make([]string, 0, sg.remain), } for source, synced := range sg.sources { if synced { @@ -313,7 +313,7 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { defer sg.RUnlock() if sg.remain < len(sg.sources) && sg.firstLocation != nil { // create a new pos to return - return &binlog.Location { + return &binlog.Location{ Position: mysql.Position{ Name: sg.firstLocation.Position.Name, Pos: sg.firstLocation.Position.Pos, @@ -324,7 +324,7 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { item := sg.meta.GetGlobalActiveDDL() if item != nil { // make a new copy - return &binlog.Location { + return &binlog.Location{ Position: mysql.Position{ Name: item.FirstLocation.Position.Name, Pos: item.FirstLocation.Position.Pos, @@ -336,14 +336,17 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { } // FirstEndPosUnresolved returns the first DDL End_log_pos if un-resolved, else nil -func (sg *ShardingGroup) FirstEndPosUnresolved() *mysql.Position { +func (sg 
*ShardingGroup) FirstEndPosUnresolved() *binlog.Location { sg.RLock() defer sg.RUnlock() if sg.remain < len(sg.sources) && sg.firstEndLocation != nil { // create a new pos to return - return &mysql.Position{ - Name: sg.firstEndLocation.Position.Name, - Pos: sg.firstEndLocation.Position.Pos, + return &binlog.Location{ + Position: mysql.Position{ + Name: sg.firstEndLocation.Position.Name, + Pos: sg.firstEndLocation.Position.Pos, + }, + GTID: sg.firstEndLocation.GTID, } } return nil @@ -768,9 +771,9 @@ func (k *ShardingGroupKeeper) LoadShardMeta() (map[string]*shardmeta.ShardingMet // ShardingReSync represents re-sync info for a sharding DDL group type ShardingReSync struct { - currLocation binlog.Location // current DDL's binlog location, initialize to first DDL's location - latestLocation binlog.Location // latest DDL's binlog location - targetSchema string - targetTable string - allResolved bool + currLocation binlog.Location // current DDL's binlog location, initialize to first DDL's location + latestLocation binlog.Location // latest DDL's binlog location + targetSchema string + targetTable string + allResolved bool } diff --git a/syncer/streamer_controller.go b/syncer/streamer_controller.go index c45a6dd10b..76ed089e9c 100644 --- a/syncer/streamer_controller.go +++ b/syncer/streamer_controller.go @@ -75,12 +75,15 @@ func (r *remoteBinlogReader) generateStreamer(location binlog.Location) (streame //return nil, terror.ErrSyncerUnitRemoteSteamerWithGTID.Generate() var gtid mysql.GTIDSet var err error - if r.flavor == mysql.MySQLFlavor { - gtid, err := mysql.ParseMysqlGTIDSet(location.GTID) + /* + if r.flavor == mysql.MySQLFlavor { + gtid, err = mysql.ParseMysqlGTIDSet(location.GTID) - } else { - gtid, err := mysql.ParseMariadbGTID(location.GTID) - } + } else { + gtid, err = mysql.ParseMariadbGTID(location.GTID) + } + */ + gtid, err = mysql.ParseGTIDSet(r.flavor, location.GTID) if err != nil { // TODO: use terror return nil, err diff --git a/syncer/syncer.go b/syncer/syncer.go index 75e5bcec1e..18034de6e0 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1067,13 +1067,13 @@ func (s *Syncer) Run(ctx context.Context) (err error) { // * compare last pos with current binlog's pos to determine whether re-sync completed // 6. 
use the global streamer to continue the syncing var ( - shardingReSyncCh = make(chan *ShardingReSync, 10) - shardingReSync *ShardingReSync - savedGlobalLastLocation binlog.Location - latestOp opType // latest job operation tp - eventTimeoutCounter time.Duration - traceSource = fmt.Sprintf("%s.syncer.%s", s.cfg.SourceID, s.cfg.Name) - traceID string + shardingReSyncCh = make(chan *ShardingReSync, 10) + shardingReSync *ShardingReSync + savedGlobalLastLocation binlog.Location + latestOp opType // latest job operation tp + eventTimeoutCounter time.Duration + traceSource = fmt.Sprintf("%s.syncer.%s", s.cfg.SourceID, s.cfg.Name) + traceID string ) closeShardingResync := func() error { @@ -1209,8 +1209,8 @@ func (s *Syncer) Run(ctx context.Context) (err error) { ec := eventContext{ tctx: tctx, header: e.Header, - currentLocation: ¤tLocation, - lastLocation: &lastLocation, + currentLocation: ¤tLocation, + lastLocation: &lastLocation, shardingReSync: shardingReSync, latestOp: &latestOp, closeShardingResync: closeShardingResync, @@ -1270,8 +1270,8 @@ func (s *Syncer) Run(ctx context.Context) (err error) { type eventContext struct { tctx *tcontext.Context header *replication.EventHeader - currentLocation *binlog.Location - lastLocation *binlog.Location + currentLocation *binlog.Location + lastLocation *binlog.Location shardingReSync *ShardingReSync latestOp *opType closeShardingResync func() error @@ -1287,7 +1287,7 @@ type eventContext struct { // TODO: Further split into smaller functions and group common arguments into // a context struct. func (s *Syncer) handleRotateEvent(ev *replication.RotateEvent, ec eventContext) error { - *ec.currentLocation = binlog.Location { + *ec.currentLocation = binlog.Location{ Position: mysql.Position{ Name: string(ev.NextLogName), Pos: uint32(ev.Position), @@ -1323,7 +1323,7 @@ func (s *Syncer) handleRotateEvent(ev *replication.RotateEvent, ec eventContext) func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) error { originSchema, originTable := string(ev.Table.Schema), string(ev.Table.Table) schemaName, tableName := s.renameShardingSchema(originSchema, originTable) - *ec.currentLocation = binlog.Location { + *ec.currentLocation = binlog.Location{ Position: mysql.Position{ Name: ec.lastLocation.Position.Name, Pos: ec.header.LogPos, @@ -1468,7 +1468,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err } func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) error { - *ec.currentLocation = binlog.Location { + *ec.currentLocation = binlog.Location{ Position: mysql.Position{ Name: ec.lastLocation.Position.Name, Pos: ec.header.LogPos, @@ -1684,9 +1684,13 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e ) // for sharding DDL, the firstPos should be the `Pos` of the binlog, not the `End_log_pos` // so when restarting before sharding DDLs synced, this binlog can be re-sync again to trigger the TrySync - startPos := mysql.Position{ - Name: ec.currentLocation.Position.Name, - Pos: ec.currentLocation.Position.Pos - ec.header.EventSize, + startLocation := binlog.Location{ + Position: mysql.Position{ + Name: ec.currentLocation.Position.Name, + Pos: ec.currentLocation.Position.Pos - ec.header.EventSize, + }, + // TODO: use what GTID? 
+ // GTID: currentLocation } source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name) @@ -1702,19 +1706,19 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } annotate = "add table to shard group" default: - needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, startPos, *ec.currentLocation, needHandleDDLs) + needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, startLocation, *ec.currentLocation, needHandleDDLs) if err != nil { return err } annotate = "try to sync table in shard group" // meets DDL that will not be processed in sequence sharding if !active { - s.tctx.L().Info("skip in-activeDDL", zap.String("event", "query"), zap.String("source", source), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Bool("in-sharding", needShardingHandle), zap.Stringer("start position", startPos), zap.Bool("is-synced", synced), zap.Int("unsynced", remain)) + s.tctx.L().Info("skip in-activeDDL", zap.String("event", "query"), zap.String("source", source), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Bool("in-sharding", needShardingHandle), zap.Stringer("start location", startLocation), zap.Bool("is-synced", synced), zap.Int("unsynced", remain)) return nil } } - s.tctx.L().Info(annotate, zap.String("event", "query"), zap.String("source", source), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Bool("in-sharding", needShardingHandle), zap.Stringer("start position", startPos), zap.Bool("is-synced", synced), zap.Int("unsynced", remain)) + s.tctx.L().Info(annotate, zap.String("event", "query"), zap.String("source", source), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Bool("in-sharding", needShardingHandle), zap.Stringer("start location", startLocation), zap.Bool("is-synced", synced), zap.Int("unsynced", remain)) if needShardingHandle { target, _ := GenTableID(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name) @@ -1733,14 +1737,14 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // save checkpoint in memory, don't worry, if error occurred, we can rollback it // for non-last sharding DDL's table, this checkpoint will be used to skip binlog event when re-syncing // NOTE: when last sharding DDL executed, all this checkpoints will be flushed in the same txn - s.tctx.L().Info("save table checkpoint for source", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) + s.tctx.L().Info("save table checkpoint for source", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) s.saveTablePoint(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name, *ec.currentLocation) if !synced { - s.tctx.L().Info("source shard group is not synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) + s.tctx.L().Info("source shard group is not synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start location", startLocation), 
log.WrapStringerField("end location", ec.currentLocation)) return nil } - s.tctx.L().Info("source shard group is synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) + s.tctx.L().Info("source shard group is synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) err = ec.safeMode.DescForTable(s.tctx, ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name) // try disable safe-mode after sharding group synced if err != nil { return err @@ -1749,8 +1753,8 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e if cap(*ec.shardingReSyncCh) < len(sqls) { *ec.shardingReSyncCh = make(chan *ShardingReSync, len(sqls)) } - firstEndPos := group.FirstEndPosUnresolved() - if firstEndPos == nil { + firstEndLocation := group.FirstEndPosUnresolved() + if firstEndLocation == nil { return terror.ErrSyncerUnitFirstEndPosNotFound.Generate(source) } @@ -1759,11 +1763,11 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e return err2 } *ec.shardingReSyncCh <- &ShardingReSync{ - currPos: *firstEndPos, - latestLocation: *ec.currentLocation, - targetSchema: ddlInfo.tableNames[1][0].Schema, - targetTable: ddlInfo.tableNames[1][0].Name, - allResolved: allResolved, + currLocation: *firstEndLocation, + latestLocation: *ec.currentLocation, + targetSchema: ddlInfo.tableNames[1][0].Schema, + targetTable: ddlInfo.tableNames[1][0].Name, + allResolved: allResolved, } // Don't send new DDLInfo to dm-master until all local sql jobs finished @@ -1805,13 +1809,13 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } }) - s.tctx.L().Info("execute DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation), zap.Stringer("operation", shardOp)) + s.tctx.L().Info("execute DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation), zap.Stringer("operation", shardOp)) } else { - s.tctx.L().Info("ignore DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation), zap.Stringer("operation", shardOp)) + s.tctx.L().Info("ignore DDL job", zap.String("event", "query"), zap.String("source", source), zap.ByteString("raw statement", ev.Query), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation), zap.Stringer("operation", shardOp)) } } - s.tctx.L().Info("start to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) + s.tctx.L().Info("start to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) // try apply SQL operator before addJob. 
now, one query event only has one DDL job, if updating to multi DDL jobs, refine this. applied, appliedSQLs, err := s.tryApplySQLOperator(*ec.currentLocation, needHandleDDLs) @@ -1819,7 +1823,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e return terror.Annotatef(err, "try apply SQL operator on binlog-location %s with DDLs %v", ec.currentLocation, needHandleDDLs) } if applied { - s.tctx.L().Info("replace ddls to preset ddls by sql operator in shard mode", zap.String("event", "query"), zap.Strings("preset ddls", appliedSQLs), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) + s.tctx.L().Info("replace ddls to preset ddls by sql operator in shard mode", zap.String("event", "query"), zap.Strings("preset ddls", appliedSQLs), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) needHandleDDLs = appliedSQLs // maybe nil } job := newDDLJob(ddlInfo, needHandleDDLs, *ec.lastLocation, *ec.currentLocation, *ec.traceID) @@ -1839,7 +1843,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } } - s.tctx.L().Info("finish to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start position", startPos), log.WrapStringerField("end location", ec.currentLocation)) + s.tctx.L().Info("finish to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) return nil } diff --git a/syncer/warning.go b/syncer/warning.go index 89011320fe..6b42257f63 100644 --- a/syncer/warning.go +++ b/syncer/warning.go @@ -37,7 +37,7 @@ func (s *Syncer) Error() interface{} { defer s.execErrors.Unlock() sort.Slice(s.execErrors.errors, func(i, j int) bool { - return utils.CompareBinlogPos(s.execErrors.errors[i].pos, s.execErrors.errors[j].pos, 0) == -1 + return utils.CompareBinlogPos(s.execErrors.errors[i].location.Position, s.execErrors.errors[j].location.Position, 0) == -1 }) errors := make([]*pb.SyncSQLError, 0, len(s.execErrors.errors)) From f9ccc8c040b4abbb3962d7c0f6152852fb2bbb3e Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Thu, 5 Mar 2020 16:12:32 +0800 Subject: [PATCH 04/35] add TODO --- syncer/syncer.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/syncer/syncer.go b/syncer/syncer.go index 18034de6e0..5d3774eb84 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1242,6 +1242,9 @@ func (s *Syncer) Run(ctx context.Context) (err error) { case *replication.XIDEvent: if shardingReSync != nil { shardingReSync.currLocation.Position.Pos = e.Header.LogPos + // TODO:update gtid + + // only need compare binlog position? 
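
A sketch of what the two TODOs above could expand to. It borrows the GTIDSet.Set call that patch 06 below introduces for currentLocation, so it assumes patch 05's change of Location.GTID to a gtid.Set has already landed:

	// Illustrative only: fold the XID event's GTID set into the re-sync
	// location before comparing it against latestLocation.
	if ev.GSet != nil {
		// gtid.Set.Set replaces the underlying set with ev.GSet.
		if err2 := shardingReSync.currLocation.GTIDSet.Set(ev.GSet); err2 != nil {
			return err2
		}
	}
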
lastLocation = shardingReSync.currLocation if binlog.CompareLocation(shardingReSync.currLocation, shardingReSync.latestLocation) >= 0 { s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "XID"), zap.Reflect("re-shard", shardingReSync)) @@ -1255,6 +1258,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) { latestOp = xid currentLocation.Position.Pos = e.Header.LogPos + // TODO:update gtid s.tctx.L().Debug("", zap.String("event", "XID"), zap.Stringer("last location", lastLocation), log.WrapStringerField("location", currentLocation), log.WrapStringerField("gtid set", ev.GSet)) lastLocation.Position.Pos = e.Header.LogPos // update lastPos @@ -1473,8 +1477,8 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e Name: ec.lastLocation.Position.Name, Pos: ec.header.LogPos, }, - // TODO: use ev.GSet? - GTID: ec.lastLocation.GTID, + // TODO: use new ev.GSet? + //GTID: } sql := strings.TrimSpace(string(ev.Query)) usedSchema := string(ev.Schema) @@ -1488,6 +1492,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e binlogSkippedEventsTotal.WithLabelValues("query", s.cfg.Name).Inc() s.tctx.L().Warn("skip event", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema)) *ec.lastLocation = *ec.currentLocation // before record skip location, update lastLocation + // TODO: update GTID return s.recordSkipSQLsLocation(*ec.lastLocation) } if !parseResult.isDDL { @@ -1497,6 +1502,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e if ec.shardingReSync != nil { ec.shardingReSync.currLocation.Position.Pos = ec.header.LogPos + // TODO: update GTID? if binlog.CompareLocation(ec.shardingReSync.currLocation, ec.shardingReSync.latestLocation) >= 0 { s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "query"), zap.String("statement", sql), zap.Reflect("re-shard", ec.shardingReSync)) err2 := ec.closeShardingResync() @@ -1508,6 +1514,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // as they have been added to sharding DDL sequence // only update lastPos when the query is a real DDL *ec.lastLocation = ec.shardingReSync.currLocation + // TODO: set gtid s.tctx.L().Debug("skip event in re-replicating sharding group", zap.String("event", "query"), zap.String("statement", sql), zap.Reflect("re-shard", ec.shardingReSync)) } return nil From 36b1f8b8c2426060a9a4c7e6e39a8ad227aee6a3 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Thu, 5 Mar 2020 17:37:33 +0800 Subject: [PATCH 05/35] use gtid.Set --- pkg/binlog/position.go | 5 +++-- syncer/checkpoint.go | 30 ++++++++++++++++++------------ syncer/sharding_group.go | 6 +++--- syncer/status.go | 2 +- syncer/streamer_controller.go | 19 +++++++------------ syncer/syncer.go | 4 ++-- 6 files changed, 34 insertions(+), 32 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index 88fcff0766..62c8177f92 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -20,6 +20,7 @@ import ( gmysql "github.com/siddontang/go-mysql/mysql" + "github.com/pingcap/dm/pkg/gtid" "github.com/pingcap/dm/pkg/terror" "github.com/pingcap/dm/pkg/utils" ) @@ -166,11 +167,11 @@ func ComparePosition(pos1, pos2 gmysql.Position) int { type Location struct { Position gmysql.Position - GTID string + GTIDSet gtid.Set } func (p Location) String() string { - return fmt.Sprintf("Position: %v, GTID: %s", p.Position, p.GTID) + return fmt.Sprintf("Position: %v, 
GTID: %s", p.Position, p.GTIDSet) } // CompareLocation returns: diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index 1cb1d0ad95..248b1dfc2d 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -24,6 +24,7 @@ import ( pbinlog "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" + "github.com/pingcap/dm/pkg/gtid" "github.com/pingcap/dm/pkg/log" "github.com/pingcap/dm/pkg/schema" "github.com/pingcap/dm/pkg/terror" @@ -619,16 +620,16 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T // checkpoints in DB have higher priority // if don't want to use checkpoint in DB, set `remove-previous-checkpoint` to `true` var ( - cpSchema string - cpTable string - binlogName string - binlogPos uint32 - binlogGTID string - tiBytes []byte - isGlobal bool + cpSchema string + cpTable string + binlogName string + binlogPos uint32 + binlogGTIDSet gtid.Set + tiBytes []byte + isGlobal bool ) for rows.Next() { - err := rows.Scan(&cpSchema, &cpTable, &binlogName, &binlogPos, &binlogGTID, &tiBytes, &isGlobal) + err := rows.Scan(&cpSchema, &cpTable, &binlogName, &binlogPos, &binlogGTIDSet, &tiBytes, &isGlobal) if err != nil { return terror.WithScope(terror.DBErrorAdapt(err, terror.ErrDBDriverError), terror.ScopeDownstream) } @@ -637,7 +638,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T Name: binlogName, Pos: binlogPos, }, - GTID: binlogGTID, + GTIDSet: binlogGTIDSet, } if isGlobal { if pbinlog.CompareLocation(location, minLocation) > 0 { @@ -736,7 +737,7 @@ func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location pbin if len(tiBytes) == 0 { tiBytes = []byte("null") } - args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, location.GTID, tiBytes, isGlobal} + args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, location.GTIDSet, tiBytes, isGlobal} return sql2, args } @@ -744,13 +745,18 @@ func (cp *RemoteCheckPoint) parseMetaData() (*pbinlog.Location, error) { // `metadata` is mydumper's output meta file name filename := path.Join(cp.cfg.Dir, "metadata") cp.logCtx.L().Info("parsing metadata from file", zap.String("file", filename)) - pos, gtid, err := utils.ParseMetaData(filename) + pos, gsetStr, err := utils.ParseMetaData(filename) + if err != nil { + return nil, err + } + + gset, err := gtid.ParserGTID(cp.cfg.Flavor, gsetStr) if err != nil { return nil, err } return &pbinlog.Location{ Position: *pos, - GTID: gtid, + GTIDSet: gset, }, nil } diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index 071a06aa29..214eb1067a 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -318,7 +318,7 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { Name: sg.firstLocation.Position.Name, Pos: sg.firstLocation.Position.Pos, }, - GTID: sg.firstLocation.GTID, + GTIDSet: sg.firstLocation.GTIDSet, } } item := sg.meta.GetGlobalActiveDDL() @@ -329,7 +329,7 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { Name: item.FirstLocation.Position.Name, Pos: item.FirstLocation.Position.Pos, }, - GTID: item.FirstLocation.GTID, + GTIDSet: item.FirstLocation.GTIDSet, } } return nil @@ -346,7 +346,7 @@ func (sg *ShardingGroup) FirstEndPosUnresolved() *binlog.Location { Name: sg.firstEndLocation.Position.Name, Pos: sg.firstEndLocation.Position.Pos, }, - GTID: sg.firstEndLocation.GTID, + GTIDSet: sg.firstEndLocation.GTIDSet, } } 
return nil diff --git a/syncer/status.go b/syncer/status.go index 5d87de58bb..2a5198daf8 100644 --- a/syncer/status.go +++ b/syncer/status.go @@ -53,7 +53,7 @@ func (s *Syncer) Status() interface{} { if masterGTIDSet != nil { // masterGTIDSet maybe a nil interface st.MasterBinlogGtid = masterGTIDSet.String() } - st.SyncerBinlogGtid = syncerLocation.GTID + st.SyncerBinlogGtid = syncerLocation.GTIDSet.String() st.BinlogType = "unknown" if s.streamerController != nil { diff --git a/syncer/streamer_controller.go b/syncer/streamer_controller.go index 76ed089e9c..54fee33286 100644 --- a/syncer/streamer_controller.go +++ b/syncer/streamer_controller.go @@ -73,22 +73,17 @@ func (r *remoteBinlogReader) generateStreamer(location binlog.Location) (streame if r.EnableGTID { // NOTE: our (per-table based) checkpoint does not support GTID yet //return nil, terror.ErrSyncerUnitRemoteSteamerWithGTID.Generate() - var gtid mysql.GTIDSet - var err error /* - if r.flavor == mysql.MySQLFlavor { - gtid, err = mysql.ParseMysqlGTIDSet(location.GTID) + var gtid mysql.GTIDSet - } else { - gtid, err = mysql.ParseMariadbGTID(location.GTID) + var err error + gtid, err = mysql.ParseGTIDSet(r.flavor, location.GTIDSet) + if err != nil { + // TODO: use terror + return nil, err } */ - gtid, err = mysql.ParseGTIDSet(r.flavor, location.GTID) - if err != nil { - // TODO: use terror - return nil, err - } - streamer, err := r.reader.StartSyncGTID(gtid) + streamer, err := r.reader.StartSyncGTID(location.GTIDSet.Origin()) if err != nil { return nil, err } diff --git a/syncer/syncer.go b/syncer/syncer.go index 5d3774eb84..62ce196e1b 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1296,7 +1296,7 @@ func (s *Syncer) handleRotateEvent(ev *replication.RotateEvent, ec eventContext) Name: string(ev.NextLogName), Pos: uint32(ev.Position), }, - GTID: ec.currentLocation.GTID, + GTIDSet: ec.currentLocation.GTIDSet, } if binlog.CompareLocation(*ec.currentLocation, *ec.lastLocation) > 0 { *ec.lastLocation = *ec.currentLocation @@ -1332,7 +1332,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err Name: ec.lastLocation.Position.Name, Pos: ec.header.LogPos, }, - GTID: ec.lastLocation.GTID, + GTIDSet: ec.lastLocation.GTIDSet, } if ec.shardingReSync != nil { From 4b5ede369cc2c32acb1eb366701398d464dbe570 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Fri, 6 Mar 2020 00:20:12 +0800 Subject: [PATCH 06/35] update test --- pkg/binlog/position.go | 14 ++++- pkg/utils/mydumper_test.go | 10 +++- syncer/checkpoint_test.go | 79 +++++++++++++------------- syncer/job_test.go | 11 ++-- syncer/sharding-meta/shardmeta_test.go | 60 +++++++++---------- syncer/sharding_group_test.go | 9 +-- syncer/streamer_controller.go | 13 ----- syncer/syncer.go | 8 ++- syncer/syncer_test.go | 15 ++--- 9 files changed, 115 insertions(+), 104 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index 62c8177f92..01a39b7cec 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -180,5 +180,17 @@ func (p Location) String() string { // -1 if point1 is less than point2 func CompareLocation(location1, location2 Location) int { // TODO: compare gtid - return 1 + if len(location1.String()) != 0 && len(location2.String()) != 0 { + contain1 := location1.GTIDSet.Contain(location2.GTIDSet) + if contain1 { + return 1 + } + + contain2 := location2.GTIDSet.Contain(location1.GTIDSet) + if contain2 { + return -1 + } + } + + return ComparePosition(location1.Position, location2.Position) } diff --git 
a/pkg/utils/mydumper_test.go b/pkg/utils/mydumper_test.go index 9e8508fea3..a12856a7b9 100644 --- a/pkg/utils/mydumper_test.go +++ b/pkg/utils/mydumper_test.go @@ -27,8 +27,9 @@ func (t *testUtilsSuite) TestParseMetaData(c *C) { defer os.Remove(f.Name()) testCases := []struct { - source string - pos *mysql.Position + source string + pos *mysql.Position + gsetStr string }{ { `Started dump at: 2018-12-28 07:20:49 @@ -42,6 +43,7 @@ Finished dump at: 2018-12-28 07:20:51`, Name: "bin.000001", Pos: 2479, }, + "97b5142f-e19c-11e8-808c-0242ac110005:1-13", }, { `Started dump at: 2018-12-27 19:51:22 @@ -61,14 +63,16 @@ Finished dump at: 2018-12-27 19:51:22`, Name: "mysql-bin.000003", Pos: 3295817, }, + "", }, } for _, tc := range testCases { err := ioutil.WriteFile(f.Name(), []byte(tc.source), 0644) c.Assert(err, IsNil) - pos, err := ParseMetaData(f.Name()) + pos, gsetStr, err := ParseMetaData(f.Name()) c.Assert(err, IsNil) c.Assert(pos, DeepEquals, tc.pos) + c.Assert(gsetStr, Equals, tc.gsetStr) } } diff --git a/syncer/checkpoint_test.go b/syncer/checkpoint_test.go index 6d28db4331..cc4cf6617e 100644 --- a/syncer/checkpoint_test.go +++ b/syncer/checkpoint_test.go @@ -21,6 +21,7 @@ import ( "strings" "github.com/pingcap/dm/dm/config" + "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/retry" @@ -123,15 +124,15 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { tctx := tcontext.Background() // global checkpoint init to min - c.Assert(cp.GlobalPoint(), Equals, minCheckpoint) - c.Assert(cp.FlushedGlobalPoint(), Equals, minCheckpoint) + c.Assert(cp.GlobalPoint(), Equals, minLocation) + c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) // try load, but should load nothing s.mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) err := cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, minCheckpoint) - c.Assert(cp.FlushedGlobalPoint(), Equals, minCheckpoint) + c.Assert(cp.GlobalPoint(), Equals, minLocation) + c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) oldMode := s.cfg.Mode oldDir := s.cfg.Dir @@ -160,15 +161,15 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { s.mock.ExpectQuery(loadCheckPointSQL).WithArgs(cpid).WillReturnRows(sqlmock.NewRows(nil)) err = cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - cp.SaveGlobalPoint(pos1) + cp.SaveGlobalPoint(binlog.Location{Position: pos1}) s.mock.ExpectBegin() s.mock.ExpectExec("(162)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos1.Name, pos1.Pos, []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) s.mock.ExpectCommit() err = cp.FlushPointsExcept(tctx, nil, nil, nil) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, pos1) - c.Assert(cp.FlushedGlobalPoint(), Equals, pos1) + c.Assert(cp.GlobalPoint().Position, Equals, pos1) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos1) // try load from config pos1.Pos = 2044 @@ -176,8 +177,8 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { s.cfg.Meta = &config.Meta{BinLogName: pos1.Name, BinLogPos: pos1.Pos} err = cp.LoadMeta() c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, pos1) - c.Assert(cp.FlushedGlobalPoint(), Equals, pos1) + c.Assert(cp.GlobalPoint().Position, Equals, pos1) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos1) s.cfg.Mode = oldMode s.cfg.Meta = nil @@ -187,19 +188,19 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp 
CheckPoint) { Name: "mysql-bin.000005", Pos: 2052, } - cp.SaveGlobalPoint(pos2) - c.Assert(cp.GlobalPoint(), Equals, pos2) - c.Assert(cp.FlushedGlobalPoint(), Equals, pos1) + cp.SaveGlobalPoint(binlog.Location{Position: pos2}) + c.Assert(cp.GlobalPoint().Position, Equals, pos2) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos1) // test rollback cp.Rollback(s.tracker) - c.Assert(cp.GlobalPoint(), Equals, pos1) - c.Assert(cp.FlushedGlobalPoint(), Equals, pos1) + c.Assert(cp.GlobalPoint().Position, Equals, pos1) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos1) // save again - cp.SaveGlobalPoint(pos2) - c.Assert(cp.GlobalPoint(), Equals, pos2) - c.Assert(cp.FlushedGlobalPoint(), Equals, pos1) + cp.SaveGlobalPoint(binlog.Location{Position: pos2}) + c.Assert(cp.GlobalPoint().Position, Equals, pos2) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos1) // flush + rollback s.mock.ExpectBegin() @@ -208,19 +209,19 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { err = cp.FlushPointsExcept(tctx, nil, nil, nil) c.Assert(err, IsNil) cp.Rollback(s.tracker) - c.Assert(cp.GlobalPoint(), Equals, pos2) - c.Assert(cp.FlushedGlobalPoint(), Equals, pos2) + c.Assert(cp.GlobalPoint().Position, Equals, pos2) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos2) // try load from DB pos3 := pos2 pos3.Pos = pos2.Pos + 1000 // > pos2 to enable save - cp.SaveGlobalPoint(pos3) + cp.SaveGlobalPoint(binlog.Location{Position: pos3}) columns := []string{"cp_schema", "cp_table", "binlog_name", "binlog_pos", "table_info", "is_global"} s.mock.ExpectQuery(loadCheckPointSQL).WithArgs(cpid).WillReturnRows(sqlmock.NewRows(columns).AddRow("", "", pos2.Name, pos2.Pos, []byte("null"), true)) err = cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, pos2) - c.Assert(cp.FlushedGlobalPoint(), Equals, pos2) + c.Assert(cp.GlobalPoint().Position, Equals, pos2) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos2) // test save older point /*var buf bytes.Buffer @@ -239,14 +240,14 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { s.mock.ExpectCommit() err = cp.Clear(tctx) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, minCheckpoint) - c.Assert(cp.FlushedGlobalPoint(), Equals, minCheckpoint) + c.Assert(cp.GlobalPoint(), Equals, minLocation) + c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) s.mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) err = cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, minCheckpoint) - c.Assert(cp.FlushedGlobalPoint(), Equals, minCheckpoint) + c.Assert(cp.GlobalPoint(), Equals, minLocation) + c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) } func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { @@ -266,22 +267,22 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { ) // not exist - newer := cp.IsNewerTablePoint(schema, table, pos1) + newer := cp.IsNewerTablePoint(schema, table, binlog.Location{Position: pos1}) c.Assert(newer, IsTrue) // save - cp.SaveTablePoint(schema, table, pos2, nil) - newer = cp.IsNewerTablePoint(schema, table, pos1) + cp.SaveTablePoint(schema, table, binlog.Location{Position: pos2}, nil) + newer = cp.IsNewerTablePoint(schema, table, binlog.Location{Position: pos1}) c.Assert(newer, IsFalse) // rollback, to min cp.Rollback(s.tracker) - newer = cp.IsNewerTablePoint(schema, table, pos1) + newer = cp.IsNewerTablePoint(schema, table, binlog.Location{Position: 
pos1}) c.Assert(newer, IsTrue) // save again - cp.SaveTablePoint(schema, table, pos2, nil) - newer = cp.IsNewerTablePoint(schema, table, pos1) + cp.SaveTablePoint(schema, table, binlog.Location{Position: pos2}, nil) + newer = cp.IsNewerTablePoint(schema, table, binlog.Location{Position: pos1}) c.Assert(newer, IsFalse) // flush + rollback @@ -291,7 +292,7 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { err = cp.FlushPointsExcept(tctx, nil, nil, nil) c.Assert(err, IsNil) cp.Rollback(s.tracker) - newer = cp.IsNewerTablePoint(schema, table, pos1) + newer = cp.IsNewerTablePoint(schema, table, binlog.Location{Position: pos1}) c.Assert(newer, IsFalse) // clear, to min @@ -300,12 +301,12 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { s.mock.ExpectCommit() err = cp.Clear(tctx) c.Assert(err, IsNil) - newer = cp.IsNewerTablePoint(schema, table, pos1) + newer = cp.IsNewerTablePoint(schema, table, binlog.Location{Position: pos1}) c.Assert(newer, IsTrue) // save - cp.SaveTablePoint(schema, table, pos2, nil) - newer = cp.IsNewerTablePoint(schema, table, pos1) + cp.SaveTablePoint(schema, table, binlog.Location{Position: pos2}, nil) + newer = cp.IsNewerTablePoint(schema, table, binlog.Location{Position: pos1}) c.Assert(newer, IsFalse) // test save table point less than global point @@ -316,8 +317,8 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { matchStr = strings.ReplaceAll(strings.ReplaceAll(matchStr, "(", "\\("), ")", "\\)") c.Assert(r, Matches, matchStr) }() - cp.SaveGlobalPoint(pos2) - cp.SaveTablePoint(schema, table, pos1, nil) + cp.SaveGlobalPoint(binlog.Location{Position: pos2}) + cp.SaveTablePoint(schema, table, binlog.Location{Position: pos1}, nil) }() // flush but except + rollback @@ -327,6 +328,6 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { err = cp.FlushPointsExcept(tctx, [][]string{{schema, table}}, nil, nil) c.Assert(err, IsNil) cp.Rollback(s.tracker) - newer = cp.IsNewerTablePoint(schema, table, pos1) + newer = cp.IsNewerTablePoint(schema, table, binlog.Location{Position: pos1}) c.Assert(newer, IsTrue) } diff --git a/syncer/job_test.go b/syncer/job_test.go index e536793389..6948104505 100644 --- a/syncer/job_test.go +++ b/syncer/job_test.go @@ -16,7 +16,8 @@ package syncer import ( . 
"github.com/pingcap/check" "github.com/pingcap/tidb-tools/pkg/filter" - "github.com/siddontang/go-mysql/mysql" + + "github.com/pingcap/dm/pkg/binlog" ) var _ = Suite(&testJobSuite{}) @@ -86,19 +87,19 @@ func (t *testJobSuite) TestJob(c *C) { jobStr string }{ { - newJob(insert, "test", "t1", "test", "t1", "insert into test.t1 values(?)", []interface{}{1}, "1", mysql.Position{}, mysql.Position{}, nil, ""), + newJob(insert, "test", "t1", "test", "t1", "insert into test.t1 values(?)", []interface{}{1}, "1", binlog.Location{}, binlog.Location{}, ""), "tp: insert, sql: insert into test.t1 values(?), args: [1], key: 1, ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, { - newDDLJob(ddlInfo, []string{"create database test"}, mysql.Position{}, mysql.Position{}, nil, ""), + newDDLJob(ddlInfo, []string{"create database test"}, binlog.Location{}, binlog.Location{}, ""), "tp: ddl, sql: , args: [], key: , ddls: [create database test], last_pos: (, 0), current_pos: (, 0), gtid:", }, { - newXIDJob(mysql.Position{}, mysql.Position{}, nil, ""), + newXIDJob(binlog.Location{}, binlog.Location{}, ""), "tp: xid, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, { newFlushJob(), "tp: flush, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, { - newSkipJob(mysql.Position{}, nil), + newSkipJob(binlog.Location{}), "tp: skip, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, } diff --git a/syncer/sharding-meta/shardmeta_test.go b/syncer/sharding-meta/shardmeta_test.go index c5b6ab5244..69f5149499 100644 --- a/syncer/sharding-meta/shardmeta_test.go +++ b/syncer/sharding-meta/shardmeta_test.go @@ -19,6 +19,8 @@ import ( "github.com/pingcap/check" "github.com/siddontang/go-mysql/mysql" + + "github.com/pingcap/dm/pkg/binlog" ) var _ = check.Suite(&testShardMetaSuite{}) @@ -36,7 +38,7 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { err error sqls []string args [][]interface{} - pos mysql.Position + location binlog.Location filename = "mysql-bin.000001" table1 = "table1" table2 = "table2" @@ -47,15 +49,15 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { tableID = "`target_db`.`target_table`" meta = NewShardingMeta(metaSchema, metaTable) items = []*DDLItem{ - NewDDLItem(mysql.Position{filename, 1000}, []string{"ddl1"}, table1), - NewDDLItem(mysql.Position{filename, 1200}, []string{"ddl2-1,ddl2-2"}, table1), - NewDDLItem(mysql.Position{filename, 1400}, []string{"ddl3"}, table1), - NewDDLItem(mysql.Position{filename, 1600}, []string{"ddl1"}, table2), - NewDDLItem(mysql.Position{filename, 1800}, []string{"ddl2-1,ddl2-2"}, table2), - NewDDLItem(mysql.Position{filename, 2000}, []string{"ddl3"}, table2), - NewDDLItem(mysql.Position{filename, 2200}, []string{"ddl1"}, table3), - NewDDLItem(mysql.Position{filename, 2400}, []string{"ddl2-1,ddl2-2"}, table3), - NewDDLItem(mysql.Position{filename, 2600}, []string{"ddl3"}, table3), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1000}}, []string{"ddl1"}, table1), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1200}}, []string{"ddl2-1,ddl2-2"}, table1), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1400}}, []string{"ddl3"}, table1), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1600}}, []string{"ddl1"}, table2), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1800}}, []string{"ddl2-1,ddl2-2"}, table2), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 2000}}, 
[]string{"ddl3"}, table2), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 2200}}, []string{"ddl1"}, table3), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 2400}}, []string{"ddl2-1,ddl2-2"}, table3), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 2600}}, []string{"ddl3"}, table3), } ) @@ -76,9 +78,9 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.GetActiveDDLItem(table2), check.DeepEquals, items[3]) c.Assert(meta.GetActiveDDLItem(table3), check.DeepEquals, items[6]) c.Assert(meta.InSequenceSharding(), check.IsTrue) - pos, err = meta.ActiveDDLFirstPos() + location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(pos, check.DeepEquals, items[0].FirstPos) + c.Assert(location.Position, check.DeepEquals, items[0].FirstLocation) // find synced in shrading group, and call ShardingMeta.ResolveShardingDDL c.Assert(meta.ResolveShardingDDL(), check.IsFalse) @@ -88,9 +90,9 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.GetActiveDDLItem(table2), check.DeepEquals, items[4]) c.Assert(meta.GetActiveDDLItem(table3), check.IsNil) c.Assert(meta.InSequenceSharding(), check.IsTrue) - pos, err = meta.ActiveDDLFirstPos() + location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(pos, check.DeepEquals, items[1].FirstPos) + c.Assert(location.Position, check.DeepEquals, items[1].FirstLocation) sqls, args = meta.FlushData(sourceID, tableID) c.Assert(sqls, check.HasLen, 4) @@ -122,9 +124,9 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.GetActiveDDLItem(table2), check.DeepEquals, items[4]) c.Assert(meta.GetActiveDDLItem(table3), check.DeepEquals, items[7]) c.Assert(meta.InSequenceSharding(), check.IsTrue) - pos, err = meta.ActiveDDLFirstPos() + location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(pos, check.DeepEquals, items[1].FirstPos) + c.Assert(location.Position, check.DeepEquals, items[1].FirstLocation) // find synced in shrading group, and call ShardingMeta.ResolveShardingDDL c.Assert(meta.ResolveShardingDDL(), check.IsFalse) @@ -134,9 +136,9 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.GetActiveDDLItem(table2), check.DeepEquals, items[5]) c.Assert(meta.GetActiveDDLItem(table3), check.IsNil) c.Assert(meta.InSequenceSharding(), check.IsTrue) - pos, err = meta.ActiveDDLFirstPos() + location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(pos, check.DeepEquals, items[2].FirstPos) + c.Assert(location.Position, check.DeepEquals, items[2].FirstLocation) sqls, args = meta.FlushData(sourceID, tableID) c.Assert(sqls, check.HasLen, 4) @@ -167,9 +169,9 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.GetActiveDDLItem(table2), check.DeepEquals, items[5]) c.Assert(meta.GetActiveDDLItem(table3), check.DeepEquals, items[8]) c.Assert(meta.InSequenceSharding(), check.IsTrue) - pos, err = meta.ActiveDDLFirstPos() + location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(pos, check.DeepEquals, items[2].FirstPos) + c.Assert(location.Position, check.DeepEquals, items[2].FirstLocation) // find synced in shrading group, and call ShardingMeta.ResolveShardingDDL c.Assert(meta.ResolveShardingDDL(), check.IsTrue) @@ -179,7 +181,7 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.GetActiveDDLItem(table2), check.IsNil) c.Assert(meta.GetActiveDDLItem(table3), check.IsNil) 
c.Assert(meta.InSequenceSharding(), check.IsFalse) - pos, err = meta.ActiveDDLFirstPos() + location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.ErrorMatches, fmt.Sprintf("\\[.*\\] activeIdx %d larger than length of global DDLItems: .*", meta.ActiveIdx())) sqls, args = meta.FlushData(sourceID, tableID) @@ -198,12 +200,12 @@ func (t *testShardMetaSuite) TestShardingMetaWrongSequence(c *check.C) { table2 = "table2" meta = NewShardingMeta("", "") items = []*DDLItem{ - NewDDLItem(mysql.Position{filename, 1000}, []string{"ddl1"}, table1), - NewDDLItem(mysql.Position{filename, 1200}, []string{"ddl2"}, table1), - NewDDLItem(mysql.Position{filename, 1400}, []string{"ddl3"}, table1), - NewDDLItem(mysql.Position{filename, 1600}, []string{"ddl1"}, table2), - NewDDLItem(mysql.Position{filename, 1800}, []string{"ddl3"}, table2), - NewDDLItem(mysql.Position{filename, 2000}, []string{"ddl2"}, table2), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1000}}, []string{"ddl1"}, table1), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1200}}, []string{"ddl2"}, table1), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1400}}, []string{"ddl3"}, table1), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1600}}, []string{"ddl1"}, table2), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1800}}, []string{"ddl3"}, table2), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 2000}}, []string{"ddl2"}, table2), } ) @@ -252,8 +254,8 @@ func (t *testShardMetaSuite) TestFlushLoadMeta(c *check.C) { meta = NewShardingMeta(metaSchema, metaTable) loadedMeta = NewShardingMeta(metaSchema, metaTable) items = []*DDLItem{ - NewDDLItem(mysql.Position{filename, 1000}, []string{"ddl1"}, table1), - NewDDLItem(mysql.Position{filename, 1200}, []string{"ddl1"}, table2), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1000}}, []string{"ddl1"}, table1), + NewDDLItem(binlog.Location{Position: mysql.Position{filename, 1200}}, []string{"ddl1"}, table2), } ) for _, item := range items { diff --git a/syncer/sharding_group_test.go b/syncer/sharding_group_test.go index f5dc8686e5..960a72b993 100644 --- a/syncer/sharding_group_test.go +++ b/syncer/sharding_group_test.go @@ -18,6 +18,7 @@ import ( "github.com/siddontang/go-mysql/mysql" "github.com/pingcap/dm/dm/config" + "github.com/pingcap/dm/pkg/binlog" tcontext "github.com/pingcap/dm/pkg/context" ) @@ -40,25 +41,25 @@ func (t *testShardingGroupSuite) TestLowestFirstPosInGroups(c *C) { g1 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db1.tbl1", "db1.tbl2"}, nil, false) pos1 := mysql.Position{Name: "mysql-bin.000002", Pos: 123} endPos1 := mysql.Position{Name: "mysql-bin.000002", Pos: 456} - _, _, _, err := g1.TrySync("db1.tbl1", pos1, endPos1, ddls) + _, _, _, err := g1.TrySync("db1.tbl1", binlog.Location{Position: pos1}, binlog.Location{Position: endPos1}, ddls) c.Assert(err, IsNil) // lowest g2 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db2.tbl1", "db2.tbl2"}, nil, false) pos2 := mysql.Position{Name: "mysql-bin.000001", Pos: 123} endPos2 := mysql.Position{Name: "mysql-bin.000001", Pos: 456} - _, _, _, err = g2.TrySync("db2.tbl1", pos2, endPos2, ddls) + _, _, _, err = g2.TrySync("db2.tbl1", binlog.Location{Position: pos2}, binlog.Location{Position: endPos2}, ddls) c.Assert(err, IsNil) g3 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db3.tbl1", "db3.tbl2"}, nil, false) 
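Note: the assertion at the end of this test expects the keeper to report the earliest first-location across all groups (pos2 here, since mysql-bin.000001 sorts before .000002 and .000003). Roughly, that selection looks like the following hypothetical helper (a sketch, not the keeper's actual code; assumes the pkg/binlog import):

    // lowestFirstLocation returns the smallest of the given locations per
    // CompareLocation; a sketch of what lowestFirstLocationInGroups computes.
    func lowestFirstLocation(locs []binlog.Location) *binlog.Location {
        var lowest *binlog.Location
        for i := range locs {
            if lowest == nil || binlog.CompareLocation(locs[i], *lowest) < 0 {
                lowest = &locs[i]
            }
        }
        return lowest
    }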
pos3 := mysql.Position{Name: "mysql-bin.000003", Pos: 123} endPos3 := mysql.Position{Name: "mysql-bin.000003", Pos: 456} - _, _, _, err = g3.TrySync("db3.tbl1", pos3, endPos3, ddls) + _, _, _, err = g3.TrySync("db3.tbl1", binlog.Location{Position: pos3}, binlog.Location{Position: endPos3}, ddls) c.Assert(err, IsNil) k.groups["db1.tbl"] = g1 k.groups["db2.tbl"] = g2 k.groups["db3.tbl"] = g3 - c.Assert(*k.lowestFirstPosInGroups(), DeepEquals, pos2) + c.Assert(k.lowestFirstLocationInGroups().Position, DeepEquals, pos2) } diff --git a/syncer/streamer_controller.go b/syncer/streamer_controller.go index 54fee33286..bbdc7f794d 100644 --- a/syncer/streamer_controller.go +++ b/syncer/streamer_controller.go @@ -69,20 +69,7 @@ func (r *remoteBinlogReader) generateStreamer(location binlog.Location) (streame r.tctx.L().Info("last slave connection", zap.Uint32("connection ID", lastSlaveConnectionID)) }() - // FIXME: can enable GTID if r.EnableGTID { - // NOTE: our (per-table based) checkpoint does not support GTID yet - //return nil, terror.ErrSyncerUnitRemoteSteamerWithGTID.Generate() - /* - var gtid mysql.GTIDSet - - var err error - gtid, err = mysql.ParseGTIDSet(r.flavor, location.GTIDSet) - if err != nil { - // TODO: use terror - return nil, err - } - */ streamer, err := r.reader.StartSyncGTID(location.GTIDSet.Origin()) if err != nil { return nil, err diff --git a/syncer/syncer.go b/syncer/syncer.go index 62ce196e1b..00c5f0a5f5 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1258,9 +1258,10 @@ func (s *Syncer) Run(ctx context.Context) (err error) { latestOp = xid currentLocation.Position.Pos = e.Header.LogPos - // TODO:update gtid + currentLocation.GTIDSet.Set(ev.GSet) s.tctx.L().Debug("", zap.String("event", "XID"), zap.Stringer("last location", lastLocation), log.WrapStringerField("location", currentLocation), log.WrapStringerField("gtid set", ev.GSet)) lastLocation.Position.Pos = e.Header.LogPos // update lastPos + lastLocation.GTIDSet.Set(ev.GSet) job := newXIDJob(currentLocation, currentLocation, traceID) err = s.addJobFunc(job) @@ -1477,9 +1478,10 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e Name: ec.lastLocation.Position.Name, Pos: ec.header.LogPos, }, - // TODO: use new ev.GSet? 
- //GTID: + GTIDSet: ec.lastLocation.GTIDSet, } + ec.currentLocation.GTIDSet.Set(ev.GSet) + sql := strings.TrimSpace(string(ev.Query)) usedSchema := string(ev.Schema) parseResult, err := s.parseDDLSQL(sql, ec.parser2, usedSchema) diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 1a01951b12..23be037da3 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/dm/dm/config" "github.com/pingcap/dm/dm/pb" + "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/binlog/event" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" @@ -98,15 +99,15 @@ type MockStreamProducer struct { events []*replication.BinlogEvent } -func (mp *MockStreamProducer) generateStreamer(pos mysql.Position) (streamer2.Streamer, error) { - if pos.Pos == 4 { +func (mp *MockStreamProducer) generateStreamer(location binlog.Location) (streamer2.Streamer, error) { + if location.Position.Pos == 4 { return &MockStreamer{mp.events, 0}, nil } bytesLen := 0 idx := uint32(0) for i, e := range mp.events { bytesLen += len(e.RawData) - if pos.Pos == uint32(bytesLen) { + if location.Position.Pos == uint32(bytesLen) { idx = uint32(i) break } @@ -953,8 +954,8 @@ func (s *testSyncerSuite) TestGeneratedColumn(c *C) { syncer.toDBConns = []*DBConn{{baseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})}} syncer.reset() - syncer.streamerController = NewStreamerController(tcontext.Background(), syncer.syncCfg, syncer.fromDB, syncer.binlogType, syncer.cfg.RelayDir, syncer.timezone) - err = syncer.streamerController.Start(tcontext.Background(), pos) + syncer.streamerController = NewStreamerController(tcontext.Background(), syncer.syncCfg, true, syncer.fromDB, syncer.binlogType, syncer.cfg.RelayDir, syncer.timezone) + err = syncer.streamerController.Start(tcontext.Background(), binlog.Location{Position: pos}) c.Assert(err, IsNil) for _, testCase := range testCases { @@ -1144,7 +1145,7 @@ func (s *testSyncerSuite) TestRun(c *C) { } mockStreamerProducer := &MockStreamProducer{s.generateEvents(events1, c)} - mockStreamer, err := mockStreamerProducer.generateStreamer(mysql.Position{}) + mockStreamer, err := mockStreamerProducer.generateStreamer(binlog.Location{}) c.Assert(err, IsNil) syncer.streamerController = &StreamerController{ streamerProducer: mockStreamerProducer, @@ -1247,7 +1248,7 @@ func (s *testSyncerSuite) TestRun(c *C) { // simulate `syncer.Resume` here, but doesn't reset database conns syncer.reset() mockStreamerProducer = &MockStreamProducer{s.generateEvents(events2, c)} - mockStreamer, err = mockStreamerProducer.generateStreamer(mysql.Position{}) + mockStreamer, err = mockStreamerProducer.generateStreamer(binlog.Location{}) c.Assert(err, IsNil) syncer.streamerController = &StreamerController{ streamerProducer: mockStreamerProducer, From 0b92af954158debfaeed7a45c3f3229e7c8db16c Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Fri, 6 Mar 2020 16:55:01 +0800 Subject: [PATCH 07/35] minor fix --- dm/worker/worker.go | 1 + pkg/binlog/position.go | 19 +++++++++++-------- pkg/utils/mydumper.go | 2 +- syncer/checkpoint.go | 13 +++++++++---- syncer/syncer.go | 3 +-- 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/dm/worker/worker.go b/dm/worker/worker.go index b549a8cbc7..777d87508b 100644 --- a/dm/worker/worker.go +++ b/dm/worker/worker.go @@ -609,6 +609,7 @@ func (w *Worker) copyConfigFromWorker(cfg *config.SubTaskConfig) { cfg.ServerID = w.cfg.ServerID cfg.RelayDir = w.cfg.RelayDir cfg.EnableGTID = w.cfg.EnableGTID + 
cfg.SyncerConfig.EnableGTID = w.cfg.EnableGTID
 	cfg.UseRelay = w.cfg.EnableRelay
 
 	// we can remove this from SubTaskConfig later, because syncer will always read from relay
diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go
index 01a39b7cec..ffc31d2e69 100644
--- a/pkg/binlog/position.go
+++ b/pkg/binlog/position.go
@@ -179,16 +179,20 @@ func (p Location) String() string {
 // 0 if point1 is equal to point2
 // -1 if point1 is less than point2
 func CompareLocation(location1, location2 Location) int {
-	// TODO: compare gtid
-	if len(location1.String()) != 0 && len(location2.String()) != 0 {
+	fmt.Println(location1)
+	fmt.Println(location2)
+	if location1.GTIDSet != nil && location2.GTIDSet != nil {
 		contain1 := location1.GTIDSet.Contain(location2.GTIDSet)
-		if contain1 {
-			return 1
-		}
 		contain2 := location2.GTIDSet.Contain(location1.GTIDSet)
-		if contain2 {
-			return -1
+		if contain1 && contain2 {
+			// gtidSet1 contains gtidSet2 and gtidSet2 contains gtidSet1 means gtidSet1 equals to gtidSet2, then need to compare by position.
+		} else {
+			if contain1 {
+				return 1
+			} else if contain2 {
+				return -1
+			}
 		}
 	}
diff --git a/pkg/utils/mydumper.go b/pkg/utils/mydumper.go
index 7df801f1fc..08d0347917 100644
--- a/pkg/utils/mydumper.go
+++ b/pkg/utils/mydumper.go
@@ -65,7 +65,7 @@ func ParseMetaData(filename string) (*mysql.Position, string, error) {
 		case "Log":
 			pos.Name = value
 		case "Pos":
-			pos64, err := strconv.ParseUint(parts[1], 10, 32)
+			pos64, err := strconv.ParseUint(value, 10, 32)
 			if err != nil {
 				return nil, "", terror.ErrParseMydumperMeta.Generate(err)
 			}
diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go
index 248b1dfc2d..d50f008b4d 100644
--- a/syncer/checkpoint.go
+++ b/syncer/checkpoint.go
@@ -586,6 +586,7 @@ func (cp *RemoteCheckPoint) createTable(tctx *tcontext.Context) error {
 			cp_table VARCHAR(128) NOT NULL,
 			binlog_name VARCHAR(128),
 			binlog_pos INT UNSIGNED,
+			binlog_gtid VARCHAR(256),
 			table_info JSON NOT NULL,
 			is_global BOOLEAN,
 			create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
@@ -624,7 +625,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T
 		cpTable       string
 		binlogName    string
 		binlogPos     uint32
-		binlogGTIDSet gtid.Set
+		binlogGTIDSet string
 		tiBytes       []byte
 		isGlobal      bool
 	)
@@ -633,12 +634,16 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T
 		if err != nil {
 			return terror.WithScope(terror.DBErrorAdapt(err, terror.ErrDBDriverError), terror.ScopeDownstream)
 		}
+		gset, err := gtid.ParserGTID(cp.cfg.Flavor, binlogGTIDSet)
+		if err != nil {
+			return err
+		}
 		location := pbinlog.Location{
 			Position: mysql.Position{
 				Name: binlogName,
 				Pos:  binlogPos,
 			},
-			GTIDSet: binlogGTIDSet,
+			GTIDSet: gset,
 		}
 		if isGlobal {
 			if pbinlog.CompareLocation(location, minLocation) > 0 {
@@ -720,7 +725,7 @@ func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location pbin
 	// to keep `create_time`, `update_time` correctly
 	sql2 := `INSERT INTO ` + cp.tableName + ` (id, cp_schema, cp_table, binlog_name, binlog_pos, binlog_gtid, table_info, is_global) VALUES
-		(?, ?, ?, ?, ?, ?, ?)
+		(?, ?, ?, ?, ?, ?, ?, ?)
ON DUPLICATE KEY UPDATE binlog_name = VALUES(binlog_name), binlog_pos = VALUES(binlog_pos), @@ -737,7 +742,7 @@ func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location pbin if len(tiBytes) == 0 { tiBytes = []byte("null") } - args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, location.GTIDSet, tiBytes, isGlobal} + args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, location.GTIDSet.String(), tiBytes, isGlobal} return sql2, args } diff --git a/syncer/syncer.go b/syncer/syncer.go index 00c5f0a5f5..fc56e2f0d0 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1698,8 +1698,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e Name: ec.currentLocation.Position.Name, Pos: ec.currentLocation.Position.Pos - ec.header.EventSize, }, - // TODO: use what GTID? - // GTID: currentLocation + GTIDSet: ec.lastLocation.GTIDSet, } source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name) From ce6494d8fc8dc19c8c52aa4797736bd8871a9a68 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Fri, 6 Mar 2020 17:14:48 +0800 Subject: [PATCH 08/35] enable gtid in test --- pkg/binlog/position.go | 5 ++--- tests/ha/conf/source1.toml | 2 +- tests/incremental_mode/conf/source1.toml | 2 +- tests/online_ddl/conf/source1.toml | 2 +- tests/sharding/conf/source1.toml | 2 +- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index ffc31d2e69..8c778f400e 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -179,13 +179,12 @@ func (p Location) String() string { // 0 if point1 is equal to point2 // -1 if point1 is less than point2 func CompareLocation(location1, location2 Location) int { - fmt.Println(location1) - fmt.Println(location2) if location1.GTIDSet != nil && location2.GTIDSet != nil { contain1 := location1.GTIDSet.Contain(location2.GTIDSet) contain2 := location2.GTIDSet.Contain(location1.GTIDSet) if contain1 && contain2 { - // gtidSet1 contains gtidSet2 and gtidSet2 contains gtidSet1 means gtidSet1 equals to gtidSet2, then need to compare by position. + // gtidSet1 contains gtidSet2 and gtidSet2 contains gtidSet1 means gtidSet1 equals to gtidSet2, + // then need to compare by position. 
} else { if contain1 { return 1 diff --git a/tests/ha/conf/source1.toml b/tests/ha/conf/source1.toml index aff4a899ca..89ec2db2c0 100644 --- a/tests/ha/conf/source1.toml +++ b/tests/ha/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true # relay-binlog-name = "" # relay-binlog-gtid = "" enable-relay = false diff --git a/tests/incremental_mode/conf/source1.toml b/tests/incremental_mode/conf/source1.toml index e9748fb168..7b08068ef2 100644 --- a/tests/incremental_mode/conf/source1.toml +++ b/tests/incremental_mode/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" diff --git a/tests/online_ddl/conf/source1.toml b/tests/online_ddl/conf/source1.toml index 87078f17a5..e167626d4d 100644 --- a/tests/online_ddl/conf/source1.toml +++ b/tests/online_ddl/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" diff --git a/tests/sharding/conf/source1.toml b/tests/sharding/conf/source1.toml index e9748fb168..7b08068ef2 100644 --- a/tests/sharding/conf/source1.toml +++ b/tests/sharding/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" From b7058673d85613b58b4d8a4b6b0c14846877d33f Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 10:31:37 +0800 Subject: [PATCH 09/35] fix nil point --- dm/config/task.go | 1 + pkg/binlog/position.go | 5 +- pkg/gtid/gtid.go | 87 +++++++++++++++----------- syncer/checkpoint.go | 45 ++++++++++--- syncer/sharding-meta/shardmeta_test.go | 2 +- syncer/syncer.go | 2 +- 6 files changed, 94 insertions(+), 48 deletions(-) diff --git a/dm/config/task.go b/dm/config/task.go index dfbe8dc737..425f1d9cf5 100644 --- a/dm/config/task.go +++ b/dm/config/task.go @@ -65,6 +65,7 @@ var ( type Meta struct { BinLogName string `toml:"binlog-name" yaml:"binlog-name"` BinLogPos uint32 `toml:"binlog-pos" yaml:"binlog-pos"` + BinLogGTID string `toml:"binlog-gtid" yaml:"binlog-gtid"` } // Verify does verification on configs diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index 8c778f400e..9489640106 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -171,7 +171,7 @@ type Location struct { } func (p Location) String() string { - return fmt.Sprintf("Position: %v, GTID: %s", p.Position, p.GTIDSet) + return fmt.Sprintf("Position: %v, GTIDSet: %s", p.Position, p.GTIDSet) } // CompareLocation returns: @@ -179,7 +179,8 @@ func (p Location) String() string { // 0 if point1 is equal to point2 // -1 if point1 is less than point2 func CompareLocation(location1, location2 Location) int { - if location1.GTIDSet != nil && location2.GTIDSet != nil { + if location1.GTIDSet != nil && len(location1.GTIDSet.String()) != 0 && + location2.GTIDSet != nil && len(location2.GTIDSet.String()) != 0 { contain1 := location1.GTIDSet.Contain(location2.GTIDSet) contain2 := location2.GTIDSet.Contain(location1.GTIDSet) if contain1 && contain2 { diff --git a/pkg/gtid/gtid.go b/pkg/gtid/gtid.go index e7d133e784..3b5d89e04d 100644 --- a/pkg/gtid/gtid.go +++ b/pkg/gtid/gtid.go @@ -58,9 +58,9 @@ func ParserGTID(flavor, gtidStr string) (Set, error) { switch flavor { case mysql.MariaDBFlavor: - m = &mariadbGTIDSet{} + m = &MariadbGTIDSet{} case mysql.MySQLFlavor: - m = &mySQLGTIDSet{} + m = 
&MySQLGTIDSet{} default: return nil, terror.ErrNotSupportedFlavor.Generate(flavor) } @@ -72,12 +72,12 @@ func ParserGTID(flavor, gtidStr string) (Set, error) { // MySQLGTIDSet wraps mysql.MysqlGTIDSet to implement gtidSet interface // extend some functions to retrieve and compute an intersection with other MySQL GTID Set -type mySQLGTIDSet struct { +type MySQLGTIDSet struct { set *mysql.MysqlGTIDSet } -// replace g by other -func (g *mySQLGTIDSet) Set(other mysql.GTIDSet) error { +// Set implements Set.Set, replace g by other +func (g *MySQLGTIDSet) Set(other mysql.GTIDSet) error { if other == nil { return nil } @@ -91,12 +91,13 @@ func (g *mySQLGTIDSet) Set(other mysql.GTIDSet) error { return nil } -func (g *mySQLGTIDSet) Replace(other Set, masters []interface{}) error { +// Replace implements Set.Replace +func (g *MySQLGTIDSet) Replace(other Set, masters []interface{}) error { if other == nil { return nil } - otherGS, ok := other.(*mySQLGTIDSet) + otherGS, ok := other.(*MySQLGTIDSet) if !ok { return terror.ErrNotMySQLGTID.Generate(other) } @@ -124,29 +125,32 @@ func (g *mySQLGTIDSet) Replace(other Set, masters []interface{}) error { return nil } -func (g *mySQLGTIDSet) delete(uuid string) { +func (g *MySQLGTIDSet) delete(uuid string) { delete(g.set.Sets, uuid) } -func (g *mySQLGTIDSet) get(uuid string) (*mysql.UUIDSet, bool) { +func (g *MySQLGTIDSet) get(uuid string) (*mysql.UUIDSet, bool) { uuidSet, ok := g.set.Sets[uuid] return uuidSet, ok } -func (g *mySQLGTIDSet) Clone() Set { - return &mySQLGTIDSet{ +// Clone implements Set.Clone +func (g *MySQLGTIDSet) Clone() Set { + return &MySQLGTIDSet{ set: g.set.Clone().(*mysql.MysqlGTIDSet), } } -func (g *mySQLGTIDSet) Origin() mysql.GTIDSet { +// Origin implements Set.Origin +func (g *MySQLGTIDSet) Origin() mysql.GTIDSet { return g.set.Clone().(*mysql.MysqlGTIDSet) } -func (g *mySQLGTIDSet) Equal(other Set) bool { +// Equal implements Set.Equal +func (g *MySQLGTIDSet) Equal(other Set) bool { otherIsNil := other == nil if !otherIsNil { - otherGS, ok := other.(*mySQLGTIDSet) + otherGS, ok := other.(*MySQLGTIDSet) if !ok { return false } @@ -162,10 +166,11 @@ func (g *mySQLGTIDSet) Equal(other Set) bool { return g.set.Equal(other.Origin()) } -func (g *mySQLGTIDSet) Contain(other Set) bool { +// Contain implements Set.Contain +func (g *MySQLGTIDSet) Contain(other Set) bool { otherIsNil := other == nil if !otherIsNil { - otherGs, ok := other.(*mySQLGTIDSet) + otherGs, ok := other.(*MySQLGTIDSet) if !ok { return false } @@ -179,14 +184,15 @@ func (g *mySQLGTIDSet) Contain(other Set) bool { return g.set.Contain(other.Origin()) } -func (g *mySQLGTIDSet) Truncate(end Set) error { +// Truncate implements Set.Truncate +func (g *MySQLGTIDSet) Truncate(end Set) error { if end == nil { return nil // do nothing } if !g.Contain(end) { return terror.ErrGTIDTruncateInvalid.Generate(g, end) } - endGs := end.(*mySQLGTIDSet) // already verify the type is `*mySQLGTIDSet` in `Contain`. + endGs := end.(*MySQLGTIDSet) // already verify the type is `*MySQLGTIDSet` in `Contain`. 
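Note: Contain is the primitive that Truncate here and the new CompareLocation both build on. A short sketch of its semantics, reusing a server UUID from the test data (assumes imports of pkg/gtid and fmt; error handling elided):

    g1, _ := gtid.ParserGTID("mysql", "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14")
    g2, _ := gtid.ParserGTID("mysql", "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-10")
    fmt.Println(g1.Contain(g2)) // true: interval 1-14 covers 1-10
    fmt.Println(g2.Contain(g1)) // false
    // mutual containment is how CompareLocation detects equal sets
    fmt.Println(g1.Contain(g2) && g2.Contain(g1)) // false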
if endGs == nil { return nil // do nothing } @@ -209,7 +215,7 @@ func (g *mySQLGTIDSet) Truncate(end Set) error { return nil } -func (g *mySQLGTIDSet) String() string { +func (g *MySQLGTIDSet) String() string { if g.set == nil { return "" } @@ -217,12 +223,15 @@ func (g *mySQLGTIDSet) String() string { } /************************ mariadb gtid set ***************************/ -type mariadbGTIDSet struct { + +// MariadbGTIDSet wraps mysql.MariadbGTIDSet to implement gtidSet interface +// extend some functions to retrieve and compute an intersection with other Mariadb GTID Set +type MariadbGTIDSet struct { set *mysql.MariadbGTIDSet } -// replace g by other -func (m *mariadbGTIDSet) Set(other mysql.GTIDSet) error { +// Set implements Set.Set, replace g by other +func (m *MariadbGTIDSet) Set(other mysql.GTIDSet) error { if other == nil { return nil } @@ -236,12 +245,13 @@ func (m *mariadbGTIDSet) Set(other mysql.GTIDSet) error { return nil } -func (m *mariadbGTIDSet) Replace(other Set, masters []interface{}) error { +// Replace implements Set.Replace +func (m *MariadbGTIDSet) Replace(other Set, masters []interface{}) error { if other == nil { return nil } - otherGS, ok := other.(*mariadbGTIDSet) + otherGS, ok := other.(*MariadbGTIDSet) if !ok { return terror.ErrNotMariaDBGTID.Generate(other) } @@ -269,29 +279,32 @@ func (m *mariadbGTIDSet) Replace(other Set, masters []interface{}) error { return nil } -func (m *mariadbGTIDSet) delete(domainID uint32) { +func (m *MariadbGTIDSet) delete(domainID uint32) { delete(m.set.Sets, domainID) } -func (m *mariadbGTIDSet) get(domainID uint32) (*mysql.MariadbGTID, bool) { +func (m *MariadbGTIDSet) get(domainID uint32) (*mysql.MariadbGTID, bool) { gtid, ok := m.set.Sets[domainID] return gtid, ok } -func (m *mariadbGTIDSet) Clone() Set { - return &mariadbGTIDSet{ +// Clone implements Set.Clone +func (m *MariadbGTIDSet) Clone() Set { + return &MariadbGTIDSet{ set: m.set.Clone().(*mysql.MariadbGTIDSet), } } -func (m *mariadbGTIDSet) Origin() mysql.GTIDSet { +// Origin implements Set.Origin +func (m *MariadbGTIDSet) Origin() mysql.GTIDSet { return m.set.Clone().(*mysql.MariadbGTIDSet) } -func (m *mariadbGTIDSet) Equal(other Set) bool { +// Equal implements Set.Equal +func (m *MariadbGTIDSet) Equal(other Set) bool { otherIsNil := other == nil if !otherIsNil { - otherGS, ok := other.(*mariadbGTIDSet) + otherGS, ok := other.(*MariadbGTIDSet) if !ok { return false } @@ -307,10 +320,11 @@ func (m *mariadbGTIDSet) Equal(other Set) bool { return m.set.Equal(other.Origin()) } -func (m *mariadbGTIDSet) Contain(other Set) bool { +// Contain implements Set.Contain +func (m *MariadbGTIDSet) Contain(other Set) bool { otherIsNil := other == nil if !otherIsNil { - otherGS, ok := other.(*mariadbGTIDSet) + otherGS, ok := other.(*MariadbGTIDSet) if !ok { return false } @@ -324,14 +338,15 @@ func (m *mariadbGTIDSet) Contain(other Set) bool { return m.set.Contain(other.Origin()) } -func (m *mariadbGTIDSet) Truncate(end Set) error { +// Truncate implements Set.Truncate +func (m *MariadbGTIDSet) Truncate(end Set) error { if end == nil { return nil // do nothing } if !m.Contain(end) { return terror.ErrGTIDTruncateInvalid.Generate(m, end) } - endGs := end.(*mariadbGTIDSet) // already verify the type is `*mariadbGTIDSet` in `Contain`. + endGs := end.(*MariadbGTIDSet) // already verify the type is `*MariadbGTIDSet` in `Contain`. 
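Note: the MariaDB flavor mirrors the MySQL behaviour through the shared Set interface, with domain-server-sequence GTIDs instead of UUID intervals. A sketch using values from the tests (same assumptions as the MySQL example above):

    g1, _ := gtid.ParserGTID("mariadb", "1-1-1,2-2-2")
    g2, _ := gtid.ParserGTID("mariadb", "1-1-1,2-2-1")
    fmt.Println(g1.Contain(g2)) // true: equal in domain 1, sequence 2 >= 1 in domain 2
    fmt.Println(g2.Contain(g1)) // false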
if endGs == nil { return nil // do nothing } @@ -350,7 +365,7 @@ func (m *mariadbGTIDSet) Truncate(end Set) error { return nil } -func (m *mariadbGTIDSet) String() string { +func (m *MariadbGTIDSet) String() string { if m.set == nil { return "" } diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index d50f008b4d..3216e71d5a 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -52,9 +52,6 @@ var ( globalCpTable = "" // global checkpoint's cp_table maxCheckPointTimeout = "1m" minPosition = mysql.Position{Pos: 4} - minLocation = pbinlog.Location{ - Position: minPosition, - } maxCheckPointSaveTime = 30 * time.Second ) @@ -262,7 +259,7 @@ func NewRemoteCheckPoint(tctx *tcontext.Context, cfg *config.SubTaskConfig, id s tableName: dbutil.TableName(cfg.MetaSchema, cfg.Name+"_syncer_checkpoint"), id: id, points: make(map[string]map[string]*binlogPoint), - globalPoint: newBinlogPoint(minLocation, minLocation, nil, nil), + globalPoint: newBinlogPoint(minLocation(cfg.Flavor), minLocation(cfg.Flavor), nil, nil), logCtx: tcontext.Background().WithLogger(tctx.L().WithFields(zap.String("component", "remote checkpoint"))), } @@ -308,7 +305,7 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error { return err } - cp.globalPoint = newBinlogPoint(minLocation, minLocation, nil, nil) + cp.globalPoint = newBinlogPoint(minLocation(cp.cfg.Flavor), minLocation(cp.cfg.Flavor), nil, nil) cp.points = make(map[string]map[string]*binlogPoint) @@ -342,7 +339,7 @@ func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, loc } point, ok := mSchema[sourceTable] if !ok { - mSchema[sourceTable] = newBinlogPoint(location, minLocation, ti, nil) + mSchema[sourceTable] = newBinlogPoint(location, minLocation(cp.cfg.Flavor), ti, nil) } else if err := point.save(location, ti); err != nil { cp.logCtx.L().Error("fail to save table point", zap.String("schema", sourceSchema), zap.String("table", sourceTable), log.ShortError(err)) } @@ -646,7 +643,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T GTIDSet: gset, } if isGlobal { - if pbinlog.CompareLocation(location, minLocation) > 0 { + if pbinlog.CompareLocation(location, minLocation(cp.cfg.Flavor)) > 0 { cp.globalPoint = newBinlogPoint(location, location, nil, nil) cp.logCtx.L().Info("fetch global checkpoint from DB", log.WrapStringerField("global checkpoint", cp.globalPoint)) } @@ -696,14 +693,24 @@ func (cp *RemoteCheckPoint) LoadMeta() error { // load meta from task config if cp.cfg.Meta == nil { cp.logCtx.L().Warn("don't set meta in increment task-mode") + location1 := minLocation(cp.cfg.Flavor) + cp.globalPoint = newBinlogPoint(location1, location1, nil, nil) return nil } + gset, err := gtid.ParserGTID(cp.cfg.Flavor, cp.cfg.Meta.BinLogGTID) + if err != nil { + return err + } + if gset == nil { + gset = minGTIDSet(cp.cfg.Flavor) + } + location = &pbinlog.Location{ Position: mysql.Position{ Name: cp.cfg.Meta.BinLogName, Pos: cp.cfg.Meta.BinLogPos, }, - // GTID: , + GTIDSet: gset, } default: // should not go here (syncer is only used in `all` or `incremental` mode) @@ -765,3 +772,25 @@ func (cp *RemoteCheckPoint) parseMetaData() (*pbinlog.Location, error) { GTIDSet: gset, }, nil } + +func minLocation(flavor string) pbinlog.Location { + if flavor == mysql.MySQLFlavor { + return pbinlog.Location{ + Position: minPosition, + GTIDSet: minGTIDSet(flavor), + } + } + + return pbinlog.Location{ + Position: minPosition, + GTIDSet: minGTIDSet(flavor), + } +} + +func minGTIDSet(flavor string) gtid.Set { + if 
flavor == mysql.MySQLFlavor {
+		return &gtid.MySQLGTIDSet{}
+	}
+
+	return &gtid.MariadbGTIDSet{}
+}
diff --git a/syncer/sharding-meta/shardmeta_test.go b/syncer/sharding-meta/shardmeta_test.go
index 69f5149499..c1de16871d 100644
--- a/syncer/sharding-meta/shardmeta_test.go
+++ b/syncer/sharding-meta/shardmeta_test.go
@@ -80,7 +80,7 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) {
 	c.Assert(meta.InSequenceSharding(), check.IsTrue)
 	location, err = meta.ActiveDDLFirstLocation()
 	c.Assert(err, check.IsNil)
-	c.Assert(location.Position, check.DeepEquals, items[0].FirstLocation)
+	c.Assert(location.Position, check.DeepEquals, items[0].FirstLocation.Position)
 
 	// find synced in shrading group, and call ShardingMeta.ResolveShardingDDL
 	c.Assert(meta.ResolveShardingDDL(), check.IsFalse)
diff --git a/syncer/syncer.go b/syncer/syncer.go
index b6d531c8a9..6aa3723a05 100644
--- a/syncer/syncer.go
+++ b/syncer/syncer.go
@@ -425,7 +425,7 @@ func (s *Syncer) initShardingGroups() error {
 func (s *Syncer) IsFreshTask(ctx context.Context) (bool, error) {
 	globalPoint := s.checkpoint.GlobalPoint()
 	tablePoint := s.checkpoint.TablePoint()
-	return binlog.CompareLocation(globalPoint, minLocation) <= 0 && len(tablePoint) == 0, nil
+	return binlog.CompareLocation(globalPoint, minLocation(s.cfg.Flavor)) <= 0 && len(tablePoint) == 0, nil
 }
 
 func (s *Syncer) reset() {

From 6b244aa1c8358a3e4db563149e70618f7368cd91 Mon Sep 17 00:00:00 2001
From: WangXiangUSTC
Date: Sat, 7 Mar 2020 10:54:37 +0800
Subject: [PATCH 10/35] update unit test

---
 pkg/gtid/gtid_test.go | 52 +++++++++++++++++++++----------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

diff --git a/pkg/gtid/gtid_test.go b/pkg/gtid/gtid_test.go
index e660c7dc0a..7ec1a004d0 100644
--- a/pkg/gtid/gtid_test.go
+++ b/pkg/gtid/gtid_test.go
@@ -77,9 +77,9 @@ func (s *testGTIDSuite) TestGTID(c *C) {
 func (s *testGTIDSuite) TestMySQLGTIDEqual(c *C) {
 	var (
-		g1     *mySQLGTIDSet
-		g2     *mySQLGTIDSet
-		gMaria *mariadbGTIDSet
+		g1     *MySQLGTIDSet
+		g2     *MySQLGTIDSet
+		gMaria *MariadbGTIDSet
 	)
 
 	c.Assert(g1.Equal(nil), IsTrue)
@@ -88,20 +88,20 @@ func (s *testGTIDSuite) TestMySQLGTIDEqual(c *C) {
 
 	gSet, err := ParserGTID("mysql", "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,406a3f61-690d-11e7-87c5-6c92bf46f384:1-94321383,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,686e1ab6-c47e-11e7-a42c-6c92bf46f384:1-34981190,03fc0263-28c7-11e7-a653-6c0b84d59f30:1-7041423,05474d3c-28c7-11e7-8352-203db246dd3d:1-170,10b039fc-c843-11e7-8f6a-1866daf8d810:1-308290454")
 	c.Assert(err, IsNil)
-	g1 = gSet.(*mySQLGTIDSet)
+	g1 = gSet.(*MySQLGTIDSet)
 	c.Assert(g1.Equal(g2), IsFalse)
 
 	gSet, err = ParserGTID("mysql", "03fc0263-28c7-11e7-a653-6c0b84d59f30:1-7041423,05474d3c-28c7-11e7-8352-203db246dd3d:1-170,10b039fc-c843-11e7-8f6a-1866daf8d810:1-308290454,3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14,406a3f61-690d-11e7-87c5-6c92bf46f384:1-94321383,53bfca22-690d-11e7-8a62-18ded7a37b78:1-495,686e1ab6-c47e-11e7-a42c-6c92bf46f384:1-34981190")
 	c.Assert(err, IsNil)
-	g2 = gSet.(*mySQLGTIDSet)
+	g2 = gSet.(*MySQLGTIDSet)
 	c.Assert(g1.Equal(g2), IsTrue)
 }
 
 func (s *testGTIDSuite) TestMariaGTIDEqual(c *C) {
 	var (
-		g1     *mariadbGTIDSet
-		g2     *mariadbGTIDSet
-		gMySQL *mySQLGTIDSet
+		g1     *MariadbGTIDSet
+		g2     *MariadbGTIDSet
+		gMySQL *MySQLGTIDSet
 	)
 
 	c.Assert(g1.Equal(nil), IsTrue)
@@ -110,20 +110,20 @@ func (s *testGTIDSuite) TestMariaGTIDEqual(c *C) {
 
 	gSet, err := ParserGTID("mariadb", "1-1-1,2-2-2")
 	c.Assert(err, IsNil)
-	g1 = gSet.(*mariadbGTIDSet)
+	g1 = gSet.(*MariadbGTIDSet)
 	c.Assert(g1.Equal(g2), IsFalse)
 
 	gSet, 
err = ParserGTID("mariadb", "2-2-2,1-1-1") c.Assert(err, IsNil) - g2 = gSet.(*mariadbGTIDSet) + g2 = gSet.(*MariadbGTIDSet) c.Assert(g1.Equal(g2), IsTrue) } func (s *testGTIDSuite) TestMySQLGTIDContain(c *C) { var ( - g1 *mySQLGTIDSet - g2 *mySQLGTIDSet - gMaria *mariadbGTIDSet + g1 *MySQLGTIDSet + g2 *MySQLGTIDSet + gMaria *MariadbGTIDSet ) c.Assert(g1.Contain(g2), IsTrue) // all nil c.Assert(g1.Contain(gMaria), IsFalse) // incompatible @@ -131,30 +131,30 @@ func (s *testGTIDSuite) TestMySQLGTIDContain(c *C) { // one nil gSet, err := ParserGTID("mysql", "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-10,406a3f61-690d-11e7-87c5-6c92bf46f384:1-10") c.Assert(err, IsNil) - g1 = gSet.(*mySQLGTIDSet) + g1 = gSet.(*MySQLGTIDSet) c.Assert(g1.Contain(g2), IsTrue) c.Assert(g2.Contain(g1), IsFalse) // contain gSet, err = ParserGTID("mysql", "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-5,406a3f61-690d-11e7-87c5-6c92bf46f384:1-10") c.Assert(err, IsNil) - g2 = gSet.(*mySQLGTIDSet) + g2 = gSet.(*MySQLGTIDSet) c.Assert(g1.Contain(g2), IsTrue) c.Assert(g2.Contain(g1), IsFalse) // not contain gSet, err = ParserGTID("mysql", "03fc0263-28c7-11e7-a653-6c0b84d59f30:1-5,406a3f61-690d-11e7-87c5-6c92bf46f384:1-10") c.Assert(err, IsNil) - g2 = gSet.(*mySQLGTIDSet) + g2 = gSet.(*MySQLGTIDSet) c.Assert(g1.Contain(g2), IsFalse) c.Assert(g2.Contain(g1), IsFalse) } func (s *testGTIDSuite) TestMairaGTIDContain(c *C) { var ( - g1 *mariadbGTIDSet - g2 *mariadbGTIDSet - gMySQL *mySQLGTIDSet + g1 *MariadbGTIDSet + g2 *MariadbGTIDSet + gMySQL *MySQLGTIDSet ) c.Assert(g1.Contain(g2), IsTrue) // all nil c.Assert(g1.Contain(gMySQL), IsFalse) // incompatible @@ -162,21 +162,21 @@ func (s *testGTIDSuite) TestMairaGTIDContain(c *C) { // one nil gSet, err := ParserGTID("mariadb", "1-1-1,2-2-2") c.Assert(err, IsNil) - g1 = gSet.(*mariadbGTIDSet) + g1 = gSet.(*MariadbGTIDSet) c.Assert(g1.Contain(g2), IsTrue) c.Assert(g2.Contain(g1), IsFalse) // contain gSet, err = ParserGTID("mariadb", "1-1-1,2-2-1") c.Assert(err, IsNil) - g2 = gSet.(*mariadbGTIDSet) + g2 = gSet.(*MariadbGTIDSet) c.Assert(g1.Contain(g2), IsTrue) c.Assert(g2.Contain(g1), IsFalse) // not contain gSet, err = ParserGTID("mariadb", "1-1-2,2-2-1") c.Assert(err, IsNil) - g2 = gSet.(*mariadbGTIDSet) + g2 = gSet.(*MariadbGTIDSet) c.Assert(g1.Contain(g2), IsFalse) c.Assert(g2.Contain(g1), IsFalse) } @@ -186,9 +186,9 @@ func (s *testGTIDSuite) TestMySQLGTIDTruncate(c *C) { flavor = "mysql" g1, _ = ParserGTID(flavor, "00c04543-f584-11e9-a765-0242ac120002:100") g2, _ = ParserGTID(flavor, "00c04543-f584-11e9-a765-0242ac120002:100") - gNil *mySQLGTIDSet + gNil *MySQLGTIDSet gEmpty, _ = ParserGTID(flavor, "") - gMariaDBNil *mariadbGTIDSet + gMariaDBNil *MariadbGTIDSet ) // truncate to nil or empty GTID sets has no effect c.Assert(g1.Truncate(nil), IsNil) @@ -284,9 +284,9 @@ func (s *testGTIDSuite) TestMariaDBGTIDTruncate(c *C) { flavor = "mariadb" g1, _ = ParserGTID(flavor, "1-2-3") g2, _ = ParserGTID(flavor, "1-2-3") - gNil *mariadbGTIDSet + gNil *MariadbGTIDSet gEmpty, _ = ParserGTID(flavor, "") - gMySQLNil *mySQLGTIDSet + gMySQLNil *MySQLGTIDSet ) // truncate to nil or empty GTID sets has no effect c.Assert(g1.Truncate(nil), IsNil) From 556a424782971aa5beb5c64366d1c59a0306d561 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 15:06:31 +0800 Subject: [PATCH 11/35] fix increment test --- dm/worker/server.go | 2 +- pkg/binlog/position.go | 13 +++++-- pkg/gtid/gtid.go | 6 ++++ syncer/checkpoint.go | 9 ++++- syncer/job.go | 31 +++++++++------- 
syncer/sharding-meta/shardmeta_test.go | 8 ++--- syncer/syncer.go | 45 ++++++++++++------------ tests/incremental_mode/conf/dm-task.yaml | 2 ++ tests/incremental_mode/run.sh | 8 +++++ 9 files changed, 80 insertions(+), 44 deletions(-) diff --git a/dm/worker/server.go b/dm/worker/server.go index 918c6dfeff..3a7ca565ff 100644 --- a/dm/worker/server.go +++ b/dm/worker/server.go @@ -808,7 +808,7 @@ func makeCommonWorkerResponse(reqErr error) *pb.CommonWorkerResponse { // all subTask in subTaskCfgs should have same source // this function return the min position in all subtasks, used for relay's position -// Notes: used for relay, so don't need to use GTID +// TODO: get min gtidSet func getMinPosInAllSubTasks(ctx context.Context, subTaskCfgs []*config.SubTaskConfig) (minPos *mysql.Position, err error) { for _, subTaskCfg := range subTaskCfgs { pos, err := getMinPosForSubTaskFunc(ctx, subTaskCfg) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index 9489640106..a8926d376d 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -170,8 +170,17 @@ type Location struct { GTIDSet gtid.Set } -func (p Location) String() string { - return fmt.Sprintf("Position: %v, GTIDSet: %s", p.Position, p.GTIDSet) +func (l Location) String() string { + return fmt.Sprintf("Position: %v, GTIDSet: %s", l.Position, l.GTIDSet) +} + +// Clone clones a same Location +func (l Location) Clone() Location { + newGTIDSet := l.GTIDSet.Clone() + return Location{ + Position: l.Position, + GTIDSet: newGTIDSet, + } } // CompareLocation returns: diff --git a/pkg/gtid/gtid.go b/pkg/gtid/gtid.go index 3b5d89e04d..98f4e82aff 100644 --- a/pkg/gtid/gtid.go +++ b/pkg/gtid/gtid.go @@ -136,6 +136,9 @@ func (g *MySQLGTIDSet) get(uuid string) (*mysql.UUIDSet, bool) { // Clone implements Set.Clone func (g *MySQLGTIDSet) Clone() Set { + if g.set == nil { + return &MySQLGTIDSet{} + } return &MySQLGTIDSet{ set: g.set.Clone().(*mysql.MysqlGTIDSet), } @@ -290,6 +293,9 @@ func (m *MariadbGTIDSet) get(domainID uint32) (*mysql.MariadbGTID, bool) { // Clone implements Set.Clone func (m *MariadbGTIDSet) Clone() Set { + if m.set == nil { + return &MariadbGTIDSet{} + } return &MariadbGTIDSet{ set: m.set.Clone().(*mysql.MariadbGTIDSet), } diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index 3216e71d5a..5d34141c75 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -327,7 +327,8 @@ func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, loc } */ if pbinlog.CompareLocation(cp.globalPoint.location, location) > 0 { - panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) + //panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) + cp.logCtx.L().Error(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) } // we save table checkpoint while we meet DDL or DML @@ -513,6 +514,9 @@ func (cp *RemoteCheckPoint) TablePoint() map[string]map[string]pbinlog.Location // FlushedGlobalPoint implements CheckPoint.FlushedGlobalPoint func (cp *RemoteCheckPoint) FlushedGlobalPoint() pbinlog.Location { + cp.RLock() + defer cp.RUnlock() + return cp.globalPoint.FlushedMySQLLocation() } @@ -598,6 +602,9 @@ func (cp *RemoteCheckPoint) createTable(tctx *tcontext.Context) error { // Load implements CheckPoint.Load func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.Tracker) error { + cp.Lock() + defer cp.Unlock() + query := `SELECT cp_schema, cp_table, 
binlog_name, binlog_pos, binlog_gtid, table_info, is_global FROM ` + cp.tableName + ` WHERE id = ?` rows, err := cp.dbConn.querySQL(tctx, query, cp.id) defer func() { diff --git a/syncer/job.go b/syncer/job.go index df854911e7..9bb6c784f4 100644 --- a/syncer/job.go +++ b/syncer/job.go @@ -83,12 +83,9 @@ func (j *job) String() string { } func newJob(tp opType, sourceSchema, sourceTable, targetSchema, targetTable, sql string, args []interface{}, key string, location, cmdLocation binlog.Location, traceID string) *job { - /* - var gs gtid.Set - if currentGtidSet != nil { - gs = currentGtidSet.Clone() - } - */ + location1 := location.Clone() + cmdLocation1 := cmdLocation.Clone() + return &job{ tp: tp, sourceSchema: sourceSchema, @@ -98,19 +95,22 @@ func newJob(tp opType, sourceSchema, sourceTable, targetSchema, targetTable, sql sql: sql, args: args, key: key, - location: location, - currentLocation: cmdLocation, + location: location1, + currentLocation: cmdLocation1, retry: true, traceID: traceID, } } func newDDLJob(ddlInfo *shardingDDLInfo, ddls []string, location, cmdLocation binlog.Location, traceID string) *job { + location1 := location.Clone() + cmdLocation1 := cmdLocation.Clone() + j := &job{ tp: ddl, ddls: ddls, - location: location, - currentLocation: cmdLocation, + location: location1, + currentLocation: cmdLocation1, traceID: traceID, } @@ -125,10 +125,13 @@ func newDDLJob(ddlInfo *shardingDDLInfo, ddls []string, location, cmdLocation bi } func newXIDJob(location, cmdLocation binlog.Location, traceID string) *job { + location1 := location.Clone() + cmdLocation1 := cmdLocation.Clone() + return &job{ tp: xid, - location: location, - currentLocation: cmdLocation, + location: location1, + currentLocation: cmdLocation1, traceID: traceID, } } @@ -140,9 +143,11 @@ func newFlushJob() *job { } func newSkipJob(location binlog.Location) *job { + location1 := location.Clone() + return &job{ tp: skip, - location: location, + location: location1, } } diff --git a/syncer/sharding-meta/shardmeta_test.go b/syncer/sharding-meta/shardmeta_test.go index c1de16871d..79d8603216 100644 --- a/syncer/sharding-meta/shardmeta_test.go +++ b/syncer/sharding-meta/shardmeta_test.go @@ -92,7 +92,7 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.InSequenceSharding(), check.IsTrue) location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(location.Position, check.DeepEquals, items[1].FirstLocation) + c.Assert(location.Position, check.DeepEquals, items[1].FirstLocation.Position) sqls, args = meta.FlushData(sourceID, tableID) c.Assert(sqls, check.HasLen, 4) @@ -126,7 +126,7 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.InSequenceSharding(), check.IsTrue) location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(location.Position, check.DeepEquals, items[1].FirstLocation) + c.Assert(location.Position, check.DeepEquals, items[1].FirstLocation.Position) // find synced in shrading group, and call ShardingMeta.ResolveShardingDDL c.Assert(meta.ResolveShardingDDL(), check.IsFalse) @@ -138,7 +138,7 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.InSequenceSharding(), check.IsTrue) location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(location.Position, check.DeepEquals, items[2].FirstLocation) + c.Assert(location.Position, check.DeepEquals, items[2].FirstLocation.Position) sqls, args = meta.FlushData(sourceID, tableID) c.Assert(sqls, check.HasLen, 4) @@ -171,7 
+171,7 @@ func (t *testShardMetaSuite) TestShardingMeta(c *check.C) { c.Assert(meta.InSequenceSharding(), check.IsTrue) location, err = meta.ActiveDDLFirstLocation() c.Assert(err, check.IsNil) - c.Assert(location.Position, check.DeepEquals, items[2].FirstLocation) + c.Assert(location.Position, check.DeepEquals, items[2].FirstLocation.Position) // find synced in shrading group, and call ShardingMeta.ResolveShardingDDL c.Assert(meta.ResolveShardingDDL(), check.IsTrue) diff --git a/syncer/syncer.go b/syncer/syncer.go index 6aa3723a05..7792d60903 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -753,7 +753,7 @@ func (s *Syncer) addJob(job *job) error { func (s *Syncer) saveGlobalPoint(globalLocation binlog.Location) { if s.cfg.IsSharding { // TODO: maybe need to compare GTID? - globalLocation = s.sgk.AdjustGlobalLocation(globalLocation) + globalLocation = s.sgk.AdjustGlobalLocation(globalLocation).Clone() } s.checkpoint.SaveGlobalPoint(globalLocation) } @@ -840,7 +840,7 @@ func (s *Syncer) syncDDL(tctx *tcontext.Context, queueBucket string, db *DBConn, if err != nil { s.appendExecErrors(&ExecErrorContext{ err: err, - location: sqlJob.currentLocation, + location: sqlJob.currentLocation.Clone(), jobs: fmt.Sprintf("%v", sqlJob.ddls), }) } @@ -911,7 +911,7 @@ func (s *Syncer) sync(tctx *tcontext.Context, queueBucket string, db *DBConn, jo } affected, err := db.executeSQL(tctx, queries, args...) if err != nil { - errCtx := &ExecErrorContext{err, jobs[affected].currentLocation, fmt.Sprintf("%v", jobs)} + errCtx := &ExecErrorContext{err, jobs[affected].currentLocation.Clone(), fmt.Sprintf("%v", jobs)} s.appendExecErrors(errCtx) } return err @@ -986,13 +986,13 @@ func (s *Syncer) Run(ctx context.Context) (err error) { // we use currentPos to replace and skip binlog event of specified position and update table checkpoint in sharding ddl // we use lastPos to update global checkpoint and table checkpoint var ( - currentLocation = s.checkpoint.GlobalPoint() // also init to global checkpoint - lastLocation = s.checkpoint.GlobalPoint() + currentLocation = s.checkpoint.GlobalPoint().Clone() // also init to global checkpoint + lastLocation = s.checkpoint.GlobalPoint().Clone() ) s.tctx.L().Info("replicate binlog from checkpoint", zap.Stringer("checkpoint", lastLocation)) if s.streamerController.IsClosed() { - err = s.streamerController.Start(tctx, lastLocation) + err = s.streamerController.Start(tctx, lastLocation.Clone()) if err != nil { return terror.Annotate(err, "fail to restart streamer controller") } @@ -1083,24 +1083,24 @@ func (s *Syncer) Run(ctx context.Context) (err error) { // if remaining DDLs in sequence, redirect global stream to the next sharding DDL position if !shardingReSync.allResolved { - nextPos, err2 := s.sgk.ActiveDDLFirstLocation(shardingReSync.targetSchema, shardingReSync.targetTable) + nextLocation, err2 := s.sgk.ActiveDDLFirstLocation(shardingReSync.targetSchema, shardingReSync.targetTable) if err2 != nil { return err2 } - err2 = s.streamerController.RedirectStreamer(s.tctx, nextPos) + err2 = s.streamerController.RedirectStreamer(s.tctx, nextLocation.Clone()) if err2 != nil { return err2 } } shardingReSync = nil - lastLocation = savedGlobalLastLocation // restore global last pos + lastLocation = savedGlobalLastLocation.Clone() // restore global last pos return nil } for { s.currentLocationMu.Lock() - s.currentLocationMu.currentLocation = currentLocation + s.currentLocationMu.currentLocation = currentLocation.Clone() s.currentLocationMu.Unlock() // fetch from sharding resync 
channel if needed, and redirect global @@ -1108,10 +1108,10 @@ func (s *Syncer) Run(ctx context.Context) (err error) { if shardingReSync == nil && len(shardingReSyncCh) > 0 { // some sharding groups need to re-syncing shardingReSync = <-shardingReSyncCh - savedGlobalLastLocation = lastLocation // save global last location - lastLocation = shardingReSync.currLocation + savedGlobalLastLocation = lastLocation.Clone() // save global last location + lastLocation = shardingReSync.currLocation.Clone() - err = s.streamerController.RedirectStreamer(s.tctx, shardingReSync.currLocation) + err = s.streamerController.RedirectStreamer(s.tctx, shardingReSync.currLocation.Clone()) if err != nil { return err } @@ -1126,7 +1126,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) { // we only inject sqls in global streaming to avoid DDL position confusion if shardingReSync == nil { - e = s.tryInject(latestOp, currentLocation) + e = s.tryInject(latestOp, currentLocation.Clone()) latestOp = null } if e == nil { @@ -1151,7 +1151,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) { eventTimeoutCounter = 0 if s.needResync() { s.tctx.L().Info("timeout when fetching binlog event, there must be some problems with replica connection, try to re-connect") - err = s.streamerController.ReopenWithRetry(tctx, lastLocation) + err = s.streamerController.ReopenWithRetry(tctx, lastLocation.Clone()) if err != nil { return err } @@ -1160,7 +1160,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) { } else if isDuplicateServerIDError(err) { // if the server id is already used, need to use a new server id tctx.L().Info("server id is already used by another slave, will change to a new server id and get event again") - err1 := s.streamerController.UpdateServerIDAndResetReplication(tctx, lastLocation) + err1 := s.streamerController.UpdateServerIDAndResetReplication(tctx, lastLocation.Clone()) if err1 != nil { return err1 } @@ -1171,7 +1171,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) { s.tctx.L().Error("fail to fetch binlog", log.ShortError(err)) if s.streamerController.CanRetry() { - err = s.streamerController.ResetReplicationSyncer(s.tctx, lastLocation) + err = s.streamerController.ResetReplicationSyncer(s.tctx, lastLocation.Clone()) if err != nil { return err } @@ -1181,7 +1181,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) { // try to re-sync in gtid mode if tryReSync && s.cfg.EnableGTID && isBinlogPurgedError(err) && s.cfg.AutoFixGTID { time.Sleep(retryTimeout) - err = s.reSyncBinlog(*tctx, lastLocation) + err = s.reSyncBinlog(*tctx, lastLocation.Clone()) if err != nil { return err } @@ -1242,10 +1242,10 @@ func (s *Syncer) Run(ctx context.Context) (err error) { case *replication.XIDEvent: if shardingReSync != nil { shardingReSync.currLocation.Position.Pos = e.Header.LogPos - // TODO:update gtid + shardingReSync.currLocation.GTIDSet.Set(ev.GSet) // only need compare binlog position? 
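Note: the Clone calls threaded through Run exist because Location's GTIDSet field is backed by a pointer, so a plain assignment shares the underlying set, and a later GTIDSet.Set(ev.GSet) like the one above would mutate both copies. A toy illustration of the aliasing this avoids (hypothetical values; assumes the pkg/binlog, pkg/gtid, and go-mysql imports):

    base, _ := gtid.ParserGTID("mysql", "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-10")
    loc := binlog.Location{Position: mysql.Position{Name: "mysql-bin.000001", Pos: 4}, GTIDSet: base}
    shared := loc         // value copy, but both GTIDSet fields point at the same set
    cloned := loc.Clone() // deep copy: Clone copies the GTID set as well
    newer, _ := gtid.ParserGTID("mysql", "3ccc475b-2343-11e7-be21-6c0b84d59f30:1-14")
    _ = shared.GTIDSet.Set(newer.Origin()) // loc.GTIDSet now reads 1-14 too
    fmt.Println(cloned.GTIDSet)            // still 1-10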
- lastLocation = shardingReSync.currLocation + lastLocation = shardingReSync.currLocation.Clone() if binlog.CompareLocation(shardingReSync.currLocation, shardingReSync.latestLocation) >= 0 { s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "XID"), zap.Reflect("re-shard", shardingReSync)) err = closeShardingResync() @@ -1494,7 +1494,6 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e binlogSkippedEventsTotal.WithLabelValues("query", s.cfg.Name).Inc() s.tctx.L().Warn("skip event", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema)) *ec.lastLocation = *ec.currentLocation // before record skip location, update lastLocation - // TODO: update GTID return s.recordSkipSQLsLocation(*ec.lastLocation) } if !parseResult.isDDL { @@ -1504,7 +1503,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e if ec.shardingReSync != nil { ec.shardingReSync.currLocation.Position.Pos = ec.header.LogPos - // TODO: update GTID? + ec.shardingReSync.currLocation.GTIDSet.Set(ev.GSet) if binlog.CompareLocation(ec.shardingReSync.currLocation, ec.shardingReSync.latestLocation) >= 0 { s.tctx.L().Info("re-replicate shard group was completed", zap.String("event", "query"), zap.String("statement", sql), zap.Reflect("re-shard", ec.shardingReSync)) err2 := ec.closeShardingResync() @@ -2003,7 +2002,7 @@ func (s *Syncer) printStatus(ctx context.Context) { totalTps = total / totalSeconds s.currentLocationMu.RLock() - currentLocation := s.currentLocationMu.currentLocation + currentLocation := s.currentLocationMu.currentLocation.Clone() s.currentLocationMu.RUnlock() remainingSize, err2 := s.fromDB.countBinaryLogsSize(currentLocation.Position) diff --git a/tests/incremental_mode/conf/dm-task.yaml b/tests/incremental_mode/conf/dm-task.yaml index 9f6cdd201b..d7018c5924 100644 --- a/tests/incremental_mode/conf/dm-task.yaml +++ b/tests/incremental_mode/conf/dm-task.yaml @@ -20,6 +20,7 @@ mysql-instances: meta: binlog-name: binlog-name-placeholder-1 binlog-pos: binlog-pos-placeholder-1 + binlog-gtid: binlog-gtid-placeholder-1 black-white-list: "instance" mydumper-config-name: "global" loader-config-name: "global" @@ -29,6 +30,7 @@ mysql-instances: meta: binlog-name: binlog-name-placeholder-2 binlog-pos: binlog-pos-placeholder-2 + binlog-gtid: binlog-gtid-placeholder-2 black-white-list: "instance" mydumper-config-name: "global" loader-config-name: "global" diff --git a/tests/incremental_mode/run.sh b/tests/incremental_mode/run.sh index c15d4a4580..91f69cf685 100755 --- a/tests/incremental_mode/run.sh +++ b/tests/incremental_mode/run.sh @@ -46,13 +46,17 @@ function run() { then name1=$(grep "Log: " $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') pos1=$(grep "Pos: " $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') + gtid1=$(grep "GTID:" $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2,":",$3}'|tr -d ' ') name2=$(grep "Log: " $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') pos2=$(grep "Pos: " $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') + gtid2=$(grep "GTID:" $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2,":",$3}'|tr -d ' ') else name2=$(grep "Log: " $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') pos2=$(grep "Pos: " $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') + 
gtid2=$(grep "GTID:" $WORK_DIR/worker1/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2,":",$3}'|tr -d ' ') name1=$(grep "Log: " $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') pos1=$(grep "Pos: " $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2}'|tr -d ' ') + gtid1=$(grep "GTID:" $WORK_DIR/worker2/dumped_data.$TASK_NAME/metadata|awk -F: '{print $2,":",$3}'|tr -d ' ') fi # kill worker1 and worker2 kill_dm_worker @@ -94,8 +98,12 @@ function run() { sed -i "s/task-mode-placeholder/incremental/g" $WORK_DIR/dm-task.yaml sed -i "s/binlog-name-placeholder-1/$name1/g" $WORK_DIR/dm-task.yaml sed -i "s/binlog-pos-placeholder-1/$pos1/g" $WORK_DIR/dm-task.yaml + sed -i "s/binlog-gtid-placeholder-1/$gtid1/g" $WORK_DIR/dm-task.yaml + sed -i "s/binlog-name-placeholder-2/$name2/g" $WORK_DIR/dm-task.yaml sed -i "s/binlog-pos-placeholder-2/$pos2/g" $WORK_DIR/dm-task.yaml + sed -i "s/binlog-gtid-placeholder-2/$gtid2/g" $WORK_DIR/dm-task.yaml + sleep 3 dmctl_start_task $WORK_DIR/dm-task.yaml From 1ac97e04696b8c253a26de7fd255113fd4af9a7a Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 18:52:10 +0800 Subject: [PATCH 12/35] minor fix on pointer --- pkg/gtid/gtid.go | 1 + syncer/checkpoint.go | 9 ++------- syncer/sharding-meta/shardmeta.go | 19 +++++++++++++++++-- syncer/sharding_group.go | 4 +++- syncer/syncer.go | 18 ++++++++++-------- tests/_utils/check_sync_diff | 2 +- tests/sharding/conf/source2.toml | 2 +- 7 files changed, 35 insertions(+), 20 deletions(-) diff --git a/pkg/gtid/gtid.go b/pkg/gtid/gtid.go index 98f4e82aff..57c95b6669 100644 --- a/pkg/gtid/gtid.go +++ b/pkg/gtid/gtid.go @@ -139,6 +139,7 @@ func (g *MySQLGTIDSet) Clone() Set { if g.set == nil { return &MySQLGTIDSet{} } + return &MySQLGTIDSet{ set: g.set.Clone().(*mysql.MysqlGTIDSet), } diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index 5d34141c75..47b9d48fb1 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -321,14 +321,9 @@ func (cp *RemoteCheckPoint) SaveTablePoint(sourceSchema, sourceTable string, poi // saveTablePoint saves single table's checkpoint without mutex.Lock func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, location pbinlog.Location, ti *model.TableInfo) { - /* - if pbinlog.ComparePosition(cp.globalPoint.Position, pos) > 0 { - panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", pos, cp.globalPoint)) - } - */ if pbinlog.CompareLocation(cp.globalPoint.location, location) > 0 { - //panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) - cp.logCtx.L().Error(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) + panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) + //cp.logCtx.L().Error(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) } // we save table checkpoint while we meet DDL or DML diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go index e62115b0e2..07079ba825 100644 --- a/syncer/sharding-meta/shardmeta.go +++ b/syncer/sharding-meta/shardmeta.go @@ -39,9 +39,9 @@ type DDLItem struct { } // NewDDLItem creates a new DDLItem -func NewDDLItem(location binlog.Location, ddls []string, source string) *DDLItem { +func NewDDLItem(location *binlog.Location, ddls []string, source string) *DDLItem { return &DDLItem{ - FirstLocation: location, + FirstLocation: location.Clone(), 
DDLs: ddls, Source: source, } @@ -90,6 +90,17 @@ type ShardingMeta struct { table string // table name used in downstream meta db } +func (s *ShardingMeta) String() string { + str := "" + str += fmt.Sprintf("activeIdx: %d, schema: %s, table: %s, global: %v, sources: {", s.activeIdx, s.schema, s.table, s.global) + for source, ss := range s.sources { + str += fmt.Sprintf("%s: %v, ", source, ss) + } + + str += "}" + return str +} + // NewShardingMeta creates a new ShardingMeta func NewShardingMeta(schema, table string) *ShardingMeta { return &ShardingMeta{ @@ -136,6 +147,8 @@ func (meta *ShardingMeta) checkItemExists(item *DDLItem) (int, bool) { return 0, false } for idx, ddlItem := range source.Items { + equal := binlog.CompareLocation(item.FirstLocation, ddlItem.FirstLocation) + log.L().Info("checkItemExists", zap.Stringer("location1", item.FirstLocation), zap.Stringer("location2", ddlItem.FirstLocation), zap.Int("equal", equal)) if binlog.CompareLocation(item.FirstLocation, ddlItem.FirstLocation) == 0 { return idx, true } @@ -151,6 +164,8 @@ func (meta *ShardingMeta) checkItemExists(item *DDLItem) (int, bool) { // returns: // active: whether the DDL will be processed in this round func (meta *ShardingMeta) AddItem(item *DDLItem) (active bool, err error) { + log.L().Info("AddItem", zap.Reflect("item", item), zap.Reflect("firstLocation", item.FirstLocation), zap.Stringer("meta", meta)) + index, exists := meta.checkItemExists(item) if exists { return index == meta.activeIdx, nil diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index 214eb1067a..f6f6540ec6 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -82,6 +82,7 @@ import ( tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/terror" shardmeta "github.com/pingcap/dm/syncer/sharding-meta" + "github.com/pingcap/dm/pkg/log" "github.com/siddontang/go-mysql/mysql" "go.uber.org/zap" @@ -202,7 +203,8 @@ func (sg *ShardingGroup) TrySync(source string, location, endLocation binlog.Loc sg.Lock() defer sg.Unlock() - ddlItem := shardmeta.NewDDLItem(location, ddls, source) + ddlItem := shardmeta.NewDDLItem(&location, ddls, source) + log.L().Info("TrySync", zap.Stringer("location", location), zap.Reflect("ddlItem", ddlItem), zap.Reflect("FirstLocation", ddlItem.FirstLocation)) active, err := sg.meta.AddItem(ddlItem) if err != nil { return sg.remain <= 0, active, sg.remain, err diff --git a/syncer/syncer.go b/syncer/syncer.go index 7792d60903..99c32b0117 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1300,12 +1300,12 @@ func (s *Syncer) handleRotateEvent(ev *replication.RotateEvent, ec eventContext) GTIDSet: ec.currentLocation.GTIDSet, } if binlog.CompareLocation(*ec.currentLocation, *ec.lastLocation) > 0 { - *ec.lastLocation = *ec.currentLocation + *ec.lastLocation = ec.currentLocation.Clone() } if ec.shardingReSync != nil { if binlog.CompareLocation(*ec.currentLocation, ec.shardingReSync.currLocation) > 0 { - ec.shardingReSync.currLocation = *ec.currentLocation + ec.shardingReSync.currLocation = ec.currentLocation.Clone() } if binlog.CompareLocation(ec.shardingReSync.currLocation, ec.shardingReSync.latestLocation) >= 0 { @@ -1333,7 +1333,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err Name: ec.lastLocation.Position.Name, Pos: ec.header.LogPos, }, - GTIDSet: ec.lastLocation.GTIDSet, + GTIDSet: ec.lastLocation.GTIDSet.Clone(), } if ec.shardingReSync != nil { @@ -1478,7 +1478,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec 
eventContext) e Name: ec.lastLocation.Position.Name, Pos: ec.header.LogPos, }, - GTIDSet: ec.lastLocation.GTIDSet, + GTIDSet: ec.lastLocation.GTIDSet.Clone(), } ec.currentLocation.GTIDSet.Set(ev.GSet) @@ -1493,7 +1493,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e if parseResult.ignore { binlogSkippedEventsTotal.WithLabelValues("query", s.cfg.Name).Inc() s.tctx.L().Warn("skip event", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema)) - *ec.lastLocation = *ec.currentLocation // before record skip location, update lastLocation + *ec.lastLocation = ec.currentLocation.Clone() // before record skip location, update lastLocation return s.recordSkipSQLsLocation(*ec.lastLocation) } if !parseResult.isDDL { @@ -1514,7 +1514,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // in re-syncing, we can simply skip all DDLs, // as they have been added to sharding DDL sequence // only update lastPos when the query is a real DDL - *ec.lastLocation = ec.shardingReSync.currLocation + *ec.lastLocation = ec.shardingReSync.currLocation.Clone() // TODO: set gtid s.tctx.L().Debug("skip event in re-replicating sharding group", zap.String("event", "query"), zap.String("statement", sql), zap.Reflect("re-shard", ec.shardingReSync)) } @@ -1522,7 +1522,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } s.tctx.L().Info("", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet)) - *ec.lastLocation = *ec.currentLocation // update lastLocation, because we have checked `isDDL` + *ec.lastLocation = ec.currentLocation.Clone() // update lastLocation, because we have checked `isDDL` *ec.latestOp = ddl var ( @@ -1699,6 +1699,8 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e }, GTIDSet: ec.lastLocation.GTIDSet, } + s.tctx.L().Info("", zap.Stringer("startLocation", startLocation)) + source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name) var annotate string @@ -1771,7 +1773,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } *ec.shardingReSyncCh <- &ShardingReSync{ currLocation: *firstEndLocation, - latestLocation: *ec.currentLocation, + latestLocation: ec.currentLocation.Clone(), targetSchema: ddlInfo.tableNames[1][0].Schema, targetTable: ddlInfo.tableNames[1][0].Name, allResolved: allResolved, diff --git a/tests/_utils/check_sync_diff b/tests/_utils/check_sync_diff index 54e72faf54..ed3d324fa3 100755 --- a/tests/_utils/check_sync_diff +++ b/tests/_utils/check_sync_diff @@ -19,7 +19,7 @@ cd $workdir i=0 while [ $i -lt $check_time ] do - $binary --config=$conf >> $LOG 2>&1 + $binary --config=$conf > $LOG 2>&1 ret=$? 
if [ "$ret" == 0 ]; then echo "check diff successfully" diff --git a/tests/sharding/conf/source2.toml b/tests/sharding/conf/source2.toml index 5455fbb4f7..baabf7d73f 100644 --- a/tests/sharding/conf/source2.toml +++ b/tests/sharding/conf/source2.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-02" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" From b5a50a37bcc5177812e7a18edf88b7ad57b49f61 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 19:01:25 +0800 Subject: [PATCH 13/35] remove useless code --- syncer/sharding-meta/shardmeta.go | 19 ++----------------- syncer/sharding_group.go | 4 +--- syncer/syncer.go | 2 +- tests/sharding/conf/source2.toml | 2 +- 4 files changed, 5 insertions(+), 22 deletions(-) diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go index 07079ba825..e62115b0e2 100644 --- a/syncer/sharding-meta/shardmeta.go +++ b/syncer/sharding-meta/shardmeta.go @@ -39,9 +39,9 @@ type DDLItem struct { } // NewDDLItem creates a new DDLItem -func NewDDLItem(location *binlog.Location, ddls []string, source string) *DDLItem { +func NewDDLItem(location binlog.Location, ddls []string, source string) *DDLItem { return &DDLItem{ - FirstLocation: location.Clone(), + FirstLocation: location, DDLs: ddls, Source: source, } @@ -90,17 +90,6 @@ type ShardingMeta struct { table string // table name used in downstream meta db } -func (s *ShardingMeta) String() string { - str := "" - str += fmt.Sprintf("activeIdx: %d, schema: %s, table: %s, global: %v, sources: {", s.activeIdx, s.schema, s.table, s.global) - for source, ss := range s.sources { - str += fmt.Sprintf("%s: %v, ", source, ss) - } - - str += "}" - return str -} - // NewShardingMeta creates a new ShardingMeta func NewShardingMeta(schema, table string) *ShardingMeta { return &ShardingMeta{ @@ -147,8 +136,6 @@ func (meta *ShardingMeta) checkItemExists(item *DDLItem) (int, bool) { return 0, false } for idx, ddlItem := range source.Items { - equal := binlog.CompareLocation(item.FirstLocation, ddlItem.FirstLocation) - log.L().Info("checkItemExists", zap.Stringer("location1", item.FirstLocation), zap.Stringer("location2", ddlItem.FirstLocation), zap.Int("equal", equal)) if binlog.CompareLocation(item.FirstLocation, ddlItem.FirstLocation) == 0 { return idx, true } @@ -164,8 +151,6 @@ func (meta *ShardingMeta) checkItemExists(item *DDLItem) (int, bool) { // returns: // active: whether the DDL will be processed in this round func (meta *ShardingMeta) AddItem(item *DDLItem) (active bool, err error) { - log.L().Info("AddItem", zap.Reflect("item", item), zap.Reflect("firstLocation", item.FirstLocation), zap.Stringer("meta", meta)) - index, exists := meta.checkItemExists(item) if exists { return index == meta.activeIdx, nil diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index f6f6540ec6..214eb1067a 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -82,7 +82,6 @@ import ( tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/terror" shardmeta "github.com/pingcap/dm/syncer/sharding-meta" - "github.com/pingcap/dm/pkg/log" "github.com/siddontang/go-mysql/mysql" "go.uber.org/zap" @@ -203,8 +202,7 @@ func (sg *ShardingGroup) TrySync(source string, location, endLocation binlog.Loc sg.Lock() defer sg.Unlock() - ddlItem := shardmeta.NewDDLItem(&location, ddls, source) - log.L().Info("TrySync", zap.Stringer("location", location), zap.Reflect("ddlItem", ddlItem), zap.Reflect("FirstLocation", 
ddlItem.FirstLocation)) + ddlItem := shardmeta.NewDDLItem(location, ddls, source) active, err := sg.meta.AddItem(ddlItem) if err != nil { return sg.remain <= 0, active, sg.remain, err diff --git a/syncer/syncer.go b/syncer/syncer.go index 99c32b0117..2bb7c35b61 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1697,7 +1697,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e Name: ec.currentLocation.Position.Name, Pos: ec.currentLocation.Position.Pos - ec.header.EventSize, }, - GTIDSet: ec.lastLocation.GTIDSet, + GTIDSet: ec.lastLocation.GTIDSet.Clone(), } s.tctx.L().Info("", zap.Stringer("startLocation", startLocation)) diff --git a/tests/sharding/conf/source2.toml b/tests/sharding/conf/source2.toml index baabf7d73f..5455fbb4f7 100644 --- a/tests/sharding/conf/source2.toml +++ b/tests/sharding/conf/source2.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-02" flavor = "" -enable-gtid = true +enable-gtid = false relay-binlog-name = "" relay-binlog-gtid = "" From bd70d96ff9bfb7d98b9f49af1a579f0262674024 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 19:14:31 +0800 Subject: [PATCH 14/35] minor update --- syncer/checkpoint.go | 22 +++++----------------- syncer/inject_sql.go | 1 - syncer/job.go | 10 +++------- syncer/operator.go | 2 -- syncer/sharding-meta/shardmeta.go | 1 - syncer/syncer.go | 2 -- syncer/warning.go | 2 -- 7 files changed, 8 insertions(+), 32 deletions(-) diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index 47b9d48fb1..e634d3c321 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -59,14 +59,10 @@ var ( type binlogPoint struct { sync.RWMutex - //pbinlog.Location - //gtid string location pbinlog.Location ti *model.TableInfo - //flushedPos pbinlog.Location // pos which flushed permanently - //flushedGTID string // gtid which flushed permanently - flushedLocation pbinlog.Location + flushedLocation pbinlog.Location // location which flushed permanently flushedTI *model.TableInfo } @@ -82,13 +78,7 @@ func newBinlogPoint(location, flushedLocation pbinlog.Location, ti, flushedTI *m func (b *binlogPoint) save(location pbinlog.Location, ti *model.TableInfo) error { b.Lock() defer b.Unlock() - // TODO: add gtid compare - /* - if pbinlog.ComparePosition(pos, b.Position) < 0 { - // support to save equal pos, but not older pos - return terror.ErrCheckpointSaveInvalidPos.Generate(pos, b.Position) - } - */ + if pbinlog.CompareLocation(location, b.location) < 0 { // support to save equal pos, but not older pos return terror.ErrCheckpointSaveInvalidPos.Generate(location, b.location.Position) @@ -119,19 +109,18 @@ func (b *binlogPoint) rollback() (isSchemaChanged bool) { func (b *binlogPoint) outOfDate() bool { b.RLock() defer b.RUnlock() - // TODO: add gtid compare - //return pbinlog.ComparePosition(b.Position, b.flushedPos) > 0 + return pbinlog.CompareLocation(b.location, b.flushedLocation) > 0 } -// MySQLPoint returns point as pbinlog.Location +// MySQLLocation returns point as pbinlog.Location func (b *binlogPoint) MySQLLocation() pbinlog.Location { b.RLock() defer b.RUnlock() return b.location } -// FlushedMySQLPoint returns flushed point as pbinlog.Location +// FlushedMySQLLocation returns flushed point as pbinlog.Location func (b *binlogPoint) FlushedMySQLLocation() pbinlog.Location { b.RLock() defer b.RUnlock() @@ -323,7 +312,6 @@ func (cp *RemoteCheckPoint) SaveTablePoint(sourceSchema, sourceTable string, poi func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, location 
pbinlog.Location, ti *model.TableInfo) { if pbinlog.CompareLocation(cp.globalPoint.location, location) > 0 { panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) - //cp.logCtx.L().Error(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) } // we save table checkpoint while we meet DDL or DML diff --git a/syncer/inject_sql.go b/syncer/inject_sql.go index 1f79c768e8..db29203df6 100644 --- a/syncer/inject_sql.go +++ b/syncer/inject_sql.go @@ -19,7 +19,6 @@ import ( "github.com/pingcap/parser" "github.com/pingcap/parser/ast" - //"github.com/siddontang/go-mysql/mysql" "github.com/siddontang/go-mysql/replication" "go.uber.org/zap" diff --git a/syncer/job.go b/syncer/job.go index 9bb6c784f4..aa7ba37c7f 100644 --- a/syncer/job.go +++ b/syncer/job.go @@ -16,9 +16,6 @@ package syncer import ( "fmt" - //"github.com/siddontang/go-mysql/mysql" - - //"github.com/pingcap/dm/pkg/gtid" "github.com/pingcap/dm/pkg/binlog" ) @@ -71,10 +68,9 @@ type job struct { retry bool location binlog.Location currentLocation binlog.Location // exactly binlog position of current SQL - //gtidSet gtid.Set - ddls []string - traceID string - traceGID string + ddls []string + traceID string + traceGID string } func (j *job) String() string { diff --git a/syncer/operator.go b/syncer/operator.go index 40fa479336..d6ba766325 100644 --- a/syncer/operator.go +++ b/syncer/operator.go @@ -14,8 +14,6 @@ package syncer import ( - //"github.com/siddontang/go-mysql/mysql" - "github.com/pingcap/dm/dm/pb" "github.com/pingcap/dm/pkg/binlog" ) diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go index e62115b0e2..83641d9893 100644 --- a/syncer/sharding-meta/shardmeta.go +++ b/syncer/sharding-meta/shardmeta.go @@ -17,7 +17,6 @@ import ( "encoding/json" "fmt" - //"github.com/siddontang/go-mysql/mysql" "go.uber.org/zap" "github.com/pingcap/dm/pkg/binlog" diff --git a/syncer/syncer.go b/syncer/syncer.go index 2bb7c35b61..ed4f479085 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1515,7 +1515,6 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // as they have been added to sharding DDL sequence // only update lastPos when the query is a real DDL *ec.lastLocation = ec.shardingReSync.currLocation.Clone() - // TODO: set gtid s.tctx.L().Debug("skip event in re-replicating sharding group", zap.String("event", "query"), zap.String("statement", sql), zap.Reflect("re-shard", ec.shardingReSync)) } return nil @@ -1699,7 +1698,6 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e }, GTIDSet: ec.lastLocation.GTIDSet.Clone(), } - s.tctx.L().Info("", zap.Stringer("startLocation", startLocation)) source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name) diff --git a/syncer/warning.go b/syncer/warning.go index 6b42257f63..7280d56f51 100644 --- a/syncer/warning.go +++ b/syncer/warning.go @@ -17,8 +17,6 @@ import ( "fmt" "sort" - //"github.com/siddontang/go-mysql/mysql" - "github.com/pingcap/dm/dm/pb" "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/utils" From 79d86e64224cc77c45a17d4748f3a2714664d39a Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 20:03:25 +0800 Subject: [PATCH 15/35] fix shard meta data restore --- pkg/binlog/position.go | 2 +- syncer/sharding-meta/shardmeta.go | 34 +++++++++++++++++++++++--- syncer/sharding-meta/shardmeta_test.go | 2 +- syncer/sharding_group.go | 4 +-- syncer/syncer.go | 2 +- 
 5 files changed, 35 insertions(+), 9 deletions(-)

diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go
index a8926d376d..509e09487e 100644
--- a/pkg/binlog/position.go
+++ b/pkg/binlog/position.go
@@ -171,7 +171,7 @@ type Location struct {
 }
 
 func (l Location) String() string {
-	return fmt.Sprintf("Position: %v, GTIDSet: %s", l.Position, l.GTIDSet)
+	return fmt.Sprintf("position: %v, gtid-set: %s", l.Position, l.GTIDSet)
 }
 
 // Clone clones a same Location
diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go
index 83641d9893..5bcb2b9ca9 100644
--- a/syncer/sharding-meta/shardmeta.go
+++ b/syncer/sharding-meta/shardmeta.go
@@ -17,9 +17,11 @@ import (
 	"encoding/json"
 	"fmt"
 
+	"github.com/siddontang/go-mysql/mysql"
 	"go.uber.org/zap"
 
 	"github.com/pingcap/dm/pkg/binlog"
+	"github.com/pingcap/dm/pkg/gtid"
 	"github.com/pingcap/dm/pkg/log"
 	"github.com/pingcap/dm/pkg/terror"
 	"github.com/pingcap/dm/pkg/utils"
@@ -32,9 +34,14 @@ const (
 
 // DDLItem records ddl information used in sharding sequence organization
 type DDLItem struct {
-	FirstLocation binlog.Location `json:"first-location"` // first DDL's binlog Pos, not the End_log_pos of the event
-	DDLs          []string        `json:"ddls"`           // DDLs, these ddls are in the same QueryEvent
-	Source        string          `json:"source"`         // source table ID
+	FirstLocation binlog.Location `json:"-"`      // first DDL's binlog Pos, not the End_log_pos of the event
+	DDLs          []string        `json:"ddls"`   // DDLs, these ddls are in the same QueryEvent
+	Source        string          `json:"source"` // source table ID
+
+	// only used for JSON marshal and unmarshal, because gtid.Set in FirstLocation is an interface
+	// and can't be marshaled and unmarshaled directly
+	FirstPosition mysql.Position `json:"first-position"`
+	FirstGTIDSet  string         `json:"first-gtid-set"`
 }
 
 // NewDDLItem creates a new DDLItem
@@ -100,12 +107,25 @@ func NewShardingMeta(schema, table string) *ShardingMeta {
 }
 
 // RestoreFromData restores ShardingMeta from given data
-func (meta *ShardingMeta) RestoreFromData(sourceTableID string, activeIdx int, isGlobal bool, data []byte) error {
+func (meta *ShardingMeta) RestoreFromData(sourceTableID string, activeIdx int, isGlobal bool, data []byte, flavor string) error {
 	items := make([]*DDLItem, 0)
 	err := json.Unmarshal(data, &items)
 	if err != nil {
 		return terror.ErrSyncUnitInvalidShardMeta.Delegate(err)
 	}
+
+	// set FirstLocation
+	for _, item := range items {
+		gset, err1 := gtid.ParserGTID(flavor, item.FirstGTIDSet)
+		if err1 != nil {
+			return err1
+		}
+		item.FirstLocation = binlog.Location{
+			Position: item.FirstPosition,
+			GTIDSet:  gset,
+		}
+	}
+
 	if isGlobal {
 		meta.global = &ShardingSequence{Items: items}
 	} else {
@@ -236,6 +256,12 @@ func (meta *ShardingMeta) ActiveDDLFirstLocation() (binlog.Location, error) {
 
 // FlushData returns sharding meta flush SQL and args
 func (meta *ShardingMeta) FlushData(sourceID, tableID string) ([]string, [][]interface{}) {
+	// set FirstPosition and FirstGTIDSet for JSON marshal
+	for _, item := range meta.global.Items {
+		item.FirstPosition = item.FirstLocation.Position
+		item.FirstGTIDSet = item.FirstLocation.GTIDSet.String()
+	}
+
 	if len(meta.global.Items) == 0 {
 		sql2 := fmt.Sprintf("DELETE FROM `%s`.`%s` where source_id=?
and target_table_id=?", meta.schema, meta.table) args2 := []interface{}{sourceID, tableID} diff --git a/syncer/sharding-meta/shardmeta_test.go b/syncer/sharding-meta/shardmeta_test.go index 79d8603216..4c0c4569f4 100644 --- a/syncer/sharding-meta/shardmeta_test.go +++ b/syncer/sharding-meta/shardmeta_test.go @@ -268,7 +268,7 @@ func (t *testShardMetaSuite) TestFlushLoadMeta(c *check.C) { c.Assert(args, check.HasLen, 3) for _, arg := range args { c.Assert(arg, check.HasLen, 8) - loadedMeta.RestoreFromData(arg[2].(string), arg[3].(int), arg[4].(bool), []byte(arg[5].(string))) + loadedMeta.RestoreFromData(arg[2].(string), arg[3].(int), arg[4].(bool), []byte(arg[5].(string)), mysql.MySQLFlavor) } c.Assert(loadedMeta, check.DeepEquals, meta) } diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index 214eb1067a..029976c3d0 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -737,7 +737,7 @@ func (k *ShardingGroupKeeper) createTable() error { } // LoadShardMeta implements CheckPoint.LoadShardMeta -func (k *ShardingGroupKeeper) LoadShardMeta() (map[string]*shardmeta.ShardingMeta, error) { +func (k *ShardingGroupKeeper) LoadShardMeta(flavor string) (map[string]*shardmeta.ShardingMeta, error) { query := fmt.Sprintf("SELECT `target_table_id`, `source_table_id`, `active_index`, `is_global`, `data` FROM `%s`.`%s` WHERE `source_id`='%s'", k.shardMetaSchema, k.shardMetaTable, k.cfg.SourceID) rows, err := k.dbConn.querySQL(k.tctx, query) if err != nil { @@ -761,7 +761,7 @@ func (k *ShardingGroupKeeper) LoadShardMeta() (map[string]*shardmeta.ShardingMet if _, ok := meta[targetTableID]; !ok { meta[targetTableID] = shardmeta.NewShardingMeta(k.shardMetaSchema, k.shardMetaTable) } - err = meta[targetTableID].RestoreFromData(sourceTableID, activeIndex, isGlobal, []byte(data)) + err = meta[targetTableID].RestoreFromData(sourceTableID, activeIndex, isGlobal, []byte(data), flavor) if err != nil { return nil, err } diff --git a/syncer/syncer.go b/syncer/syncer.go index ed4f479085..091fa1873d 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -399,7 +399,7 @@ func (s *Syncer) initShardingGroups() error { } } - loadMeta, err2 := s.sgk.LoadShardMeta() + loadMeta, err2 := s.sgk.LoadShardMeta(s.cfg.Flavor) if err2 != nil { return err2 } From 6d3ab39052ef4bca5acb76d12b34566125b56397 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 23:15:58 +0800 Subject: [PATCH 16/35] address comment --- pkg/binlog/position.go | 5 +++++ pkg/utils/mydumper.go | 12 ++++++------ syncer/checkpoint.go | 10 +++++----- syncer/sharding-meta/shardmeta.go | 2 +- syncer/sharding_group.go | 10 +++++----- syncer/status.go | 13 +++++++++++-- syncer/syncer.go | 19 +++++++++---------- syncer/syncer_test.go | 4 ++-- 8 files changed, 44 insertions(+), 31 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index 509e09487e..6d962f2b6e 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -19,8 +19,10 @@ import ( "strings" gmysql "github.com/siddontang/go-mysql/mysql" + "go.uber.org/zap" "github.com/pingcap/dm/pkg/gtid" + "github.com/pingcap/dm/pkg/log" "github.com/pingcap/dm/pkg/terror" "github.com/pingcap/dm/pkg/utils" ) @@ -201,6 +203,9 @@ func CompareLocation(location1, location2 Location) int { } else if contain2 { return -1 } + + // can't compare location by gtid, and will compare by position + log.L().Warn("gtidSet can't be compared", zap.Stringer("location1", location1), zap.Stringer("location2", location2)) } } diff --git a/pkg/utils/mydumper.go 
b/pkg/utils/mydumper.go index 08d0347917..be0c41f3dc 100644 --- a/pkg/utils/mydumper.go +++ b/pkg/utils/mydumper.go @@ -27,14 +27,14 @@ import ( ) // ParseMetaData parses mydumper's output meta file and returns binlog position and GTID -func ParseMetaData(filename string) (*mysql.Position, string, error) { +func ParseMetaData(filename string) (mysql.Position, string, error) { fd, err := os.Open(filename) if err != nil { - return nil, "", terror.ErrParseMydumperMeta.Generate(err) + return mysql.Position{}, "", terror.ErrParseMydumperMeta.Generate(err) } defer fd.Close() - pos := new(mysql.Position) + pos := mysql.Position{} gtid := "" br := bufio.NewReader(fd) @@ -43,7 +43,7 @@ func ParseMetaData(filename string) (*mysql.Position, string, error) { if err == io.EOF { break } else if err != nil { - return nil, "", terror.ErrParseMydumperMeta.Generate(err) + return mysql.Position{}, "", terror.ErrParseMydumperMeta.Generate(err) } line = strings.TrimSpace(line[:len(line)-1]) if len(line) == 0 { @@ -67,7 +67,7 @@ func ParseMetaData(filename string) (*mysql.Position, string, error) { case "Pos": pos64, err := strconv.ParseUint(value, 10, 32) if err != nil { - return nil, "", terror.ErrParseMydumperMeta.Generate(err) + return mysql.Position{}, "", terror.ErrParseMydumperMeta.Generate(err) } pos.Pos = uint32(pos64) case "GTID": @@ -76,7 +76,7 @@ func ParseMetaData(filename string) (*mysql.Position, string, error) { } if len(pos.Name) == 0 || pos.Pos == uint32(0) { - return nil, "", terror.ErrParseMydumperMeta.Generate(fmt.Sprintf("file %s invalid format", filename)) + return mysql.Position{}, "", terror.ErrParseMydumperMeta.Generate(fmt.Sprintf("file %s invalid format", filename)) } return pos, gtid, nil diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index e634d3c321..3677f3377e 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -117,7 +117,7 @@ func (b *binlogPoint) outOfDate() bool { func (b *binlogPoint) MySQLLocation() pbinlog.Location { b.RLock() defer b.RUnlock() - return b.location + return b.location.Clone() } // FlushedMySQLLocation returns flushed point as pbinlog.Location @@ -758,7 +758,7 @@ func (cp *RemoteCheckPoint) parseMetaData() (*pbinlog.Location, error) { } return &pbinlog.Location{ - Position: *pos, + Position: pos, GTIDSet: gset, }, nil } @@ -778,9 +778,9 @@ func minLocation(flavor string) pbinlog.Location { } func minGTIDSet(flavor string) gtid.Set { - if flavor == mysql.MySQLFlavor { - return >id.MySQLGTIDSet{} + if flavor == mysql.MariaDBFlavor { + return >id.MariadbGTIDSet{} } - return >id.MariadbGTIDSet{} + return >id.MySQLGTIDSet{} } diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go index 5bcb2b9ca9..646023e87e 100644 --- a/syncer/sharding-meta/shardmeta.go +++ b/syncer/sharding-meta/shardmeta.go @@ -251,7 +251,7 @@ func (meta *ShardingMeta) ActiveDDLFirstLocation() (binlog.Location, error) { if meta.activeIdx >= len(meta.global.Items) { return binlog.Location{}, terror.ErrSyncUnitDDLActiveIndexLarger.Generate(meta.activeIdx, meta.global.Items) } - return meta.global.Items[meta.activeIdx].FirstLocation, nil + return meta.global.Items[meta.activeIdx].FirstLocation.Clone(), nil } // FlushData returns sharding meta flush SQL and args diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index 029976c3d0..02a005944b 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -318,7 +318,7 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { Name: sg.firstLocation.Position.Name, 
Pos: sg.firstLocation.Position.Pos, }, - GTIDSet: sg.firstLocation.GTIDSet, + GTIDSet: sg.firstLocation.GTIDSet.Clone(), } } item := sg.meta.GetGlobalActiveDDL() @@ -329,7 +329,7 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { Name: item.FirstLocation.Position.Name, Pos: item.FirstLocation.Position.Pos, }, - GTIDSet: item.FirstLocation.GTIDSet, + GTIDSet: item.FirstLocation.GTIDSet.Clone(), } } return nil @@ -346,7 +346,7 @@ func (sg *ShardingGroup) FirstEndPosUnresolved() *binlog.Location { Name: sg.firstEndLocation.Position.Name, Pos: sg.firstEndLocation.Position.Pos, }, - GTIDSet: sg.firstEndLocation.GTIDSet, + GTIDSet: sg.firstEndLocation.GTIDSet.Clone(), } } return nil @@ -602,9 +602,9 @@ func (k *ShardingGroupKeeper) lowestFirstLocationInGroups() *binlog.Location { func (k *ShardingGroupKeeper) AdjustGlobalLocation(globalLocation binlog.Location) binlog.Location { lowestFirstLocation := k.lowestFirstLocationInGroups() if lowestFirstLocation != nil && binlog.CompareLocation(*lowestFirstLocation, globalLocation) < 0 { - return *lowestFirstLocation + return lowestFirstLocation.Clone() } - return globalLocation + return globalLocation.Clone() } // Groups returns all sharding groups, often used for debug diff --git a/syncer/status.go b/syncer/status.go index 2a5198daf8..2d581d5a6a 100644 --- a/syncer/status.go +++ b/syncer/status.go @@ -53,7 +53,10 @@ func (s *Syncer) Status() interface{} { if masterGTIDSet != nil { // masterGTIDSet maybe a nil interface st.MasterBinlogGtid = masterGTIDSet.String() } - st.SyncerBinlogGtid = syncerLocation.GTIDSet.String() + + if syncerLocation.GTIDSet != nil { + st.SyncerBinlogGtid = syncerLocation.GTIDSet.String() + } st.BinlogType = "unknown" if s.streamerController != nil { @@ -67,7 +70,13 @@ func (s *Syncer) Status() interface{} { if err != nil { s.tctx.L().Debug("fail to parse real mysql position", zap.Stringer("position", syncerLocation.Position), log.ShortError(err)) } - st.Synced = utils.CompareBinlogPos(masterPos, realPos, 0) == 0 + if s.cfg.EnableGTID { + if masterGTIDSet != nil && syncerLocation.GTIDSet != nil && masterGTIDSet.Equal(syncerLocation.GTIDSet) { + st.Synced = true + } + } else { + st.Synced = utils.CompareBinlogPos(masterPos, realPos, 0) == 0 + } if s.cfg.IsSharding { st.UnresolvedGroups = s.sgk.UnresolvedGroups() diff --git a/syncer/syncer.go b/syncer/syncer.go index 091fa1873d..1edf4a88d4 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -752,8 +752,7 @@ func (s *Syncer) addJob(job *job) error { func (s *Syncer) saveGlobalPoint(globalLocation binlog.Location) { if s.cfg.IsSharding { - // TODO: maybe need to compare GTID? 
- globalLocation = s.sgk.AdjustGlobalLocation(globalLocation).Clone() + globalLocation = s.sgk.AdjustGlobalLocation(globalLocation) } s.checkpoint.SaveGlobalPoint(globalLocation) } @@ -986,8 +985,8 @@ func (s *Syncer) Run(ctx context.Context) (err error) { // we use currentPos to replace and skip binlog event of specified position and update table checkpoint in sharding ddl // we use lastPos to update global checkpoint and table checkpoint var ( - currentLocation = s.checkpoint.GlobalPoint().Clone() // also init to global checkpoint - lastLocation = s.checkpoint.GlobalPoint().Clone() + currentLocation = s.checkpoint.GlobalPoint() // also init to global checkpoint + lastLocation = s.checkpoint.GlobalPoint() ) s.tctx.L().Info("replicate binlog from checkpoint", zap.Stringer("checkpoint", lastLocation)) @@ -1259,7 +1258,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) { latestOp = xid currentLocation.Position.Pos = e.Header.LogPos currentLocation.GTIDSet.Set(ev.GSet) - s.tctx.L().Debug("", zap.String("event", "XID"), zap.Stringer("last location", lastLocation), log.WrapStringerField("location", currentLocation), log.WrapStringerField("gtid set", ev.GSet)) + s.tctx.L().Debug("", zap.String("event", "XID"), zap.Stringer("last location", lastLocation), log.WrapStringerField("location", currentLocation)) lastLocation.Position.Pos = e.Header.LogPos // update lastPos lastLocation.GTIDSet.Set(ev.GSet) @@ -1486,7 +1485,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e usedSchema := string(ev.Schema) parseResult, err := s.parseDDLSQL(sql, ec.parser2, usedSchema) if err != nil { - s.tctx.L().Error("fail to parse statement", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet), log.ShortError(err)) + s.tctx.L().Error("fail to parse statement", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.ShortError(err)) return err } @@ -1520,7 +1519,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e return nil } - s.tctx.L().Info("", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet)) + s.tctx.L().Info("", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation)) *ec.lastLocation = ec.currentLocation.Clone() // update lastLocation, because we have checked `isDDL` *ec.latestOp = ddl @@ -1533,10 +1532,10 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // so can handle sharding cases sqls, onlineDDLTableNames, err = s.resolveDDLSQL(ec.tctx, ec.parser2, parseResult.stmt, usedSchema) if err != nil { - s.tctx.L().Error("fail to resolve statement", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet), log.ShortError(err)) + s.tctx.L().Error("fail to resolve statement", 
zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation), log.ShortError(err)) return err } - s.tctx.L().Info("resolve sql", zap.String("event", "query"), zap.String("raw statement", sql), zap.Strings("statements", sqls), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), zap.Stringer("location", ec.currentLocation), log.WrapStringerField("gtid set", ev.GSet)) + s.tctx.L().Info("resolve sql", zap.String("event", "query"), zap.String("raw statement", sql), zap.Strings("statements", sqls), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), zap.Stringer("location", ec.currentLocation)) if len(onlineDDLTableNames) > 1 { return terror.ErrSyncerUnitOnlineDDLOnMultipleTable.Generate(string(ev.Query)) @@ -1770,7 +1769,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e return err2 } *ec.shardingReSyncCh <- &ShardingReSync{ - currLocation: *firstEndLocation, + currLocation: firstEndLocation.Clone(), latestLocation: ec.currentLocation.Clone(), targetSchema: ddlInfo.tableNames[1][0].Schema, targetTable: ddlInfo.tableNames[1][0].Name, diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 23be037da3..c3555b195e 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -807,7 +807,7 @@ func (s *testSyncerSuite) TestGeneratedColumn(c *C) { _, err = db.Exec("SET GLOBAL binlog_format = 'ROW';") c.Assert(err, IsNil) - pos, _, err := utils.GetMasterStatus(db, "mysql") + pos, gset, err := utils.GetMasterStatus(db, "mysql") c.Assert(err, IsNil) defer db.Exec("drop database if exists gctest_1") @@ -955,7 +955,7 @@ func (s *testSyncerSuite) TestGeneratedColumn(c *C) { syncer.reset() syncer.streamerController = NewStreamerController(tcontext.Background(), syncer.syncCfg, true, syncer.fromDB, syncer.binlogType, syncer.cfg.RelayDir, syncer.timezone) - err = syncer.streamerController.Start(tcontext.Background(), binlog.Location{Position: pos}) + err = syncer.streamerController.Start(tcontext.Background(), binlog.Location{Position: pos, GTIDSet: gset}) c.Assert(err, IsNil) for _, testCase := range testCases { From e2e705eda2bc1db71795a9eeb7226731e5c64e8d Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 23:42:42 +0800 Subject: [PATCH 17/35] address comment --- pkg/binlog/position.go | 20 +++++++ pkg/gtid/gtid.go | 9 +++ syncer/checkpoint.go | 114 ++++++++++++++++---------------------- syncer/checkpoint_test.go | 16 +++--- syncer/job_test.go | 8 +-- syncer/relay.go | 2 +- syncer/syncer.go | 2 +- syncer/syncer_test.go | 4 +- 8 files changed, 93 insertions(+), 82 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index 6d962f2b6e..bf5b2b6609 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -37,6 +37,11 @@ const ( posUUIDSuffixSeparator = "|" ) +var ( + // MinPosition is the min binlog position + MinPosition = gmysql.Position{Pos: 4} +) + // PositionFromStr constructs a mysql.Position from a string representation like `mysql-bin.000001:2345` func PositionFromStr(s string) (gmysql.Position, error) { parsed := strings.Split(s, ":") @@ -172,6 +177,21 @@ type Location struct { GTIDSet gtid.Set } +// NewLocation returns a new Location +func NewLocation(flavor string) Location { + if flavor == gmysql.MariaDBFlavor { + return Location{ + Position: MinPosition, + GTIDSet: >id.MariadbGTIDSet{}, + } + } + + return Location{ 
+ Position: MinPosition, + GTIDSet: >id.MySQLGTIDSet{}, + } +} + func (l Location) String() string { return fmt.Sprintf("position: %v, gtid-set: %s", l.Position, l.GTIDSet) } diff --git a/pkg/gtid/gtid.go b/pkg/gtid/gtid.go index 57c95b6669..329d4f7705 100644 --- a/pkg/gtid/gtid.go +++ b/pkg/gtid/gtid.go @@ -378,3 +378,12 @@ func (m *MariadbGTIDSet) String() string { } return m.set.String() } + +// MinGTIDSet returns the min GTID set +func MinGTIDSet(flavor string) Set { + if flavor == mysql.MariaDBFlavor { + return &MariadbGTIDSet{} + } + + return &MySQLGTIDSet{} +} diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index 3677f3377e..e832366920 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -21,7 +21,7 @@ import ( "time" "github.com/pingcap/dm/dm/config" - pbinlog "github.com/pingcap/dm/pkg/binlog" + binlog "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/gtid" @@ -51,7 +51,6 @@ var ( globalCpSchema = "" // global checkpoint's cp_schema globalCpTable = "" // global checkpoint's cp_table maxCheckPointTimeout = "1m" - minPosition = mysql.Position{Pos: 4} maxCheckPointSaveTime = 30 * time.Second ) @@ -59,14 +58,14 @@ var ( type binlogPoint struct { sync.RWMutex - location pbinlog.Location + location binlog.Location ti *model.TableInfo - flushedLocation pbinlog.Location // location which flushed permanently + flushedLocation binlog.Location // location which flushed permanently flushedTI *model.TableInfo } -func newBinlogPoint(location, flushedLocation pbinlog.Location, ti, flushedTI *model.TableInfo) *binlogPoint { +func newBinlogPoint(location, flushedLocation binlog.Location, ti, flushedTI *model.TableInfo) *binlogPoint { return &binlogPoint{ location: location, ti: ti, @@ -75,11 +74,11 @@ func newBinlogPoint(location, flushedLocation pbinlog.Location, ti, flushedTI *m } } -func (b *binlogPoint) save(location pbinlog.Location, ti *model.TableInfo) error { +func (b *binlogPoint) save(location binlog.Location, ti *model.TableInfo) error { b.Lock() defer b.Unlock() - if pbinlog.CompareLocation(location, b.location) < 0 { + if binlog.CompareLocation(location, b.location) < 0 { // support to save equal pos, but not older pos return terror.ErrCheckpointSaveInvalidPos.Generate(location, b.location.Position) } @@ -110,18 +109,18 @@ func (b *binlogPoint) outOfDate() bool { b.RLock() defer b.RUnlock() - return pbinlog.CompareLocation(b.location, b.flushedLocation) > 0 + return binlog.CompareLocation(b.location, b.flushedLocation) > 0 } -// MySQLLocation returns point as pbinlog.Location -func (b *binlogPoint) MySQLLocation() pbinlog.Location { +// MySQLLocation returns point as binlog.Location +func (b *binlogPoint) MySQLLocation() binlog.Location { b.RLock() defer b.RUnlock() return b.location.Clone() } -// FlushedMySQLLocation returns flushed point as pbinlog.Location -func (b *binlogPoint) FlushedMySQLLocation() pbinlog.Location { +// FlushedMySQLLocation returns flushed point as binlog.Location +func (b *binlogPoint) FlushedMySQLLocation() binlog.Location { b.RLock() defer b.RUnlock() return b.flushedLocation @@ -167,7 +166,7 @@ type CheckPoint interface { LoadMeta() error // SaveTablePoint saves checkpoint for specified table in memory - SaveTablePoint(sourceSchema, sourceTable string, point pbinlog.Location, ti *model.TableInfo) + SaveTablePoint(sourceSchema, sourceTable string, point binlog.Location, ti *model.TableInfo) // DeleteTablePoint deletes checkpoint for specified table 
in memory and storage DeleteTablePoint(tctx *tcontext.Context, sourceSchema, sourceTable string) error @@ -176,11 +175,11 @@ type CheckPoint interface { DeleteSchemaPoint(tctx *tcontext.Context, sourceSchema string) error // IsNewerTablePoint checks whether job's checkpoint is newer than previous saved checkpoint - IsNewerTablePoint(sourceSchema, sourceTable string, point pbinlog.Location) bool + IsNewerTablePoint(sourceSchema, sourceTable string, point binlog.Location) bool // SaveGlobalPoint saves the global binlog stream's checkpoint // corresponding to Meta.Save - SaveGlobalPoint(point pbinlog.Location) + SaveGlobalPoint(point binlog.Location) // FlushGlobalPointsExcept flushes the global checkpoint and tables' // checkpoints except exceptTables, it also flushes SQLs with Args providing @@ -191,14 +190,14 @@ type CheckPoint interface { // GlobalPoint returns the global binlog stream's checkpoint // corresponding to Meta.Pos and Meta.GTID - GlobalPoint() pbinlog.Location + GlobalPoint() binlog.Location // TablePoint returns all table's stream checkpoint - TablePoint() map[string]map[string]pbinlog.Location + TablePoint() map[string]map[string]binlog.Location // FlushedGlobalPoint returns the flushed global binlog stream's checkpoint // corresponding to to Meta.Pos and gtid - FlushedGlobalPoint() pbinlog.Location + FlushedGlobalPoint() binlog.Location // CheckGlobalPoint checks whether we should save global checkpoint // corresponding to Meta.Check @@ -248,7 +247,7 @@ func NewRemoteCheckPoint(tctx *tcontext.Context, cfg *config.SubTaskConfig, id s tableName: dbutil.TableName(cfg.MetaSchema, cfg.Name+"_syncer_checkpoint"), id: id, points: make(map[string]map[string]*binlogPoint), - globalPoint: newBinlogPoint(minLocation(cfg.Flavor), minLocation(cfg.Flavor), nil, nil), + globalPoint: newBinlogPoint(binlog.NewLocation(cfg.Flavor), binlog.NewLocation(cfg.Flavor), nil, nil), logCtx: tcontext.Background().WithLogger(tctx.L().WithFields(zap.String("component", "remote checkpoint"))), } @@ -294,7 +293,7 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error { return err } - cp.globalPoint = newBinlogPoint(minLocation(cp.cfg.Flavor), minLocation(cp.cfg.Flavor), nil, nil) + cp.globalPoint = newBinlogPoint(binlog.NewLocation(cp.cfg.Flavor), binlog.NewLocation(cp.cfg.Flavor), nil, nil) cp.points = make(map[string]map[string]*binlogPoint) @@ -302,15 +301,15 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error { } // SaveTablePoint implements CheckPoint.SaveTablePoint -func (cp *RemoteCheckPoint) SaveTablePoint(sourceSchema, sourceTable string, point pbinlog.Location, ti *model.TableInfo) { +func (cp *RemoteCheckPoint) SaveTablePoint(sourceSchema, sourceTable string, point binlog.Location, ti *model.TableInfo) { cp.Lock() defer cp.Unlock() cp.saveTablePoint(sourceSchema, sourceTable, point, ti) } // saveTablePoint saves single table's checkpoint without mutex.Lock -func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, location pbinlog.Location, ti *model.TableInfo) { - if pbinlog.CompareLocation(cp.globalPoint.location, location) > 0 { +func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, location binlog.Location, ti *model.TableInfo) { + if binlog.CompareLocation(cp.globalPoint.location, location) > 0 { panic(fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v", location, cp.globalPoint)) } @@ -323,7 +322,7 @@ func (cp *RemoteCheckPoint) saveTablePoint(sourceSchema, sourceTable string, loc } point, ok := 
mSchema[sourceTable] if !ok { - mSchema[sourceTable] = newBinlogPoint(location, minLocation(cp.cfg.Flavor), ti, nil) + mSchema[sourceTable] = newBinlogPoint(location, binlog.NewLocation(cp.cfg.Flavor), ti, nil) } else if err := point.save(location, ti); err != nil { cp.logCtx.L().Error("fail to save table point", zap.String("schema", sourceSchema), zap.String("table", sourceTable), log.ShortError(err)) } @@ -378,7 +377,7 @@ func (cp *RemoteCheckPoint) DeleteSchemaPoint(tctx *tcontext.Context, sourceSche } // IsNewerTablePoint implements CheckPoint.IsNewerTablePoint -func (cp *RemoteCheckPoint) IsNewerTablePoint(sourceSchema, sourceTable string, location pbinlog.Location) bool { +func (cp *RemoteCheckPoint) IsNewerTablePoint(sourceSchema, sourceTable string, location binlog.Location) bool { cp.RLock() defer cp.RUnlock() mSchema, ok := cp.points[sourceSchema] @@ -391,11 +390,11 @@ func (cp *RemoteCheckPoint) IsNewerTablePoint(sourceSchema, sourceTable string, } oldLocation := point.MySQLLocation() - return pbinlog.CompareLocation(location, oldLocation) > 0 + return binlog.CompareLocation(location, oldLocation) > 0 } // SaveGlobalPoint implements CheckPoint.SaveGlobalPoint -func (cp *RemoteCheckPoint) SaveGlobalPoint(pos pbinlog.Location) { +func (cp *RemoteCheckPoint) SaveGlobalPoint(pos binlog.Location) { cp.Lock() defer cp.Unlock() @@ -476,18 +475,18 @@ func (cp *RemoteCheckPoint) FlushPointsExcept(tctx *tcontext.Context, exceptTabl } // GlobalPoint implements CheckPoint.GlobalPoint -func (cp *RemoteCheckPoint) GlobalPoint() pbinlog.Location { +func (cp *RemoteCheckPoint) GlobalPoint() binlog.Location { return cp.globalPoint.MySQLLocation() } // TablePoint implements CheckPoint.TablePoint -func (cp *RemoteCheckPoint) TablePoint() map[string]map[string]pbinlog.Location { +func (cp *RemoteCheckPoint) TablePoint() map[string]map[string]binlog.Location { cp.RLock() defer cp.RUnlock() - tablePoint := make(map[string]map[string]pbinlog.Location) + tablePoint := make(map[string]map[string]binlog.Location) for schema, tables := range cp.points { - tablePoint[schema] = make(map[string]pbinlog.Location) + tablePoint[schema] = make(map[string]binlog.Location) for table, point := range tables { tablePoint[schema][table] = point.MySQLLocation() } @@ -496,7 +495,7 @@ func (cp *RemoteCheckPoint) TablePoint() map[string]map[string]pbinlog.Location } // FlushedGlobalPoint implements CheckPoint.FlushedGlobalPoint -func (cp *RemoteCheckPoint) FlushedGlobalPoint() pbinlog.Location { +func (cp *RemoteCheckPoint) FlushedGlobalPoint() binlog.Location { cp.RLock() defer cp.RUnlock() @@ -621,11 +620,16 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T if err != nil { return terror.WithScope(terror.DBErrorAdapt(err, terror.ErrDBDriverError), terror.ScopeDownstream) } - gset, err := gtid.ParserGTID(cp.cfg.Flavor, binlogGTIDSet) - if err != nil { - return err + var gset gtid.Set + if len(binlogGTIDSet) == 0 { + gset = gtid.MinGTIDSet(cp.cfg.Flavor) + } else { + gset, err = gtid.ParserGTID(cp.cfg.Flavor, binlogGTIDSet) + if err != nil { + return err + } } - location := pbinlog.Location{ + location := binlog.Location{ Position: mysql.Position{ Name: binlogName, Pos: binlogPos, @@ -633,7 +637,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T GTIDSet: gset, } if isGlobal { - if pbinlog.CompareLocation(location, minLocation(cp.cfg.Flavor)) > 0 { + if binlog.CompareLocation(location, binlog.NewLocation(cp.cfg.Flavor)) > 0 { cp.globalPoint = 
newBinlogPoint(location, location, nil, nil) cp.logCtx.L().Info("fetch global checkpoint from DB", log.WrapStringerField("global checkpoint", cp.globalPoint)) } @@ -668,7 +672,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T // LoadMeta implements CheckPoint.LoadMeta func (cp *RemoteCheckPoint) LoadMeta() error { var ( - location *pbinlog.Location + location *binlog.Location err error ) switch cp.cfg.Mode { @@ -683,7 +687,7 @@ func (cp *RemoteCheckPoint) LoadMeta() error { // load meta from task config if cp.cfg.Meta == nil { cp.logCtx.L().Warn("don't set meta in increment task-mode") - location1 := minLocation(cp.cfg.Flavor) + location1 := binlog.NewLocation(cp.cfg.Flavor) cp.globalPoint = newBinlogPoint(location1, location1, nil, nil) return nil } @@ -692,10 +696,10 @@ func (cp *RemoteCheckPoint) LoadMeta() error { return err } if gset == nil { - gset = minGTIDSet(cp.cfg.Flavor) + gset = gtid.MinGTIDSet(cp.cfg.Flavor) } - location = &pbinlog.Location{ + location = &binlog.Location{ Position: mysql.Position{ Name: cp.cfg.Meta.BinLogName, Pos: cp.cfg.Meta.BinLogPos, @@ -717,7 +721,7 @@ func (cp *RemoteCheckPoint) LoadMeta() error { } // genUpdateSQL generates SQL and arguments for update checkpoint -func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location pbinlog.Location, tiBytes []byte, isGlobal bool) (string, []interface{}) { +func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location binlog.Location, tiBytes []byte, isGlobal bool) (string, []interface{}) { // use `INSERT INTO ... ON DUPLICATE KEY UPDATE` rather than `REPLACE INTO` // to keep `create_time`, `update_time` correctly sql2 := `INSERT INTO ` + cp.tableName + ` @@ -743,7 +747,7 @@ func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location pbin return sql2, args } -func (cp *RemoteCheckPoint) parseMetaData() (*pbinlog.Location, error) { +func (cp *RemoteCheckPoint) parseMetaData() (*binlog.Location, error) { // `metadata` is mydumper's output meta file name filename := path.Join(cp.cfg.Dir, "metadata") cp.logCtx.L().Info("parsing metadata from file", zap.String("file", filename)) @@ -757,30 +761,8 @@ func (cp *RemoteCheckPoint) parseMetaData() (*pbinlog.Location, error) { return nil, err } - return &pbinlog.Location{ + return &binlog.Location{ Position: pos, GTIDSet: gset, }, nil } - -func minLocation(flavor string) pbinlog.Location { - if flavor == mysql.MySQLFlavor { - return pbinlog.Location{ - Position: minPosition, - GTIDSet: minGTIDSet(flavor), - } - } - - return pbinlog.Location{ - Position: minPosition, - GTIDSet: minGTIDSet(flavor), - } -} - -func minGTIDSet(flavor string) gtid.Set { - if flavor == mysql.MariaDBFlavor { - return >id.MariadbGTIDSet{} - } - - return >id.MySQLGTIDSet{} -} diff --git a/syncer/checkpoint_test.go b/syncer/checkpoint_test.go index cc4cf6617e..bfad0c3373 100644 --- a/syncer/checkpoint_test.go +++ b/syncer/checkpoint_test.go @@ -124,15 +124,15 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { tctx := tcontext.Background() // global checkpoint init to min - c.Assert(cp.GlobalPoint(), Equals, minLocation) - c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) + c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) // try load, but should load nothing s.mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) err := cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), 
Equals, minLocation) - c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) + c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) oldMode := s.cfg.Mode oldDir := s.cfg.Dir @@ -240,14 +240,14 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { s.mock.ExpectCommit() err = cp.Clear(tctx) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, minLocation) - c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) + c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) s.mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) err = cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, minLocation) - c.Assert(cp.FlushedGlobalPoint(), Equals, minLocation) + c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) } func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { diff --git a/syncer/job_test.go b/syncer/job_test.go index 6948104505..a0f4470bfc 100644 --- a/syncer/job_test.go +++ b/syncer/job_test.go @@ -87,19 +87,19 @@ func (t *testJobSuite) TestJob(c *C) { jobStr string }{ { - newJob(insert, "test", "t1", "test", "t1", "insert into test.t1 values(?)", []interface{}{1}, "1", binlog.Location{}, binlog.Location{}, ""), + newJob(insert, "test", "t1", "test", "t1", "insert into test.t1 values(?)", []interface{}{1}, "1", binlog.NewLocation(""), binlog.NewLocation(""), ""), "tp: insert, sql: insert into test.t1 values(?), args: [1], key: 1, ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, { - newDDLJob(ddlInfo, []string{"create database test"}, binlog.Location{}, binlog.Location{}, ""), + newDDLJob(ddlInfo, []string{"create database test"}, binlog.NewLocation(""), binlog.NewLocation(""), ""), "tp: ddl, sql: , args: [], key: , ddls: [create database test], last_pos: (, 0), current_pos: (, 0), gtid:", }, { - newXIDJob(binlog.Location{}, binlog.Location{}, ""), + newXIDJob(binlog.NewLocation(""), binlog.NewLocation(""), ""), "tp: xid, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, { newFlushJob(), "tp: flush, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, { - newSkipJob(binlog.Location{}), + newSkipJob(binlog.NewLocation("")), "tp: skip, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", }, } diff --git a/syncer/relay.go b/syncer/relay.go index bf4af33441..9fa63bbca7 100644 --- a/syncer/relay.go +++ b/syncer/relay.go @@ -48,7 +48,7 @@ func (s *Syncer) setInitActiveRelayLog() error { } checkLocation := s.checkpoint.GlobalPoint() - if binlog.ComparePosition(checkLocation.Position, minPosition) > 0 { + if binlog.ComparePosition(checkLocation.Position, binlog.MinPosition) > 0 { // continue from previous checkpoint pos = checkLocation.Position extractPos = true diff --git a/syncer/syncer.go b/syncer/syncer.go index 1edf4a88d4..f2438acb42 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -425,7 +425,7 @@ func (s *Syncer) initShardingGroups() error { func (s *Syncer) IsFreshTask(ctx context.Context) (bool, error) { globalPoint := s.checkpoint.GlobalPoint() tablePoint := s.checkpoint.TablePoint() - return binlog.CompareLocation(globalPoint, minLocation(s.cfg.Flavor)) <= 0 && len(tablePoint) == 0, nil + return binlog.CompareLocation(globalPoint, binlog.NewLocation(s.cfg.Flavor)) <= 0 && 
len(tablePoint) == 0, nil } func (s *Syncer) reset() { diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index c3555b195e..e14739a190 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -1145,7 +1145,7 @@ func (s *testSyncerSuite) TestRun(c *C) { } mockStreamerProducer := &MockStreamProducer{s.generateEvents(events1, c)} - mockStreamer, err := mockStreamerProducer.generateStreamer(binlog.Location{}) + mockStreamer, err := mockStreamerProducer.generateStreamer(binlog.NewLocation("")) c.Assert(err, IsNil) syncer.streamerController = &StreamerController{ streamerProducer: mockStreamerProducer, @@ -1248,7 +1248,7 @@ func (s *testSyncerSuite) TestRun(c *C) { // simulate `syncer.Resume` here, but doesn't reset database conns syncer.reset() mockStreamerProducer = &MockStreamProducer{s.generateEvents(events2, c)} - mockStreamer, err = mockStreamerProducer.generateStreamer(binlog.Location{}) + mockStreamer, err = mockStreamerProducer.generateStreamer(binlog.NewLocation("")) c.Assert(err, IsNil) syncer.streamerController = &StreamerController{ streamerProducer: mockStreamerProducer, From ffeb7eb2e226d811f3fc84f152874b8c368b822a Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sat, 7 Mar 2020 23:44:43 +0800 Subject: [PATCH 18/35] add clone --- syncer/checkpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index e832366920..1b369fdd97 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -123,7 +123,7 @@ func (b *binlogPoint) MySQLLocation() binlog.Location { func (b *binlogPoint) FlushedMySQLLocation() binlog.Location { b.RLock() defer b.RUnlock() - return b.flushedLocation + return b.flushedLocation.Clone() } // TableInfo returns the table schema associated at the current binlog position. 
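Why patches 17 and 18 make the Location accessors return a clone: binlog.Location carries a gtid.Set, an interface value backed by a pointer, so copying the struct still shares the underlying GTID set, and a caller that mutates the returned location would silently advance the checkpoint's internal state. Below is a minimal standalone sketch of that aliasing hazard, using only the upstream siddontang/go-mysql types; the location struct is a hypothetical stand-in for pkg/binlog.Location, not code from this series.

package main

import (
	"fmt"

	gmysql "github.com/siddontang/go-mysql/mysql"
)

// location mimics pkg/binlog.Location: a value struct holding an interface.
type location struct {
	pos  gmysql.Position
	gset gmysql.GTIDSet // backed by a pointer, shared across struct copies
}

func main() {
	gset, _ := gmysql.ParseGTIDSet(gmysql.MySQLFlavor, "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-4")
	checkpoint := location{pos: gmysql.Position{Name: "bin.000001", Pos: 4}, gset: gset}

	// Copying the struct copies the interface header, not the set behind it.
	leaked := checkpoint
	leaked.gset.Update("53ea0ed1-9bf8-11e6-8bea-64006a897c73:5")
	fmt.Println(checkpoint.gset.String()) // 1-5: the checkpoint advanced behind our back

	// What the patched MySQLLocation/FlushedMySQLLocation accessors do instead:
	// hand out an independent copy, so the caller's changes stay local.
	safe := location{pos: checkpoint.pos, gset: checkpoint.gset.Clone()}
	safe.gset.Update("53ea0ed1-9bf8-11e6-8bea-64006a897c73:6")
	fmt.Println(checkpoint.gset.String()) // still 1-5
}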
From 8132f8f89d6dc42c5e3e7924fabd2f6c48d4aecf Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 12:08:30 +0800 Subject: [PATCH 19/35] address comemnt and fix test --- pkg/binlog/position.go | 15 ++++++-------- pkg/gtid/gtid.go | 34 +++++++++++++++++++++---------- pkg/gtid/gtid_test.go | 16 +++++++++++++++ pkg/utils/mydumper_test.go | 6 +++--- syncer/checkpoint.go | 13 +++++------- syncer/job_test.go | 10 ++++----- syncer/sharding-meta/shardmeta.go | 5 ++++- 7 files changed, 62 insertions(+), 37 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index bf5b2b6609..1423ceb6f2 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -179,16 +179,9 @@ type Location struct { // NewLocation returns a new Location func NewLocation(flavor string) Location { - if flavor == gmysql.MariaDBFlavor { - return Location{ - Position: MinPosition, - GTIDSet: >id.MariadbGTIDSet{}, - } - } - return Location{ Position: MinPosition, - GTIDSet: >id.MySQLGTIDSet{}, + GTIDSet: gtid.MinGTIDSet(flavor), } } @@ -198,7 +191,11 @@ func (l Location) String() string { // Clone clones a same Location func (l Location) Clone() Location { - newGTIDSet := l.GTIDSet.Clone() + var newGTIDSet gtid.Set + if l.GTIDSet != nil { + newGTIDSet = l.GTIDSet.Clone() + } + return Location{ Position: l.Position, GTIDSet: newGTIDSet, diff --git a/pkg/gtid/gtid.go b/pkg/gtid/gtid.go index 329d4f7705..3fefc72625 100644 --- a/pkg/gtid/gtid.go +++ b/pkg/gtid/gtid.go @@ -68,6 +68,21 @@ func ParserGTID(flavor, gtidStr string) (Set, error) { return m, err } +// MinGTIDSet returns the min GTID set +func MinGTIDSet(flavor string) Set { + // use mysql as default + if flavor != mysql.MariaDBFlavor && flavor != mysql.MySQLFlavor { + flavor = mysql.MySQLFlavor + } + + gset, err := ParserGTID(flavor, "") + if err != nil { + // this should not happen + panic(err) + } + return gset +} + /************************ mysql gtid set ***************************/ // MySQLGTIDSet wraps mysql.MysqlGTIDSet to implement gtidSet interface @@ -137,7 +152,7 @@ func (g *MySQLGTIDSet) get(uuid string) (*mysql.UUIDSet, bool) { // Clone implements Set.Clone func (g *MySQLGTIDSet) Clone() Set { if g.set == nil { - return &MySQLGTIDSet{} + return MinGTIDSet(mysql.MySQLFlavor) } return &MySQLGTIDSet{ @@ -147,6 +162,9 @@ func (g *MySQLGTIDSet) Clone() Set { // Origin implements Set.Origin func (g *MySQLGTIDSet) Origin() mysql.GTIDSet { + if g.set == nil { + return MinGTIDSet(mysql.MySQLFlavor) + } return g.set.Clone().(*mysql.MysqlGTIDSet) } @@ -295,7 +313,7 @@ func (m *MariadbGTIDSet) get(domainID uint32) (*mysql.MariadbGTID, bool) { // Clone implements Set.Clone func (m *MariadbGTIDSet) Clone() Set { if m.set == nil { - return &MariadbGTIDSet{} + return MinGTIDSet(mysql.MariaDBFlavor) } return &MariadbGTIDSet{ set: m.set.Clone().(*mysql.MariadbGTIDSet), @@ -304,6 +322,9 @@ func (m *MariadbGTIDSet) Clone() Set { // Origin implements Set.Origin func (m *MariadbGTIDSet) Origin() mysql.GTIDSet { + if m.set == nil { + return MinGTIDSet(mysql.MariaDBFlavor) + } return m.set.Clone().(*mysql.MariadbGTIDSet) } @@ -378,12 +399,3 @@ func (m *MariadbGTIDSet) String() string { } return m.set.String() } - -// MinGTIDSet returns the min GTID set -func MinGTIDSet(flavor string) Set { - if flavor == mysql.MariaDBFlavor { - return &MariadbGTIDSet{} - } - - return &MySQLGTIDSet{} -} diff --git a/pkg/gtid/gtid_test.go b/pkg/gtid/gtid_test.go index 7ec1a004d0..3feebeb08e 100644 --- a/pkg/gtid/gtid_test.go +++ b/pkg/gtid/gtid_test.go @@ -18,6 +18,7 
@@ import ( "testing" . "github.com/pingcap/check" + "github.com/siddontang/go-mysql/mysql" "github.com/pingcap/dm/pkg/terror" ) @@ -75,6 +76,21 @@ func (s *testGTIDSuite) TestGTID(c *C) { } } +func (s *testGTIDSuite) TestMinGTIDSet(c *C) { + gset := MinGTIDSet(mysql.MySQLFlavor) + _, ok := gset.(*MySQLGTIDSet) + c.Assert(ok, IsTrue) + + gset = MinGTIDSet(mysql.MariaDBFlavor) + _, ok = gset.(*MariadbGTIDSet) + c.Assert(ok, IsTrue) + + // will treat as mysql gtid set + gset = MinGTIDSet("wrong flavor") + _, ok = gset.(*MySQLGTIDSet) + c.Assert(ok, IsTrue) +} + func (s *testGTIDSuite) TestMySQLGTIDEqual(c *C) { var ( g1 *MySQLGTIDSet diff --git a/pkg/utils/mydumper_test.go b/pkg/utils/mydumper_test.go index a12856a7b9..9242fc5e59 100644 --- a/pkg/utils/mydumper_test.go +++ b/pkg/utils/mydumper_test.go @@ -28,7 +28,7 @@ func (t *testUtilsSuite) TestParseMetaData(c *C) { testCases := []struct { source string - pos *mysql.Position + pos mysql.Position gsetStr string }{ { @@ -39,7 +39,7 @@ SHOW MASTER STATUS: GTID:97b5142f-e19c-11e8-808c-0242ac110005:1-13 Finished dump at: 2018-12-28 07:20:51`, - &mysql.Position{ + mysql.Position{ Name: "bin.000001", Pos: 2479, }, @@ -59,7 +59,7 @@ SHOW SLAVE STATUS: GTID: Finished dump at: 2018-12-27 19:51:22`, - &mysql.Position{ + mysql.Position{ Name: "mysql-bin.000003", Pos: 3295817, }, diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index 1b369fdd97..c7131cc3cf 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -620,15 +620,12 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T if err != nil { return terror.WithScope(terror.DBErrorAdapt(err, terror.ErrDBDriverError), terror.ScopeDownstream) } - var gset gtid.Set - if len(binlogGTIDSet) == 0 { - gset = gtid.MinGTIDSet(cp.cfg.Flavor) - } else { - gset, err = gtid.ParserGTID(cp.cfg.Flavor, binlogGTIDSet) - if err != nil { - return err - } + + gset, err := gtid.ParserGTID(cp.cfg.Flavor, binlogGTIDSet) + if err != nil { + return err } + location := binlog.Location{ Position: mysql.Position{ Name: binlogName, diff --git a/syncer/job_test.go b/syncer/job_test.go index a0f4470bfc..1c7a022afa 100644 --- a/syncer/job_test.go +++ b/syncer/job_test.go @@ -88,19 +88,19 @@ func (t *testJobSuite) TestJob(c *C) { }{ { newJob(insert, "test", "t1", "test", "t1", "insert into test.t1 values(?)", []interface{}{1}, "1", binlog.NewLocation(""), binlog.NewLocation(""), ""), - "tp: insert, sql: insert into test.t1 values(?), args: [1], key: 1, ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", + "tp: insert, sql: insert into test.t1 values(?), args: [1], key: 1, ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", }, { newDDLJob(ddlInfo, []string{"create database test"}, binlog.NewLocation(""), binlog.NewLocation(""), ""), - "tp: ddl, sql: , args: [], key: , ddls: [create database test], last_pos: (, 0), current_pos: (, 0), gtid:", + "tp: ddl, sql: , args: [], key: , ddls: [create database test], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", }, { newXIDJob(binlog.NewLocation(""), binlog.NewLocation(""), ""), - "tp: xid, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", + "tp: xid, sql: , args: [], key: , ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", }, { newFlushJob(), - "tp: flush, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", + "tp: flush, sql: , args: [], key: , 
ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", }, { newSkipJob(binlog.NewLocation("")), - "tp: skip, sql: , args: [], key: , ddls: [], last_pos: (, 0), current_pos: (, 0), gtid:", + "tp: skip, sql: , args: [], key: , ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", }, } diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go index 646023e87e..a7765e7176 100644 --- a/syncer/sharding-meta/shardmeta.go +++ b/syncer/sharding-meta/shardmeta.go @@ -251,6 +251,7 @@ func (meta *ShardingMeta) ActiveDDLFirstLocation() (binlog.Location, error) { if meta.activeIdx >= len(meta.global.Items) { return binlog.Location{}, terror.ErrSyncUnitDDLActiveIndexLarger.Generate(meta.activeIdx, meta.global.Items) } + return meta.global.Items[meta.activeIdx].FirstLocation.Clone(), nil } @@ -259,7 +260,9 @@ func (meta *ShardingMeta) FlushData(sourceID, tableID string) ([]string, [][]int // set FirstPosition and FirstGTIDSet for json marshal for _, item := range meta.global.Items { item.FirstPosition = item.FirstLocation.Position - item.FirstGTIDSet = item.FirstLocation.GTIDSet.String() + if item.FirstLocation.GTIDSet != nil { + item.FirstGTIDSet = item.FirstLocation.GTIDSet.String() + } } if len(meta.global.Items) == 0 { From 118c8f1aa6441960656f75bab6038f1e1ffbe22e Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 12:41:23 +0800 Subject: [PATCH 20/35] minor fix and add test for compare location --- pkg/binlog/position_test.go | 195 ++++++++++++++++++++++++++++++++++++ pkg/gtid/gtid.go | 4 +- 2 files changed, 197 insertions(+), 2 deletions(-) diff --git a/pkg/binlog/position_test.go b/pkg/binlog/position_test.go index 6302eb7ec1..b3a80af6c8 100644 --- a/pkg/binlog/position_test.go +++ b/pkg/binlog/position_test.go @@ -18,6 +18,8 @@ import ( . 
"github.com/pingcap/check" gmysql "github.com/siddontang/go-mysql/mysql" + + "github.com/pingcap/dm/pkg/gtid" ) var _ = Suite(&testPositionSuite{}) @@ -347,3 +349,196 @@ func (t *testPositionSuite) TestComparePosition(c *C) { c.Assert(cmp, Equals, cs.cmp) } } + +func (t *testPositionSuite) TestCompareCompareLocation(c *C) { + testCases := []struct { + flavor string + pos1 gmysql.Position + gset1 string + pos2 gmysql.Position + gset2 string + cmp int + }{ + { + // pos1 = pos2 + gmysql.MySQLFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "", + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "", + 0, + }, { + // pos1 = pos2 + gmysql.MariaDBFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "", + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "", + 0, + }, { + // pos1 < pos2 + gmysql.MariaDBFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "", + gmysql.Position{ + Name: "binlog.00002", + Pos: 122, + }, + "", + -1, + }, { + // pos1 > pos2 + gmysql.MySQLFlavor, + gmysql.Position{ + Name: "binlog.00003", + Pos: 123, + }, + "", + gmysql.Position{ + Name: "binlog.00002", + Pos: 122, + }, + "", + 1, + }, { + // gset1 = gset2, pos1 < pos2 + gmysql.MySQLFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-4", + gmysql.Position{ + Name: "binlog.00002", + Pos: 122, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-4", + -1, + }, { + // compare by gtid set, gset2 contains gset1 + gmysql.MySQLFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-2,53ea0ed1-9bf8-11e6-8bea-64006a897c74:1-2", + gmysql.Position{ + Name: "binlog.00002", + Pos: 124, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-4,53ea0ed1-9bf8-11e6-8bea-64006a897c74:1-3", + -1, + }, { + // compare by gtid set, gset1 contains gset2 + gmysql.MySQLFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-2,53ea0ed1-9bf8-11e6-8bea-64006a897c74:1-3", + gmysql.Position{ + Name: "binlog.00002", + Pos: 124, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-2", + 1, + }, { + // can't compare by gtid set, will compare by position, pos1 < pos2 + gmysql.MySQLFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-2,53ea0ed1-9bf8-11e6-8bea-64006a897c74:2-4", + gmysql.Position{ + Name: "binlog.00002", + Pos: 124, + }, + "53ea0ed1-9bf8-11e6-8bea-64006a897c73:1-2,53ea0ed1-9bf8-11e6-8bea-64006a897c74:1-3", + -1, + }, { + // gset1 = gset2, pos1 < pos2 + gmysql.MariaDBFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "1-1-1,2-2-2", + gmysql.Position{ + Name: "binlog.00002", + Pos: 122, + }, + "1-1-1,2-2-2", + -1, + }, { + // compare by gtid set, gset2 contains gset1 + gmysql.MariaDBFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "1-1-1,2-2-2", + gmysql.Position{ + Name: "binlog.00002", + Pos: 124, + }, + "1-1-1,2-2-2,3-3-3", + -1, + }, { + // compare by gtid set, gset1 contains gset2 + gmysql.MariaDBFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "1-1-1,2-2-3", + gmysql.Position{ + Name: "binlog.00002", + Pos: 124, + }, + "1-1-1,2-2-2", + 1, + }, { + // can't compare by gtid set, will compare by position, pos1 < pos2 + gmysql.MariaDBFlavor, + gmysql.Position{ + Name: "binlog.00001", + Pos: 123, + }, + "1-1-1,2-2-2", + gmysql.Position{ + Name: "binlog.00002", + Pos: 124, + }, + "2-2-2,3-3-3", + -1, + }, + } 
+ + for _, cs := range testCases { + c.Log(cs) + gset1, err := gtid.ParserGTID(cs.flavor, cs.gset1) + c.Assert(err, IsNil) + gset2, err := gtid.ParserGTID(cs.flavor, cs.gset2) + c.Assert(err, IsNil) + + cmp := CompareLocation(Location{cs.pos1, gset1}, Location{cs.pos2, gset2}) + c.Assert(cmp, Equals, cs.cmp) + } + +} diff --git a/pkg/gtid/gtid.go b/pkg/gtid/gtid.go index 3fefc72625..22a3484757 100644 --- a/pkg/gtid/gtid.go +++ b/pkg/gtid/gtid.go @@ -163,7 +163,7 @@ func (g *MySQLGTIDSet) Clone() Set { // Origin implements Set.Origin func (g *MySQLGTIDSet) Origin() mysql.GTIDSet { if g.set == nil { - return MinGTIDSet(mysql.MySQLFlavor) + return &mysql.MysqlGTIDSet{} } return g.set.Clone().(*mysql.MysqlGTIDSet) } @@ -323,7 +323,7 @@ func (m *MariadbGTIDSet) Clone() Set { // Origin implements Set.Origin func (m *MariadbGTIDSet) Origin() mysql.GTIDSet { if m.set == nil { - return MinGTIDSet(mysql.MariaDBFlavor) + return &mysql.MariadbGTIDSet{} } return m.set.Clone().(*mysql.MariadbGTIDSet) } From 109fdd58c2de67f612265dc69bc7617c31af7693 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 15:11:06 +0800 Subject: [PATCH 21/35] fix test --- pkg/binlog/position.go | 16 +++++++++++++- pkg/terror/error_list.go | 2 +- syncer/checkpoint.go | 6 +++++- syncer/checkpoint_test.go | 10 +++++---- syncer/job_test.go | 4 ++-- syncer/sharding-meta/shardmeta_test.go | 9 +++++++- syncer/sharding_group.go | 29 ++++++++++++++++++++------ syncer/sharding_group_test.go | 6 +++--- syncer/streamer_controller.go | 5 +---- 9 files changed, 64 insertions(+), 23 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index 1423ceb6f2..dd96d481a3 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -186,7 +186,11 @@ func NewLocation(flavor string) Location { } func (l Location) String() string { - return fmt.Sprintf("position: %v, gtid-set: %s", l.Position, l.GTIDSet) + gsetStr := "" + if l.GTIDSet != nil { + gsetStr = l.GTIDSet.String() + } + return fmt.Sprintf("position: %v, gtid-set: %s", l.Position, gsetStr) } // Clone clones a same Location @@ -202,6 +206,16 @@ func (l Location) Clone() Location { } } +// CloneGTIDSet clones location's gtid set +func (l Location) CloneGTIDSet() gtid.Set { + if l.GTIDSet != nil { + return l.GTIDSet.Clone() + } + + // return a min mysql gtid set to avoid nil pointer panic + return gtid.MinGTIDSet("") +} + // CompareLocation returns: // 1 if point1 is bigger than point2 // 0 if point1 is equal to point2 diff --git a/pkg/terror/error_list.go b/pkg/terror/error_list.go index e358285929..d7ac36cafe 100644 --- a/pkg/terror/error_list.go +++ b/pkg/terror/error_list.go @@ -712,7 +712,7 @@ var ( // Checkpoint error ErrCheckpointInvalidTaskMode = New(codeCheckpointInvalidTaskMode, ClassCheckpoint, ScopeInternal, LevelMedium, "invalid task mode: %s") - ErrCheckpointSaveInvalidPos = New(codeCheckpointSaveInvalidPos, ClassCheckpoint, ScopeInternal, LevelHigh, "save point %v is older than current pos %v") + ErrCheckpointSaveInvalidPos = New(codeCheckpointSaveInvalidPos, ClassCheckpoint, ScopeInternal, LevelHigh, "save point %s is older than current location %s") ErrCheckpointInvalidTableFile = New(codeCheckpointInvalidTableFile, ClassCheckpoint, ScopeInternal, LevelMedium, "invalid db table sql file - %s") ErrCheckpointDBNotExistInFile = New(codeCheckpointDBNotExistInFile, ClassCheckpoint, ScopeInternal, LevelMedium, "db (%s) not exist in data files, but in checkpoint") ErrCheckpointTableNotExistInFile = New(codeCheckpointTableNotExistInFile, 
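Stepping back to the TestCompareLocation table above: it fixes the intended semantics of CompareLocation as "decide by GTID sets when one strictly contains the other, otherwise fall back to ComparePosition". A minimal sketch of that rule, assuming gtid.Set exposes Equal and Contain as used elsewhere in pkg/gtid (the helper name is hypothetical; the real function lives in pkg/binlog/position.go):

func compareLocationSketch(l1, l2 Location) int {
	if l1.GTIDSet != nil && l2.GTIDSet != nil && !l1.GTIDSet.Equal(l2.GTIDSet) {
		if l2.GTIDSet.Contain(l1.GTIDSet) {
			return -1 // l2 is strictly ahead of l1
		}
		if l1.GTIDSet.Contain(l2.GTIDSet) {
			return 1 // l1 is strictly ahead of l2
		}
		// neither set contains the other (e.g. c74:2-4 vs c74:1-3):
		// fall through and compare binlog positions instead
	}
	return ComparePosition(l1.Position, l2.Position)
}

Every row in the table above is consistent with this rule, including the MariaDB cases, where containment is evaluated per replication domain.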
ClassCheckpoint, ScopeInternal, LevelMedium, "table (%s) not exist in db (%s) data files, but in checkpoint") diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index c7131cc3cf..608c77e254 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -740,7 +740,11 @@ func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location binl if len(tiBytes) == 0 { tiBytes = []byte("null") } - args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, location.GTIDSet.String(), tiBytes, isGlobal} + gsetStr := "" + if location.GTIDSet != nil { + gsetStr = location.GTIDSet.String() + } + args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, gsetStr, tiBytes, isGlobal} return sql2, args } diff --git a/syncer/checkpoint_test.go b/syncer/checkpoint_test.go index bfad0c3373..2d5b2003bf 100644 --- a/syncer/checkpoint_test.go +++ b/syncer/checkpoint_test.go @@ -29,6 +29,7 @@ import ( "github.com/DATA-DOG/go-sqlmock" . "github.com/pingcap/check" + "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/siddontang/go-mysql/mysql" "go.uber.org/zap/zapcore" @@ -124,15 +125,15 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { tctx := tcontext.Background() // global checkpoint init to min - c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) - c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.GlobalPoint().Position, Equals, binlog.MinPosition) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, binlog.MinPosition) // try load, but should load nothing s.mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) err := cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) - c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.GlobalPoint().Position, Equals, binlog.MinPosition) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, binlog.MinPosition) oldMode := s.cfg.Mode oldDir := s.cfg.Dir @@ -167,6 +168,7 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { s.mock.ExpectExec("(162)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos1.Name, pos1.Pos, []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) s.mock.ExpectCommit() err = cp.FlushPointsExcept(tctx, nil, nil, nil) + c.Log(errors.ErrorStack(err)) c.Assert(err, IsNil) c.Assert(cp.GlobalPoint().Position, Equals, pos1) c.Assert(cp.FlushedGlobalPoint().Position, Equals, pos1) diff --git a/syncer/job_test.go b/syncer/job_test.go index 1c7a022afa..f8f95ac3ff 100644 --- a/syncer/job_test.go +++ b/syncer/job_test.go @@ -97,10 +97,10 @@ func (t *testJobSuite) TestJob(c *C) { "tp: xid, sql: , args: [], key: , ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", }, { newFlushJob(), - "tp: flush, sql: , args: [], key: , ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", + "tp: flush, sql: , args: [], key: , ddls: [], last_location: position: (, 0), gtid-set: , current_location: position: (, 0), gtid-set: ", }, { newSkipJob(binlog.NewLocation("")), - "tp: skip, sql: , args: [], key: , ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 4), gtid-set: ", + "tp: skip, sql: , args: [], key: , ddls: [], last_location: position: (, 4), gtid-set: , current_location: position: (, 0), gtid-set: ", }, } diff --git a/syncer/sharding-meta/shardmeta_test.go 
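A note on why the assertions in checkpoint_test.go switch from comparing whole Locations with Equals to comparing only .Position: gocheck's Equals checker uses Go's ==, and once Location carries a gtid.Set interface field, two logically identical locations usually hold distinct set pointers. An illustrative snippet (assumed, not from the patch):

a := binlog.NewLocation(mysql.MySQLFlavor)
b := binlog.NewLocation(mysql.MySQLFlavor)
fmt.Println(a == b)                   // false: two different empty GTID sets
fmt.Println(a.Position == b.Position) // true: Position is a plain value struct

A similar concern shows up in shardmeta_test.go below, where a DeepEquals on the whole struct gives way to field-by-field checks.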
b/syncer/sharding-meta/shardmeta_test.go index 4c0c4569f4..fac992cab2 100644 --- a/syncer/sharding-meta/shardmeta_test.go +++ b/syncer/sharding-meta/shardmeta_test.go @@ -270,5 +270,12 @@ func (t *testShardMetaSuite) TestFlushLoadMeta(c *check.C) { c.Assert(arg, check.HasLen, 8) loadedMeta.RestoreFromData(arg[2].(string), arg[3].(int), arg[4].(bool), []byte(arg[5].(string)), mysql.MySQLFlavor) } - c.Assert(loadedMeta, check.DeepEquals, meta) + c.Assert(loadedMeta.activeIdx, check.Equals, meta.activeIdx) + c.Assert(loadedMeta.global.String(), check.Equals, meta.global.String()) + c.Assert(loadedMeta.schema, check.Equals, meta.schema) + c.Assert(loadedMeta.table, check.Equals, meta.table) + c.Assert(len(loadedMeta.sources), check.Equals, len(meta.sources)) + for table, source := range loadedMeta.sources { + c.Assert(source.String(), check.Equals, meta.sources[table].String()) + } } diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index 02a005944b..a80a301df4 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -80,6 +80,7 @@ import ( "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" + "github.com/pingcap/dm/pkg/gtid" "github.com/pingcap/dm/pkg/terror" shardmeta "github.com/pingcap/dm/syncer/sharding-meta" @@ -105,10 +106,12 @@ type ShardingGroup struct { firstLocation *binlog.Location // first DDL's binlog pos and gtid, used to restrain the global checkpoint when un-resolved firstEndLocation *binlog.Location // first DDL's binlog End_log_pos and gtid, used to re-direct binlog streamer after synced ddls []string // DDL which current in syncing + + flavor string } // NewShardingGroup creates a new ShardingGroup -func NewShardingGroup(sourceID, shardMetaSchema, shardMetaTable string, sources []string, meta *shardmeta.ShardingMeta, isSchemaOnly bool) *ShardingGroup { +func NewShardingGroup(sourceID, shardMetaSchema, shardMetaTable string, sources []string, meta *shardmeta.ShardingMeta, isSchemaOnly bool, flavor string) *ShardingGroup { sg := &ShardingGroup{ remain: len(sources), sources: make(map[string]bool, len(sources)), @@ -116,6 +119,7 @@ func NewShardingGroup(sourceID, shardMetaSchema, shardMetaTable string, sources sourceID: sourceID, firstLocation: nil, firstEndLocation: nil, + flavor: flavor, } if meta != nil { sg.meta = meta @@ -313,23 +317,32 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { defer sg.RUnlock() if sg.remain < len(sg.sources) && sg.firstLocation != nil { // create a new pos to return + gset := gtid.MinGTIDSet(sg.flavor) + if sg.firstLocation.GTIDSet != nil { + gset = sg.firstLocation.GTIDSet.Clone() + } + return &binlog.Location{ Position: mysql.Position{ Name: sg.firstLocation.Position.Name, Pos: sg.firstLocation.Position.Pos, }, - GTIDSet: sg.firstLocation.GTIDSet.Clone(), + GTIDSet: gset, } } item := sg.meta.GetGlobalActiveDDL() if item != nil { // make a new copy + gset := gtid.MinGTIDSet(sg.flavor) + if item.FirstLocation.GTIDSet != nil { + gset = item.FirstLocation.GTIDSet.Clone() + } return &binlog.Location{ Position: mysql.Position{ Name: item.FirstLocation.Position.Name, Pos: item.FirstLocation.Position.Pos, }, - GTIDSet: item.FirstLocation.GTIDSet.Clone(), + GTIDSet: gset, } } return nil @@ -341,12 +354,16 @@ func (sg *ShardingGroup) FirstEndPosUnresolved() *binlog.Location { defer sg.RUnlock() if sg.remain < len(sg.sources) && sg.firstEndLocation != nil { // create a new pos to return + gset := gtid.MinGTIDSet(sg.flavor) + if 
sg.firstEndLocation.GTIDSet != nil { + gset = sg.firstEndLocation.GTIDSet.Clone() + } return &binlog.Location{ Position: mysql.Position{ Name: sg.firstEndLocation.Position.Name, Pos: sg.firstEndLocation.Position.Pos, }, - GTIDSet: sg.firstEndLocation.GTIDSet.Clone(), + GTIDSet: gset, } } return nil @@ -442,7 +459,7 @@ func (k *ShardingGroupKeeper) AddGroup(targetSchema, targetTable string, sourceI defer k.Unlock() if schemaGroup, ok := k.groups[schemaID]; !ok { - k.groups[schemaID] = NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, sourceIDs, meta, true) + k.groups[schemaID] = NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, sourceIDs, meta, true, k.cfg.Flavor) } else { _, _, _, err = schemaGroup.Merge(sourceIDs) if err != nil { @@ -452,7 +469,7 @@ func (k *ShardingGroupKeeper) AddGroup(targetSchema, targetTable string, sourceI var ok bool if group, ok = k.groups[targetTableID]; !ok { - group = NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, sourceIDs, meta, false) + group = NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, sourceIDs, meta, false, k.cfg.Flavor) k.groups[targetTableID] = group } else if merge { needShardingHandle, synced, remain, err = k.groups[targetTableID].Merge(sourceIDs) diff --git a/syncer/sharding_group_test.go b/syncer/sharding_group_test.go index 960a72b993..ecfd0b656c 100644 --- a/syncer/sharding_group_test.go +++ b/syncer/sharding_group_test.go @@ -38,20 +38,20 @@ func (t *testShardingGroupSuite) TestLowestFirstPosInGroups(c *C) { k := NewShardingGroupKeeper(tcontext.Background(), cfg) - g1 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db1.tbl1", "db1.tbl2"}, nil, false) + g1 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db1.tbl1", "db1.tbl2"}, nil, false, "") pos1 := mysql.Position{Name: "mysql-bin.000002", Pos: 123} endPos1 := mysql.Position{Name: "mysql-bin.000002", Pos: 456} _, _, _, err := g1.TrySync("db1.tbl1", binlog.Location{Position: pos1}, binlog.Location{Position: endPos1}, ddls) c.Assert(err, IsNil) // lowest - g2 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db2.tbl1", "db2.tbl2"}, nil, false) + g2 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db2.tbl1", "db2.tbl2"}, nil, false, "") pos2 := mysql.Position{Name: "mysql-bin.000001", Pos: 123} endPos2 := mysql.Position{Name: "mysql-bin.000001", Pos: 456} _, _, _, err = g2.TrySync("db2.tbl1", binlog.Location{Position: pos2}, binlog.Location{Position: endPos2}, ddls) c.Assert(err, IsNil) - g3 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db3.tbl1", "db3.tbl2"}, nil, false) + g3 := NewShardingGroup(k.cfg.SourceID, k.shardMetaSchema, k.shardMetaTable, []string{"db3.tbl1", "db3.tbl2"}, nil, false, "") pos3 := mysql.Position{Name: "mysql-bin.000003", Pos: 123} endPos3 := mysql.Position{Name: "mysql-bin.000003", Pos: 456} _, _, _, err = g3.TrySync("db3.tbl1", binlog.Location{Position: pos3}, binlog.Location{Position: endPos3}, ddls) diff --git a/syncer/streamer_controller.go b/syncer/streamer_controller.go index bbdc7f794d..0a8a6c6904 100644 --- a/syncer/streamer_controller.go +++ b/syncer/streamer_controller.go @@ -71,10 +71,7 @@ func (r *remoteBinlogReader) generateStreamer(location binlog.Location) (streame if r.EnableGTID { streamer, err := r.reader.StartSyncGTID(location.GTIDSet.Origin()) - if err != nil { - return nil, err - } - return 
streamer, nil + return streamer, terror.ErrSyncerUnitRemoteSteamerStartSync.Delegate(err) } // position's name may contain uuid, so need remove it From 2b807cd8077d56e1450a6f57e826c76d6d14b91a Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 15:54:46 +0800 Subject: [PATCH 22/35] fix test --- _utils/terror_gen/errors_release.txt | 2 +- pkg/conn/baseconn.go | 4 ++++ syncer/checkpoint_test.go | 25 ++++++++++++------------- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/_utils/terror_gen/errors_release.txt b/_utils/terror_gen/errors_release.txt index ab0d900266..631286eaf2 100644 --- a/_utils/terror_gen/errors_release.txt +++ b/_utils/terror_gen/errors_release.txt @@ -150,7 +150,7 @@ ErrBinlogExtractPosition,[code=22001:class=binlog-op:scope=internal:level=high], ErrBinlogInvalidFilename,[code=22002:class=binlog-op:scope=internal:level=high],"invalid binlog filename" ErrBinlogParsePosFromStr,[code=22003:class=binlog-op:scope=internal:level=high],"" ErrCheckpointInvalidTaskMode,[code=24001:class=checkpoint:scope=internal:level=medium],"invalid task mode: %s" -ErrCheckpointSaveInvalidPos,[code=24002:class=checkpoint:scope=internal:level=high],"save point %v is older than current pos %v" +ErrCheckpointSaveInvalidPos,[code=24002:class=checkpoint:scope=internal:level=high],"save point %s is older than current location %s" ErrCheckpointInvalidTableFile,[code=24003:class=checkpoint:scope=internal:level=medium],"invalid db table sql file - %s" ErrCheckpointDBNotExistInFile,[code=24004:class=checkpoint:scope=internal:level=medium],"db (%s) not exist in data files, but in checkpoint" ErrCheckpointTableNotExistInFile,[code=24005:class=checkpoint:scope=internal:level=medium],"table (%s) not exist in db (%s) data files, but in checkpoint" diff --git a/pkg/conn/baseconn.go b/pkg/conn/baseconn.go index ee4b0d8276..30c596b245 100644 --- a/pkg/conn/baseconn.go +++ b/pkg/conn/baseconn.go @@ -179,6 +179,10 @@ func (conn *BaseConn) ExecuteSQLWithIgnoreError(tctx *tcontext.Context, ignoreEr zap.String("argument", utils.TruncateInterface(arg, -1)), log.ShortError(rerr)) } + fmt.Println(query) + fmt.Println(arg) + fmt.Println("_____") + fmt.Println(len(arg)) // we should return the exec err, instead of the rollback rerr. 
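Two changes in this area interlock: generateStreamer now simply delegates the StartSyncGTID error, and Origin() (pkg/gtid, previous patch) returns an empty go-mysql set when nothing has been recorded. Under those assumptions, and assuming location.GTIDSet is non-nil, the GTID start path reduces to the sketch below (the helper name is invented):

func startFromLocationSketch(r *replication.BinlogSyncer, loc binlog.Location) (*replication.BinlogStreamer, error) {
	// Origin converts the pkg/gtid wrapper back to go-mysql's GTIDSet;
	// an empty set asks the upstream for everything it still retains.
	return r.StartSyncGTID(loc.GTIDSet.Origin())
}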
return i, terror.ErrDBExecuteFailed.Delegate(err, utils.TruncateString(query, -1)) } diff --git a/syncer/checkpoint_test.go b/syncer/checkpoint_test.go index 2d5b2003bf..3d267a0373 100644 --- a/syncer/checkpoint_test.go +++ b/syncer/checkpoint_test.go @@ -18,7 +18,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strings" "github.com/pingcap/dm/dm/config" "github.com/pingcap/dm/pkg/binlog" @@ -57,6 +56,7 @@ func (s *testCheckpointSuite) SetUpSuite(c *C) { ServerID: 101, MetaSchema: "test", Name: "syncer_checkpoint_ut", + Flavor: mysql.MySQLFlavor, } log.SetLevel(zapcore.ErrorLevel) @@ -165,7 +165,7 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { cp.SaveGlobalPoint(binlog.Location{Position: pos1}) s.mock.ExpectBegin() - s.mock.ExpectExec("(162)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos1.Name, pos1.Pos, []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) + s.mock.ExpectExec("(162)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos1.Name, pos1.Pos, "", []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) s.mock.ExpectCommit() err = cp.FlushPointsExcept(tctx, nil, nil, nil) c.Log(errors.ErrorStack(err)) @@ -206,7 +206,7 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { // flush + rollback s.mock.ExpectBegin() - s.mock.ExpectExec("(202)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos2.Name, pos2.Pos, []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) + s.mock.ExpectExec("(202)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos2.Name, pos2.Pos, "", []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) s.mock.ExpectCommit() err = cp.FlushPointsExcept(tctx, nil, nil, nil) c.Assert(err, IsNil) @@ -218,8 +218,8 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { pos3 := pos2 pos3.Pos = pos2.Pos + 1000 // > pos2 to enable save cp.SaveGlobalPoint(binlog.Location{Position: pos3}) - columns := []string{"cp_schema", "cp_table", "binlog_name", "binlog_pos", "table_info", "is_global"} - s.mock.ExpectQuery(loadCheckPointSQL).WithArgs(cpid).WillReturnRows(sqlmock.NewRows(columns).AddRow("", "", pos2.Name, pos2.Pos, []byte("null"), true)) + columns := []string{"cp_schema", "cp_table", "binlog_name", "binlog_pos", "binlog_gtid", "table_info", "is_global"} + s.mock.ExpectQuery(loadCheckPointSQL).WithArgs(cpid).WillReturnRows(sqlmock.NewRows(columns).AddRow("", "", pos2.Name, pos2.Pos, "", []byte("null"), true)) err = cp.Load(tctx, s.tracker) c.Assert(err, IsNil) c.Assert(cp.GlobalPoint().Position, Equals, pos2) @@ -242,14 +242,14 @@ func (s *testCheckpointSuite) testGlobalCheckPoint(c *C, cp CheckPoint) { s.mock.ExpectCommit() err = cp.Clear(tctx) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) - c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.GlobalPoint().Position, Equals, binlog.MinPosition) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, binlog.MinPosition) s.mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) err = cp.Load(tctx, s.tracker) c.Assert(err, IsNil) - c.Assert(cp.GlobalPoint(), Equals, binlog.NewLocation("")) - c.Assert(cp.FlushedGlobalPoint(), Equals, binlog.NewLocation("")) + c.Assert(cp.GlobalPoint().Position, Equals, binlog.MinPosition) + c.Assert(cp.FlushedGlobalPoint().Position, Equals, binlog.MinPosition) } func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { @@ -289,7 +289,7 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { 
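For readers matching the WithArgs(...) expectations above against the real statement: the checkpoint table gained a binlog_gtid column in this series, so every flush row now carries eight positional arguments. A sketch of the layout (names follow the loadCheckPointSQL columns and the genUpdateSQL variables shown in these patches; the SQL string itself is an assumption):

args := []interface{}{
	cpid,     // id of the checkpoint owner
	cpSchema, // cp_schema, "" for the global checkpoint
	cpTable,  // cp_table, "" for the global checkpoint
	pos.Name, // binlog_name
	pos.Pos,  // binlog_pos
	gsetStr,  // binlog_gtid, "" when no GTID set is known
	tiBytes,  // table_info, []byte("null") when no schema is tracked
	isGlobal, // is_global
}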
// flush + rollback s.mock.ExpectBegin() - s.mock.ExpectExec("(284)?"+flushCheckPointSQL).WithArgs(cpid, schema, table, pos2.Name, pos2.Pos, sqlmock.AnyArg(), false).WillReturnResult(sqlmock.NewResult(0, 1)) + s.mock.ExpectExec("(284)?"+flushCheckPointSQL).WithArgs(cpid, schema, table, pos2.Name, pos2.Pos, "", sqlmock.AnyArg(), false).WillReturnResult(sqlmock.NewResult(0, 1)) s.mock.ExpectCommit() err = cp.FlushPointsExcept(tctx, nil, nil, nil) c.Assert(err, IsNil) @@ -315,8 +315,7 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { func() { defer func() { r := recover() - matchStr := fmt.Sprintf("table checkpoint %+v less than global checkpoint %+v.*", pos1, cp.GlobalPoint()) - matchStr = strings.ReplaceAll(strings.ReplaceAll(matchStr, "(", "\\("), ")", "\\)") + matchStr := ".*less than global checkpoint.*" c.Assert(r, Matches, matchStr) }() cp.SaveGlobalPoint(binlog.Location{Position: pos2}) @@ -325,7 +324,7 @@ func (s *testCheckpointSuite) testTableCheckPoint(c *C, cp CheckPoint) { // flush but except + rollback s.mock.ExpectBegin() - s.mock.ExpectExec("(320)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos2.Name, pos2.Pos, []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) + s.mock.ExpectExec("(320)?"+flushCheckPointSQL).WithArgs(cpid, "", "", pos2.Name, pos2.Pos, "", []byte("null"), true).WillReturnResult(sqlmock.NewResult(0, 1)) s.mock.ExpectCommit() err = cp.FlushPointsExcept(tctx, [][]string{{schema, table}}, nil, nil) c.Assert(err, IsNil) From c5bfffaa928f9a81094faa6a37216a79279d65f0 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 16:32:38 +0800 Subject: [PATCH 23/35] add help function --- pkg/binlog/position.go | 34 +++++++++++++++++------------ syncer/checkpoint.go | 11 ++++------ syncer/sharding_group.go | 46 +++++++--------------------------------- 3 files changed, 33 insertions(+), 58 deletions(-) diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go index dd96d481a3..2199673057 100644 --- a/pkg/binlog/position.go +++ b/pkg/binlog/position.go @@ -186,34 +186,42 @@ func NewLocation(flavor string) Location { } func (l Location) String() string { + return fmt.Sprintf("position: %v, gtid-set: %s", l.Position, l.GTIDSetStr()) +} + +// GTIDSetStr returns gtid set's string +func (l Location) GTIDSetStr() string { gsetStr := "" if l.GTIDSet != nil { gsetStr = l.GTIDSet.String() } - return fmt.Sprintf("position: %v, gtid-set: %s", l.Position, gsetStr) + + return gsetStr } // Clone clones a same Location func (l Location) Clone() Location { + return l.CloneWithFlavor("") +} + +// CloneWithFlavor clones the location, and if the GTIDSet is nil, will create a GTIDSet with specified flavor. 
+func (l Location) CloneWithFlavor(flavor string) Location { var newGTIDSet gtid.Set + if len(flavor) != 0 { + newGTIDSet = gtid.MinGTIDSet(flavor) + } + if l.GTIDSet != nil { newGTIDSet = l.GTIDSet.Clone() } return Location{ - Position: l.Position, - GTIDSet: newGTIDSet, - } -} - -// CloneGTIDSet clones location's gtid set -func (l Location) CloneGTIDSet() gtid.Set { - if l.GTIDSet != nil { - return l.GTIDSet.Clone() + Position: gmysql.Position{ + Name: l.Position.Name, + Pos: l.Position.Pos, + }, + GTIDSet: newGTIDSet, } - - // return a min mysql gtid set to avoid nil pointer panic - return gtid.MinGTIDSet("") } // CompareLocation returns: diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index 608c77e254..fdd0fab34d 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -79,8 +79,8 @@ func (b *binlogPoint) save(location binlog.Location, ti *model.TableInfo) error defer b.Unlock() if binlog.CompareLocation(location, b.location) < 0 { - // support to save equal pos, but not older pos - return terror.ErrCheckpointSaveInvalidPos.Generate(location, b.location.Position) + // support to save equal location, but not older location + return terror.ErrCheckpointSaveInvalidPos.Generate(location, b.location) } b.location = location @@ -740,11 +740,8 @@ func (cp *RemoteCheckPoint) genUpdateSQL(cpSchema, cpTable string, location binl if len(tiBytes) == 0 { tiBytes = []byte("null") } - gsetStr := "" - if location.GTIDSet != nil { - gsetStr = location.GTIDSet.String() - } - args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, gsetStr, tiBytes, isGlobal} + + args := []interface{}{cp.id, cpSchema, cpTable, location.Position.Name, location.Position.Pos, location.GTIDSetStr(), tiBytes, isGlobal} return sql2, args } diff --git a/syncer/sharding_group.go b/syncer/sharding_group.go index a80a301df4..c1f997b837 100644 --- a/syncer/sharding_group.go +++ b/syncer/sharding_group.go @@ -80,11 +80,9 @@ import ( "github.com/pingcap/dm/pkg/binlog" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" - "github.com/pingcap/dm/pkg/gtid" "github.com/pingcap/dm/pkg/terror" shardmeta "github.com/pingcap/dm/syncer/sharding-meta" - "github.com/siddontang/go-mysql/mysql" "go.uber.org/zap" ) @@ -316,34 +314,15 @@ func (sg *ShardingGroup) FirstLocationUnresolved() *binlog.Location { sg.RLock() defer sg.RUnlock() if sg.remain < len(sg.sources) && sg.firstLocation != nil { - // create a new pos to return - gset := gtid.MinGTIDSet(sg.flavor) - if sg.firstLocation.GTIDSet != nil { - gset = sg.firstLocation.GTIDSet.Clone() - } - - return &binlog.Location{ - Position: mysql.Position{ - Name: sg.firstLocation.Position.Name, - Pos: sg.firstLocation.Position.Pos, - }, - GTIDSet: gset, - } + // create a new location to return + location := sg.firstLocation.CloneWithFlavor(sg.flavor) + return &location } item := sg.meta.GetGlobalActiveDDL() if item != nil { // make a new copy - gset := gtid.MinGTIDSet(sg.flavor) - if item.FirstLocation.GTIDSet != nil { - gset = item.FirstLocation.GTIDSet.Clone() - } - return &binlog.Location{ - Position: mysql.Position{ - Name: item.FirstLocation.Position.Name, - Pos: item.FirstLocation.Position.Pos, - }, - GTIDSet: gset, - } + location := item.FirstLocation.CloneWithFlavor(sg.flavor) + return &location } return nil } @@ -353,18 +332,9 @@ func (sg *ShardingGroup) FirstEndPosUnresolved() *binlog.Location { sg.RLock() defer sg.RUnlock() if sg.remain < len(sg.sources) && sg.firstEndLocation != nil { - // create a new pos to 
return - gset := gtid.MinGTIDSet(sg.flavor) - if sg.firstEndLocation.GTIDSet != nil { - gset = sg.firstEndLocation.GTIDSet.Clone() - } - return &binlog.Location{ - Position: mysql.Position{ - Name: sg.firstEndLocation.Position.Name, - Pos: sg.firstEndLocation.Position.Pos, - }, - GTIDSet: gset, - } + // create a new location to return + location := sg.firstEndLocation.CloneWithFlavor(sg.flavor) + return &location } return nil } From d8dea9c6da7e49d66448fec8ad6974291240a382 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 17:39:57 +0800 Subject: [PATCH 24/35] remove useless code --- pkg/conn/baseconn.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/conn/baseconn.go b/pkg/conn/baseconn.go index 30c596b245..ee4b0d8276 100644 --- a/pkg/conn/baseconn.go +++ b/pkg/conn/baseconn.go @@ -179,10 +179,6 @@ func (conn *BaseConn) ExecuteSQLWithIgnoreError(tctx *tcontext.Context, ignoreEr zap.String("argument", utils.TruncateInterface(arg, -1)), log.ShortError(rerr)) } - fmt.Println(query) - fmt.Println(arg) - fmt.Println("_____") - fmt.Println(len(arg)) // we should return the exec err, instead of the rollback rerr. return i, terror.ErrDBExecuteFailed.Delegate(err, utils.TruncateString(query, -1)) } From a59f08784e1083d4ab1f8ee39ccdd841ab333ce9 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 18:47:39 +0800 Subject: [PATCH 25/35] address comment --- syncer/checkpoint.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index fdd0fab34d..461c8180d2 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -242,12 +242,13 @@ type RemoteCheckPoint struct { // NewRemoteCheckPoint creates a new RemoteCheckPoint func NewRemoteCheckPoint(tctx *tcontext.Context, cfg *config.SubTaskConfig, id string) CheckPoint { + location := binlog.NewLocation(cfg.Flavor) cp := &RemoteCheckPoint{ cfg: cfg, tableName: dbutil.TableName(cfg.MetaSchema, cfg.Name+"_syncer_checkpoint"), id: id, points: make(map[string]map[string]*binlogPoint), - globalPoint: newBinlogPoint(binlog.NewLocation(cfg.Flavor), binlog.NewLocation(cfg.Flavor), nil, nil), + globalPoint: newBinlogPoint(location, location, nil, nil), logCtx: tcontext.Background().WithLogger(tctx.L().WithFields(zap.String("component", "remote checkpoint"))), } @@ -293,8 +294,8 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error { return err } - cp.globalPoint = newBinlogPoint(binlog.NewLocation(cp.cfg.Flavor), binlog.NewLocation(cp.cfg.Flavor), nil, nil) - + location := binlog.NewLocation(cp.cfg.Flavor) + cp.globalPoint = newBinlogPoint(location, location, nil, nil) cp.points = make(map[string]map[string]*binlogPoint) return nil @@ -692,9 +693,6 @@ func (cp *RemoteCheckPoint) LoadMeta() error { if err != nil { return err } - if gset == nil { - gset = gtid.MinGTIDSet(cp.cfg.Flavor) - } location = &binlog.Location{ Position: mysql.Position{ From 072222a3222db641e5d0dd0553fd0a9032411af7 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 8 Mar 2020 20:31:24 +0800 Subject: [PATCH 26/35] some test use gtid && minor fix --- syncer/syncer.go | 5 +++-- tests/ha_master/conf/source1.toml | 2 +- tests/http_apis/conf/source1.toml | 2 +- tests/safe_mode/conf/source1.toml | 2 +- tests/sequence_safe_mode/conf/source1.toml | 2 +- tests/sequence_sharding/conf/source1.toml | 2 +- tests/start_task/conf/source1.toml | 2 +- 7 files changed, 9 insertions(+), 8 deletions(-) diff --git a/syncer/syncer.go b/syncer/syncer.go index f2438acb42..5224a49b0c 100644 
--- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1520,6 +1520,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } s.tctx.L().Info("", zap.String("event", "query"), zap.String("statement", sql), zap.String("schema", usedSchema), zap.Stringer("last location", ec.lastLocation), log.WrapStringerField("location", ec.currentLocation)) + lastGTIDSet := ec.lastLocation.GTIDSet *ec.lastLocation = ec.currentLocation.Clone() // update lastLocation, because we have checked `isDDL` *ec.latestOp = ddl @@ -1665,7 +1666,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } for _, tbl := range targetTbls { // save checkpoint of each table - s.saveTablePoint(tbl.Schema, tbl.Name, *ec.currentLocation) + s.saveTablePoint(tbl.Schema, tbl.Name, ec.currentLocation.Clone()) } for _, table := range onlineDDLTableNames { @@ -1695,7 +1696,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e Name: ec.currentLocation.Position.Name, Pos: ec.currentLocation.Position.Pos - ec.header.EventSize, }, - GTIDSet: ec.lastLocation.GTIDSet.Clone(), + GTIDSet: lastGTIDSet, } source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name) diff --git a/tests/ha_master/conf/source1.toml b/tests/ha_master/conf/source1.toml index dfd1cf61c8..0e4e5cb00f 100644 --- a/tests/ha_master/conf/source1.toml +++ b/tests/ha_master/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" enable-relay = false diff --git a/tests/http_apis/conf/source1.toml b/tests/http_apis/conf/source1.toml index e9748fb168..7b08068ef2 100644 --- a/tests/http_apis/conf/source1.toml +++ b/tests/http_apis/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" diff --git a/tests/safe_mode/conf/source1.toml b/tests/safe_mode/conf/source1.toml index f9e754008c..ef1083f14a 100644 --- a/tests/safe_mode/conf/source1.toml +++ b/tests/safe_mode/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" diff --git a/tests/sequence_safe_mode/conf/source1.toml b/tests/sequence_safe_mode/conf/source1.toml index f9e754008c..ef1083f14a 100644 --- a/tests/sequence_safe_mode/conf/source1.toml +++ b/tests/sequence_safe_mode/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" diff --git a/tests/sequence_sharding/conf/source1.toml b/tests/sequence_sharding/conf/source1.toml index e9748fb168..7b08068ef2 100644 --- a/tests/sequence_sharding/conf/source1.toml +++ b/tests/sequence_sharding/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" diff --git a/tests/start_task/conf/source1.toml b/tests/start_task/conf/source1.toml index e9748fb168..7b08068ef2 100644 --- a/tests/start_task/conf/source1.toml +++ b/tests/start_task/conf/source1.toml @@ -2,7 +2,7 @@ source-id = "mysql-replica-01" flavor = "" -enable-gtid = false +enable-gtid = true relay-binlog-name = "" relay-binlog-gtid = "" From 916ffa1a3a936a7148aab26e8195a091de10033b Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Mon, 9 Mar 2020 17:45:33 +0800 Subject: [PATCH 27/35] 
ignore fake rotate event

---
 syncer/syncer.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/syncer/syncer.go b/syncer/syncer.go
index 5224a49b0c..249b974610 100644
--- a/syncer/syncer.go
+++ b/syncer/syncer.go
@@ -1291,6 +1291,11 @@ type eventContext struct {
 // TODO: Further split into smaller functions and group common arguments into
 // a context struct.
 func (s *Syncer) handleRotateEvent(ev *replication.RotateEvent, ec eventContext) error {
+	if ec.header.Timestamp == 0 || ec.header.LogPos == 0 {
+		// a fake rotate event (sent with zero timestamp and log_pos when the binlog dump starts); ignore it
+		return nil
+	}
+
 	*ec.currentLocation = binlog.Location{
 		Position: mysql.Position{
 			Name: string(ev.NextLogName),

From 96483974520b150ff62c9cd0cad1af59cebf84ce Mon Sep 17 00:00:00 2001
From: WangXiangUSTC
Date: Mon, 9 Mar 2020 17:53:26 +0800
Subject: [PATCH 28/35] update current location before redirect streamer

---
 syncer/syncer.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/syncer/syncer.go b/syncer/syncer.go
index 249b974610..a939d74123 100644
--- a/syncer/syncer.go
+++ b/syncer/syncer.go
@@ -1087,6 +1087,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 					return err2
 				}
 
+				currentLocation = shardingReSync.currLocation.Clone()
 				err2 = s.streamerController.RedirectStreamer(s.tctx, nextLocation.Clone())
 				if err2 != nil {
 					return err2
@@ -1110,6 +1111,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 			savedGlobalLastLocation = lastLocation.Clone() // save global last location
 			lastLocation = shardingReSync.currLocation.Clone()
 
+			currentLocation = shardingReSync.currLocation.Clone()
 			err = s.streamerController.RedirectStreamer(s.tctx, shardingReSync.currLocation.Clone())
 			if err != nil {
 				return err

From 9f48852df1dd5f34b133d3460135a21492bd1be7 Mon Sep 17 00:00:00 2001
From: WangXiangUSTC
Date: Mon, 9 Mar 2020 19:17:35 +0800
Subject: [PATCH 29/35] address comment

---
 syncer/checkpoint.go | 6 ++----
 syncer/syncer.go     | 2 +-
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go
index 461c8180d2..b930cd5384 100644
--- a/syncer/checkpoint.go
+++ b/syncer/checkpoint.go
@@ -242,13 +242,12 @@ type RemoteCheckPoint struct {
 
 // NewRemoteCheckPoint creates a new RemoteCheckPoint
 func NewRemoteCheckPoint(tctx *tcontext.Context, cfg *config.SubTaskConfig, id string) CheckPoint {
-	location := binlog.NewLocation(cfg.Flavor)
 	cp := &RemoteCheckPoint{
 		cfg:         cfg,
 		tableName:   dbutil.TableName(cfg.MetaSchema, cfg.Name+"_syncer_checkpoint"),
 		id:          id,
 		points:      make(map[string]map[string]*binlogPoint),
-		globalPoint: newBinlogPoint(location, location, nil, nil),
+		globalPoint: newBinlogPoint(binlog.NewLocation(cfg.Flavor), binlog.NewLocation(cfg.Flavor), nil, nil),
 		logCtx:      tcontext.Background().WithLogger(tctx.L().WithFields(zap.String("component", "remote checkpoint"))),
 	}
 
@@ -294,8 +293,7 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error {
 		return err
 	}
 
-	location := binlog.NewLocation(cp.cfg.Flavor)
-	cp.globalPoint = newBinlogPoint(location, location, nil, nil)
+	cp.globalPoint = newBinlogPoint(binlog.NewLocation(cp.cfg.Flavor), binlog.NewLocation(cp.cfg.Flavor), nil, nil)
 	cp.points = make(map[string]map[string]*binlogPoint)
 
 	return nil
diff --git a/syncer/syncer.go b/syncer/syncer.go
index a939d74123..fb3da00184 100644
--- a/syncer/syncer.go
+++ b/syncer/syncer.go
@@ -1087,7 +1087,7 @@ func (s *Syncer) Run(ctx context.Context) (err error) {
 					return err2
 				}
 
-				currentLocation = shardingReSync.currLocation.Clone()
+				currentLocation = nextLocation.Clone()
 				err2 =
s.streamerController.RedirectStreamer(s.tctx, nextLocation.Clone()) if err2 != nil { return err2 From d5584c7c01033f581cb771d62a8e296d4946a10b Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Mon, 9 Mar 2020 19:58:54 +0800 Subject: [PATCH 30/35] add some clone --- syncer/syncer.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/syncer/syncer.go b/syncer/syncer.go index fb3da00184..31c89d75c6 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -1410,7 +1410,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err // for RowsEvent, one event may have multi SQLs and multi keys, (eg. INSERT INTO t1 VALUES (11, 12), (21, 22) ) // to cover them dispatched to different channels, we still apply operator here // ugly, but I have no better solution yet. - applied, sqls, err = s.tryApplySQLOperator(*ec.currentLocation, nil) // forbidden sql-pattern for DMLs + applied, sqls, err = s.tryApplySQLOperator(ec.currentLocation.Clone(), nil) // forbidden sql-pattern for DMLs if err != nil { return err } @@ -1644,7 +1644,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e if !s.cfg.IsSharding { s.tctx.L().Info("start to handle ddls in normal mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation)) // try apply SQL operator before addJob. now, one query event only has one DDL job, if updating to multi DDL jobs, refine this. - applied, appliedSQLs, applyErr := s.tryApplySQLOperator(*ec.currentLocation, needHandleDDLs) + applied, appliedSQLs, applyErr := s.tryApplySQLOperator(ec.currentLocation.Clone(), needHandleDDLs) if applyErr != nil { return terror.Annotatef(applyErr, "try apply SQL operator on binlog-location %s with DDLs %v", ec.currentLocation, needHandleDDLs) } @@ -1720,7 +1720,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e } annotate = "add table to shard group" default: - needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, startLocation, *ec.currentLocation, needHandleDDLs) + needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, startLocation.Clone(), ec.currentLocation.Clone(), needHandleDDLs) if err != nil { return err } @@ -1752,7 +1752,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e // for non-last sharding DDL's table, this checkpoint will be used to skip binlog event when re-syncing // NOTE: when last sharding DDL executed, all this checkpoints will be flushed in the same txn s.tctx.L().Info("save table checkpoint for source", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) - s.saveTablePoint(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name, *ec.currentLocation) + s.saveTablePoint(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name, ec.currentLocation.Clone()) if !synced { s.tctx.L().Info("source shard group is not synced", zap.String("event", "query"), zap.String("source", source), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) return nil @@ -1832,7 +1832,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec 
eventContext) e s.tctx.L().Info("start to handle ddls in shard mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), zap.Stringer("start location", startLocation), log.WrapStringerField("end location", ec.currentLocation)) // try apply SQL operator before addJob. now, one query event only has one DDL job, if updating to multi DDL jobs, refine this. - applied, appliedSQLs, err := s.tryApplySQLOperator(*ec.currentLocation, needHandleDDLs) + applied, appliedSQLs, err := s.tryApplySQLOperator(ec.currentLocation.Clone(), needHandleDDLs) if err != nil { return terror.Annotatef(err, "try apply SQL operator on binlog-location %s with DDLs %v", ec.currentLocation, needHandleDDLs) } From 53bef7530b5e9218480e247c55641c3607037845 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Mon, 9 Mar 2020 21:32:45 +0800 Subject: [PATCH 31/35] add location clone --- syncer/checkpoint.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index b930cd5384..e20b6288cb 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -634,7 +634,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T } if isGlobal { if binlog.CompareLocation(location, binlog.NewLocation(cp.cfg.Flavor)) > 0 { - cp.globalPoint = newBinlogPoint(location, location, nil, nil) + cp.globalPoint = newBinlogPoint(location.Clone(), location.Clone(), nil, nil) cp.logCtx.L().Info("fetch global checkpoint from DB", log.WrapStringerField("global checkpoint", cp.globalPoint)) } continue // skip global checkpoint @@ -659,7 +659,7 @@ func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context, schemaTracker *schema.T mSchema = make(map[string]*binlogPoint) cp.points[cpSchema] = mSchema } - mSchema[cpTable] = newBinlogPoint(location, location, &ti, &ti) + mSchema[cpTable] = newBinlogPoint(location.Clone(), location.Clone(), &ti, &ti) } return terror.WithScope(terror.DBErrorAdapt(rows.Err(), terror.ErrDBDriverError), terror.ScopeDownstream) @@ -683,8 +683,7 @@ func (cp *RemoteCheckPoint) LoadMeta() error { // load meta from task config if cp.cfg.Meta == nil { cp.logCtx.L().Warn("don't set meta in increment task-mode") - location1 := binlog.NewLocation(cp.cfg.Flavor) - cp.globalPoint = newBinlogPoint(location1, location1, nil, nil) + cp.globalPoint = newBinlogPoint(binlog.NewLocation(cp.cfg.Flavor), binlog.NewLocation(cp.cfg.Flavor), nil, nil) return nil } gset, err := gtid.ParserGTID(cp.cfg.Flavor, cp.cfg.Meta.BinLogGTID) @@ -706,7 +705,7 @@ func (cp *RemoteCheckPoint) LoadMeta() error { // if meta loaded, we will start syncing from meta's pos if location != nil { - cp.globalPoint = newBinlogPoint(*location, *location, nil, nil) + cp.globalPoint = newBinlogPoint(location.Clone(), location.Clone(), nil, nil) cp.logCtx.L().Info("loaded checkpoints from meta", log.WrapStringerField("global checkpoint", cp.globalPoint)) } From 5605ab88ccf090b55e730ae396b4a5877928deae Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Mon, 9 Mar 2020 21:53:59 +0800 Subject: [PATCH 32/35] address comment --- dm/worker/worker.go | 1 - pkg/binlog/position.go | 6 ++---- syncer/checkpoint.go | 2 +- syncer/syncer.go | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/dm/worker/worker.go b/dm/worker/worker.go index 4871ce29a3..773ca30786 100644 --- a/dm/worker/worker.go +++ b/dm/worker/worker.go @@ -763,7 +763,6 @@ func (w *Worker) copyConfigFromWorker(cfg *config.SubTaskConfig) { cfg.ServerID = w.cfg.ServerID 
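Patches 30 through 32 add Clone() at almost every hand-off of a Location. The underlying reason, sketched here with the binlogPoint type from checkpoint.go (a minimal illustration, not an actual call site): gtid.Set implementations wrap a mutable set, so an uncloned Location shared between the syncer's working state and a saved checkpoint would advance both together.

// Without the Clone, b.location.GTIDSet aliases the caller's set, and
// any later update to that set silently moves the saved checkpoint too.
func (b *binlogPoint) saveSketch(loc binlog.Location) {
	b.location = loc.Clone() // copies the GTID set as well as the position
}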
cfg.RelayDir = w.cfg.RelayDir
 	cfg.EnableGTID = w.cfg.EnableGTID
-	cfg.SyncerConfig.EnableGTID = w.cfg.EnableGTID
 	cfg.UseRelay = w.cfg.EnableRelay
 
 	// we can remove this from SubTaskConfig later, because syncer will always read from relay
diff --git a/pkg/binlog/position.go b/pkg/binlog/position.go
index 2199673057..a7f1f3c0a2 100644
--- a/pkg/binlog/position.go
+++ b/pkg/binlog/position.go
@@ -207,12 +207,10 @@ func (l Location) Clone() Location {
 // CloneWithFlavor clones the location, and if the GTIDSet is nil, will create a GTIDSet with specified flavor.
 func (l Location) CloneWithFlavor(flavor string) Location {
 	var newGTIDSet gtid.Set
-	if len(flavor) != 0 {
-		newGTIDSet = gtid.MinGTIDSet(flavor)
-	}
-
 	if l.GTIDSet != nil {
 		newGTIDSet = l.GTIDSet.Clone()
+	} else if len(flavor) != 0 {
+		newGTIDSet = gtid.MinGTIDSet(flavor)
 	}
 
 	return Location{
diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go
index e20b6288cb..e87bab9f61 100644
--- a/syncer/checkpoint.go
+++ b/syncer/checkpoint.go
@@ -303,7 +303,7 @@ func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error {
 func (cp *RemoteCheckPoint) SaveTablePoint(sourceSchema, sourceTable string, point binlog.Location, ti *model.TableInfo) {
 	cp.Lock()
 	defer cp.Unlock()
-	cp.saveTablePoint(sourceSchema, sourceTable, point, ti)
+	cp.saveTablePoint(sourceSchema, sourceTable, point.Clone(), ti)
 }
 
 // saveTablePoint saves single table's checkpoint without mutex.Lock
diff --git a/syncer/syncer.go b/syncer/syncer.go
index 31c89d75c6..fe35091fdb 100644
--- a/syncer/syncer.go
+++ b/syncer/syncer.go
@@ -687,7 +687,7 @@ func (s *Syncer) saveTablePoint(db, table string, location binlog.Location) {
 			zap.Stringer("location", location),
 			zap.Error(err))
 	}
-	s.checkpoint.SaveTablePoint(db, table, location, ti)
+	s.checkpoint.SaveTablePoint(db, table, location.Clone(), ti)
 }
 
 func (s *Syncer) addJob(job *job) error {

From 35248aa2ad3264b58440ee8b3358e7993af014b1 Mon Sep 17 00:00:00 2001
From: WangXiangUSTC
Date: Mon, 9 Mar 2020 22:02:07 +0800
Subject: [PATCH 33/35] address comment

---
 syncer/checkpoint_test.go         |  1 +
 syncer/sharding-meta/shardmeta.go | 18 +++++++++---------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/syncer/checkpoint_test.go b/syncer/checkpoint_test.go
index 3d267a0373..d09fdf9ae1 100644
--- a/syncer/checkpoint_test.go
+++ b/syncer/checkpoint_test.go
@@ -45,6 +45,7 @@ var (
 
 var _ = Suite(&testCheckpointSuite{})
 
+// TODO: add test cases for GTID
 type testCheckpointSuite struct {
 	cfg  *config.SubTaskConfig
 	mock sqlmock.Sqlmock
diff --git a/syncer/sharding-meta/shardmeta.go b/syncer/sharding-meta/shardmeta.go
index a7765e7176..44a83c46e1 100644
--- a/syncer/sharding-meta/shardmeta.go
+++ b/syncer/sharding-meta/shardmeta.go
@@ -46,10 +46,18 @@ type DDLItem struct {
 
 // NewDDLItem creates a new DDLItem
 func NewDDLItem(location binlog.Location, ddls []string, source string) *DDLItem {
+	gsetStr := ""
+	if location.GTIDSet != nil {
+		gsetStr = location.GTIDSet.String()
+	}
+
 	return &DDLItem{
-		FirstLocation: location,
+		FirstLocation: location.Clone(),
 		DDLs:          ddls,
 		Source:        source,
+
+		FirstPosition: location.Position,
+		FirstGTIDSet:  gsetStr,
 	}
 }
 
@@ -257,14 +265,6 @@ func (meta *ShardingMeta) ActiveDDLFirstLocation() (binlog.Location, error) {
 
 // FlushData returns sharding meta flush SQL and args
 func (meta *ShardingMeta) FlushData(sourceID, tableID string) ([]string, [][]interface{}) {
-	// set FirstPosition and FirstGTIDSet for json marshal
-	for _, item := range meta.global.Items {
-		item.FirstPosition =
item.FirstLocation.Position - if item.FirstLocation.GTIDSet != nil { - item.FirstGTIDSet = item.FirstLocation.GTIDSet.String() - } - } - if len(meta.global.Items) == 0 { sql2 := fmt.Sprintf("DELETE FROM `%s`.`%s` where source_id=? and target_table_id=?", meta.schema, meta.table) args2 := []interface{}{sourceID, tableID} From e0b6cb5588549544f953ec1fbf02a56758099e30 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Mon, 9 Mar 2020 22:10:44 +0800 Subject: [PATCH 34/35] address comment --- syncer/checkpoint.go | 6 +++--- syncer/syncer.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/syncer/checkpoint.go b/syncer/checkpoint.go index e87bab9f61..fa1dceaf3a 100644 --- a/syncer/checkpoint.go +++ b/syncer/checkpoint.go @@ -393,12 +393,12 @@ func (cp *RemoteCheckPoint) IsNewerTablePoint(sourceSchema, sourceTable string, } // SaveGlobalPoint implements CheckPoint.SaveGlobalPoint -func (cp *RemoteCheckPoint) SaveGlobalPoint(pos binlog.Location) { +func (cp *RemoteCheckPoint) SaveGlobalPoint(location binlog.Location) { cp.Lock() defer cp.Unlock() - cp.logCtx.L().Debug("save global checkpoint", zap.Stringer("position", pos)) - if err := cp.globalPoint.save(pos, nil); err != nil { + cp.logCtx.L().Debug("save global checkpoint", zap.Stringer("location", location)) + if err := cp.globalPoint.save(location.Clone(), nil); err != nil { cp.logCtx.L().Error("fail to save global checkpoint", log.ShortError(err)) } } diff --git a/syncer/syncer.go b/syncer/syncer.go index fe35091fdb..c7986a78b5 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -754,7 +754,7 @@ func (s *Syncer) saveGlobalPoint(globalLocation binlog.Location) { if s.cfg.IsSharding { globalLocation = s.sgk.AdjustGlobalLocation(globalLocation) } - s.checkpoint.SaveGlobalPoint(globalLocation) + s.checkpoint.SaveGlobalPoint(globalLocation.Clone()) } func (s *Syncer) resetShardingGroup(schema, table string) { From ca671b55c3ed5259f6f854200a84f53d4e04b6d2 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Mon, 9 Mar 2020 22:29:40 +0800 Subject: [PATCH 35/35] add wait time --- dm/master/server_test.go | 8 ++++---- tests/sequence_sharding/run.sh | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/dm/master/server_test.go b/dm/master/server_test.go index d188d17c88..346cc79845 100644 --- a/dm/master/server_test.go +++ b/dm/master/server_test.go @@ -252,7 +252,7 @@ func testMockScheduler(ctx context.Context, wg *sync.WaitGroup, c *check.C, sour defer wg.Done() c.Assert(ha.KeepAlive(ctx, etcdTestCli, workerName, keepAliveTTL), check.IsNil) }(ctx1, name) - c.Assert(utils.WaitSomething(30, 10*time.Millisecond, func() bool { + c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool { w := scheduler2.GetWorkerBySource(sources[i]) return w != nil && w.BaseInfo().Name == name }), check.IsTrue) @@ -827,7 +827,7 @@ func (t *testMaster) TestServer(c *check.C) { cancel() s.Close() - c.Assert(utils.WaitSomething(30, 10*time.Millisecond, func() bool { + c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool { return s.closed.Get() }), check.IsTrue) } @@ -861,7 +861,7 @@ func (t *testMaster) TestJoinMember(c *check.C) { defer s1.Close() // wait the first one become the leader - c.Assert(utils.WaitSomething(30, 10*time.Millisecond, func() bool { + c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool { return s1.election.IsLeader() }), check.IsTrue) @@ -971,7 +971,7 @@ func (t *testMaster) TestOperateSource(c *check.C) { defer wg.Done() c.Assert(ha.KeepAlive(ctx, s1.etcdClient, workerName, 
keepAliveTTL), check.IsNil) }(ctx1, workerName) - c.Assert(utils.WaitSomething(30, 10*time.Millisecond, func() bool { + c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool { w := s1.scheduler.GetWorkerBySource(sourceID) return w != nil && w.BaseInfo().Name == workerName }), check.IsTrue) diff --git a/tests/sequence_sharding/run.sh b/tests/sequence_sharding/run.sh index 6e895643f3..bb4ef780a2 100755 --- a/tests/sequence_sharding/run.sh +++ b/tests/sequence_sharding/run.sh @@ -33,6 +33,7 @@ function run() { run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 run_sql_file $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 + sleep 3 # use sync_diff_inspector to check data now! check_sync_diff $WORK_DIR $cur/conf/diff_config.toml }
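The flakiness fixes in this final patch only widen polling budgets. For context, utils.WaitSomething is a plain retry loop along these lines (reconstructed from its call shape; the real implementation may differ), so moving from 30×10ms to 30×100ms raises the wait ceiling from roughly 0.3s to 3s, and the added sleep 3 in run.sh similarly gives the syncer time to catch up before sync_diff_inspector runs:

func WaitSomething(backoff int, waitTime time.Duration, fn func() bool) bool {
	for i := 0; i < backoff; i++ {
		if fn() {
			return true
		}
		time.Sleep(waitTime)
	}
	return false
}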