pitr: Compatible with tiflash (#34776)
close #34777
joccau authored May 18, 2022
1 parent 381e870 commit 76cc528
Showing 6 changed files with 45 additions and 24 deletions.
6 changes: 4 additions & 2 deletions br/pkg/restore/client.go
@@ -1452,14 +1452,16 @@ func (rc *Client) GetRebasedTables() map[UniqueTableName]bool {
 func (rc *Client) PreCheckTableTiFlashReplica(
 	ctx context.Context,
 	tables []*metautil.Table,
+	skipTiflash bool,
 ) error {
 	tiFlashStores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.TiFlashOnly)
 	if err != nil {
 		return errors.Trace(err)
 	}
 	tiFlashStoreCount := len(tiFlashStores)
 	for _, table := range tables {
-		if table.Info.TiFlashReplica != nil && table.Info.TiFlashReplica.Count > uint64(tiFlashStoreCount) {
+		if skipTiflash ||
+			(table.Info.TiFlashReplica != nil && table.Info.TiFlashReplica.Count > uint64(tiFlashStoreCount)) {
 			// We cannot satisfy the TiFlash replica count in the restore cluster, so set
 			// TiFlashReplica to unavailable in tableInfo; otherwise TiDB cannot sense the missing TiFlash and may plan queries to it.
 			// See details at https://github.com/pingcap/br/issues/931.
@@ -1952,7 +1954,7 @@ func (rc *Client) UpdateSchemaVersion(ctx context.Context) error {
 		func(ctx context.Context, txn kv.Transaction) error {
 			t := meta.NewMeta(txn)
 			var e error
-			schemaVersion, e = t.GenSchemaVersion()
+			schemaVersion, e = t.GenSchemaVersions(128)
 			return e
 		},
 	); err != nil {
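The flag short-circuits the replica check, so a PITR-driven restore clears every table's TiFlash replica metadata regardless of how many TiFlash stores the target cluster has. A minimal, self-contained sketch of the new condition: tiFlashReplicaInfo is a stand-in for the real model type, and the nil result mirrors the "set TiFlashReplica to unavailable" behavior described in the comment above.

package main

import "fmt"

// tiFlashReplicaInfo stands in for the real TiFlash replica-info struct.
type tiFlashReplicaInfo struct{ Count uint64 }

// preCheck mirrors the shape of the updated condition: skip entirely for
// PITR, or drop the replica info when the cluster has too few TiFlash stores.
func preCheck(replica *tiFlashReplicaInfo, tiFlashStoreCount int, skipTiflash bool) *tiFlashReplicaInfo {
	if skipTiflash ||
		(replica != nil && replica.Count > uint64(tiFlashStoreCount)) {
		return nil // marked unavailable in the restored tableInfo
	}
	return replica
}

func main() {
	r := &tiFlashReplicaInfo{Count: 3}
	fmt.Println(preCheck(r, 1, false)) // nil: 3 replicas cannot fit on 1 TiFlash store
	fmt.Println(preCheck(r, 3, false)) // kept: the cluster can satisfy the replica count
	fmt.Println(preCheck(r, 3, true))  // nil: PITR always strips TiFlash replicas
}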
7 changes: 6 additions & 1 deletion br/pkg/restore/client_test.go
@@ -223,7 +223,7 @@ func TestPreCheckTableTiFlashReplicas(t *testing.T) {
 		}
 	}
 	ctx := context.Background()
-	require.Nil(t, client.PreCheckTableTiFlashReplica(ctx, tables))
+	require.Nil(t, client.PreCheckTableTiFlashReplica(ctx, tables, false))
 
 	for i := 0; i < len(tables); i++ {
 		if i == 0 || i > 2 {
@@ -234,4 +234,9 @@ func TestPreCheckTableTiFlashReplicas(t *testing.T) {
 			require.Equal(t, i, obtainCount)
 		}
 	}
+
+	require.Nil(t, client.PreCheckTableTiFlashReplica(ctx, tables, true))
+	for i := 0; i < len(tables); i++ {
+		require.Nil(t, tables[i].Info.TiFlashReplica)
+	}
 }
17 changes: 10 additions & 7 deletions br/pkg/task/restore.go
@@ -151,8 +151,9 @@ type RestoreConfig struct {
 	FullBackupStorage string `json:"full-backup-storage" toml:"full-backup-storage"`
 
 	// [StartTS, RestoreTS] is the time range that `restore log` replays, from StartTS to RestoreTS.
-	StartTS uint64 `json:"start-ts" toml:"start-ts"`
-	RestoreTS uint64 `json:"restore-ts" toml:"restore-ts"`
+	StartTS     uint64 `json:"start-ts" toml:"start-ts"`
+	RestoreTS   uint64 `json:"restore-ts" toml:"restore-ts"`
+	skipTiflash bool   `json:"-" toml:"-"`
 }
 
 // DefineRestoreFlags defines common flags for the restore tidb command.
@@ -488,7 +489,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf
 	ddlJobs := restore.FilterDDLJobs(client.GetDDLJobs(), tables)
 	ddlJobs = restore.FilterDDLJobByRules(ddlJobs, restore.DDLJobBlockListRule)
 
-	err = client.PreCheckTableTiFlashReplica(ctx, tables)
+	err = client.PreCheckTableTiFlashReplica(ctx, tables, cfg.skipTiflash)
 	if err != nil {
 		return errors.Trace(err)
 	}
@@ -559,7 +560,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf
 	summary.CollectInt("restore ranges", rangeSize)
 	log.Info("range and file prepared", zap.Int("file count", len(files)), zap.Int("range count", rangeSize))
 
-	restoreSchedulers, err := restorePreWork(ctx, client, mgr)
+	restoreSchedulers, err := restorePreWork(ctx, client, mgr, true)
 	if err != nil {
 		return errors.Trace(err)
 	}
@@ -688,13 +689,15 @@ func filterRestoreFiles(
 
 // restorePreWork executes some prepare work before restore.
 // TODO: make this function return a restore post-work function.
-func restorePreWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr) (pdutil.UndoFunc, error) {
+func restorePreWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, switchToImport bool) (pdutil.UndoFunc, error) {
 	if client.IsOnline() {
 		return pdutil.Nop, nil
 	}
 
-	// Switch TiKV cluster to import mode (adjust rocksdb configuration).
-	client.SwitchToImportMode(ctx)
+	if switchToImport {
+		// Switch the TiKV cluster to import mode (adjusts the RocksDB configuration).
+		client.SwitchToImportMode(ctx)
+	}
 
 	return mgr.RemoveSchedulers(ctx)
 }
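With the extra boolean, restorePreWork's two jobs become separable: full and raw restores still switch TiKV into import mode, while the PITR log-restore path in stream.go below passes false and only pauses the PD schedulers. A runnable sketch of the call pattern, with undoFunc standing in for pdutil.UndoFunc and prints in place of the real cluster calls:

package main

import (
	"context"
	"fmt"
)

// undoFunc stands in for pdutil.UndoFunc.
type undoFunc func(context.Context) error

// preWork models restorePreWork after this commit: import mode is now
// optional, while scheduler removal (and its undo) always happens.
func preWork(ctx context.Context, switchToImport bool) (undoFunc, error) {
	if switchToImport {
		fmt.Println("switch TiKV to import mode") // full/raw restore only
	}
	fmt.Println("remove PD schedulers")
	return func(context.Context) error {
		fmt.Println("restore PD schedulers")
		return nil
	}, nil
}

func main() {
	ctx := context.Background()

	// Full restore: restorePreWork(ctx, client, mgr, true)
	undo, _ := preWork(ctx, true)
	_ = undo(ctx)

	// PITR log restore: restorePreWork(ctx, client, mgr, false)
	undo, _ = preWork(ctx, false)
	_ = undo(ctx)
}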
2 changes: 1 addition & 1 deletion br/pkg/task/restore_raw.go
@@ -152,7 +152,7 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR
 		return errors.Trace(err)
 	}
 
-	restoreSchedulers, err := restorePreWork(ctx, client, mgr)
+	restoreSchedulers, err := restorePreWork(ctx, client, mgr, true)
 	if err != nil {
 		return errors.Trace(err)
 	}
32 changes: 19 additions & 13 deletions br/pkg/task/stream.go
@@ -973,6 +973,8 @@ func RunStreamRestore(
 	if len(cfg.FullBackupStorage) > 0 {
 		logStorage := cfg.Config.Storage
 		cfg.Config.Storage = cfg.FullBackupStorage
+		// TiFlash replicas cannot currently be restored to the downstream cluster by PITR, so skip them.
+		cfg.skipTiflash = true
 		if err = RunRestore(ctx, g, FullRestoreCmd, cfg); err != nil {
 			return errors.Trace(err)
 		}
@@ -1004,7 +1006,14 @@ func restoreStream(
 		ctx = opentracing.ContextWithSpan(ctx, span1)
 	}
 
-	client, err := createRestoreClient(ctx, g, cfg)
+	mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config),
+		cfg.CheckRequirements, true)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	defer mgr.Close()
+
+	client, err := createRestoreClient(ctx, g, cfg, mgr)
 	if err != nil {
 		return errors.Annotate(err, "failed to create restore client")
 	}
@@ -1017,6 +1026,14 @@ func restoreStream(
 	client.SetRestoreRangeTS(cfg.StartTS, cfg.RestoreTS, ShiftTS(cfg.StartTS))
 	client.SetCurrentTS(currentTS)
 
+	restoreSchedulers, err := restorePreWork(ctx, client, mgr, false)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	// Always run the post-work, even on error, so we don't get stuck in
+	// import mode or leave the schedulers removed.
+	defer restorePostWork(ctx, client, restoreSchedulers)
+
 	// read meta by given ts.
 	metas, err := client.ReadStreamMetaByTS(ctx, cfg.RestoreTS)
 	if err != nil {
@@ -1081,19 +1098,8 @@
 	return nil
 }
 
-func createRestoreClient(ctx context.Context, g glue.Glue, cfg *RestoreConfig) (*restore.Client, error) {
+func createRestoreClient(ctx context.Context, g glue.Glue, cfg *RestoreConfig, mgr *conn.Mgr) (*restore.Client, error) {
 	var err error
-	mgr, err := NewMgr(ctx, g, cfg.PD, cfg.TLS, GetKeepalive(&cfg.Config),
-		cfg.CheckRequirements, true)
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-	defer func() {
-		if err != nil {
-			mgr.Close()
-		}
-	}()
-
 	keepaliveCfg := GetKeepalive(&cfg.Config)
 	keepaliveCfg.PermitWithoutStream = true
 	client := restore.NewRestoreClient(mgr.GetPDClient(), mgr.GetTLSConfig(), keepaliveCfg, false)
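Hoisting NewMgr out of createRestoreClient also moves ownership: restoreStream now builds the connection manager once, shares it with createRestoreClient and restorePreWork, and a single defer mgr.Close() replaces the old close-only-on-error closure. A simplified sketch of the ownership pattern, using placeholder types:

package main

import "fmt"

// mgr stands in for conn.Mgr.
type mgr struct{}

func (m *mgr) Close() { fmt.Println("mgr closed") }

// newClient borrows the caller's manager instead of building and
// conditionally closing its own, as createRestoreClient does now.
func newClient(m *mgr) (string, error) { return "restore client", nil }

func restoreStream() error {
	m := &mgr{}
	defer m.Close() // single owner: runs on every return path

	client, err := newClient(m)
	if err != nil {
		return err // mgr is still closed by the defer above
	}
	fmt.Println("restoring with", client)
	return nil
}

func main() { _ = restoreStream() }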
5 changes: 5 additions & 0 deletions meta/meta.go
@@ -348,6 +348,11 @@ func (m *Meta) GenSchemaVersion() (int64, error) {
 	return m.txn.Inc(mSchemaVersionKey, 1)
 }
 
+// GenSchemaVersions increases the schema version by count and returns the new version.
+func (m *Meta) GenSchemaVersions(count int64) (int64, error) {
+	return m.txn.Inc(mSchemaVersionKey, count)
+}
+
 func (m *Meta) checkPolicyExists(policyKey []byte) error {
 	v, err := m.txn.HGet(mPolicies, policyKey)
 	if err == nil && v == nil {
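GenSchemaVersions collapses n schema-version bumps into one increment of mSchemaVersionKey, which is how the UpdateSchemaVersion hunk in client.go claims 128 versions per transaction. A toy model of the counter semantics; it assumes, as those callers appear to, that txn.Inc returns the post-increment value:

package main

import "fmt"

// fakeTxn models the single counter key (mSchemaVersionKey) behind
// GenSchemaVersion and GenSchemaVersions.
type fakeTxn struct{ schemaVersion int64 }

// Inc returns the post-increment value; the real txn.Inc is assumed
// to behave the same way here.
func (t *fakeTxn) Inc(step int64) (int64, error) {
	t.schemaVersion += step
	return t.schemaVersion, nil
}

func main() {
	txn := &fakeTxn{schemaVersion: 100}

	v, _ := txn.Inc(1) // GenSchemaVersion: one version per call
	fmt.Println(v)     // 101

	v, _ = txn.Inc(128) // GenSchemaVersions(128): reserve a whole block at once
	fmt.Println(v)      // 229: versions 102..229 claimed with a single write
}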
