
Commit 243e056
Merge remote-tracking branch 'upstream/master' into lock_waits_optimisic
longfangsong committed May 31, 2022
2 parents 9065976 + b24ef9a commit 243e056
Showing 142 changed files with 12,466 additions and 9,687 deletions.
5 changes: 3 additions & 2 deletions bindinfo/bind_test.go
@@ -496,12 +496,13 @@ func TestErrorBind(t *testing.T) {
 	require.NotNil(t, bind.UpdateTime)
 
 	tk.MustExec("drop index index_t on t")
-	_, err = tk.Exec("select * from t where i > 10")
+	rs, err := tk.Exec("select * from t where i > 10")
 	require.NoError(t, err)
+	rs.Close()
 
 	dom.BindHandle().DropInvalidBindRecord()
 
-	rs, err := tk.Exec("show global bindings")
+	rs, err = tk.Exec("show global bindings")
 	require.NoError(t, err)
 	chk := rs.NewChunk(nil)
 	err = rs.Next(context.TODO(), chk)
1 change: 1 addition & 0 deletions br/cmd/br/restore.go
@@ -160,5 +160,6 @@ func newStreamRestoreCommand() *cobra.Command {
 	}
 	task.DefineFilterFlags(command, filterOutSysAndMemTables, true)
 	task.DefineStreamRestoreFlags(command)
+	command.Hidden = true
 	return command
 }
1 change: 1 addition & 0 deletions br/cmd/br/stream.go
@@ -55,6 +55,7 @@ func NewStreamCommand() *cobra.Command {
 		command.Root().HelpFunc()(command, strings)
 	})
 
+	command.Hidden = true
 	return command
 }
 
12 changes: 4 additions & 8 deletions br/pkg/backup/client.go
@@ -30,6 +30,7 @@ import (
 	"github.com/pingcap/tidb/br/pkg/storage"
 	"github.com/pingcap/tidb/br/pkg/summary"
 	"github.com/pingcap/tidb/br/pkg/utils"
+	"github.com/pingcap/tidb/ddl"
 	"github.com/pingcap/tidb/distsql"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/meta"
@@ -480,20 +481,15 @@ func WriteBackupDDLJobs(metaWriter *metautil.MetaWriter, store kv.Storage, lastB
 	if err != nil {
 		return errors.Trace(err)
 	}
-	allJobs := make([]*model.Job, 0)
-	defaultJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.DefaultJobListKey)
+	allJobs, err := ddl.GetAllDDLJobs(snapMeta)
 	if err != nil {
 		return errors.Trace(err)
 	}
-	log.Debug("get default jobs", zap.Int("jobs", len(defaultJobs)))
-	allJobs = append(allJobs, defaultJobs...)
-	addIndexJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.AddIndexJobListKey)
+	log.Debug("get all jobs", zap.Int("jobs", len(allJobs)))
 	if err != nil {
 		return errors.Trace(err)
 	}
-	log.Debug("get add index jobs", zap.Int("jobs", len(addIndexJobs)))
-	allJobs = append(allJobs, addIndexJobs...)
-	historyJobs, err := snapMeta.GetAllHistoryDDLJobs()
+	historyJobs, err := ddl.GetAllHistoryDDLJobs(snapMeta)
 	if err != nil {
 		return errors.Trace(err)
 	}
5 changes: 5 additions & 0 deletions br/pkg/restore/client.go
@@ -2016,3 +2016,8 @@ func (rc *Client) SaveSchemas(
 	}
 	return nil
 }
+
+// MockClient create a fake client used to test.
+func MockClient(dbs map[string]*utils.Database) *Client {
+	return &Client{databases: dbs}
+}
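
For context, the new constructor only fills the client's internal databases map, which is all the restore precheck below consults. A minimal usage sketch (the database name and DBInfo literal are made-up fixtures, not part of this commit; the real call sites are in the br/pkg/task tests further down):

package main

import (
	"github.com/pingcap/tidb/br/pkg/restore"
	"github.com/pingcap/tidb/br/pkg/utils"
	"github.com/pingcap/tidb/parser/model"
)

func main() {
	// Build a fake backup catalog with a single database and no tables.
	dbs := map[string]*utils.Database{
		"test": {Info: &model.DBInfo{Name: model.NewCIStr("test")}},
	}
	// The mock client can then stand in for a real restore.Client in tests,
	// e.g. when exercising CheckRestoreDBAndTable in br/pkg/task.
	client := restore.MockClient(dbs)
	_ = client
}
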
14 changes: 8 additions & 6 deletions br/pkg/task/restore.go
@@ -300,29 +300,31 @@ func CheckRestoreDBAndTable(client *restore.Client, cfg *RestoreConfig) error {
 	schemasMap := make(map[string]struct{})
 	tablesMap := make(map[string]struct{})
 	for _, db := range schemas {
-		dbName := db.Info.Name.O
-		if name, ok := utils.GetSysDBName(db.Info.Name); utils.IsSysDB(name) && ok {
-			dbName = name
+		dbName := db.Info.Name.L
+		if dbCIStrName, ok := utils.GetSysDBCIStrName(db.Info.Name); utils.IsSysDB(dbCIStrName.O) && ok {
+			dbName = dbCIStrName.L
 		}
 		schemasMap[utils.EncloseName(dbName)] = struct{}{}
 		for _, table := range db.Tables {
 			if table.Info == nil {
 				// we may back up empty database.
 				continue
 			}
-			tablesMap[utils.EncloseDBAndTable(dbName, table.Info.Name.O)] = struct{}{}
+			tablesMap[utils.EncloseDBAndTable(dbName, table.Info.Name.L)] = struct{}{}
 		}
 	}
 	restoreSchemas := cfg.Schemas
 	restoreTables := cfg.Tables
 	for schema := range restoreSchemas {
-		if _, ok := schemasMap[schema]; !ok {
+		schemaLName := strings.ToLower(schema)
+		if _, ok := schemasMap[schemaLName]; !ok {
 			return errors.Annotatef(berrors.ErrUndefinedRestoreDbOrTable,
 				"[database: %v] has not been backup, please ensure you has input a correct database name", schema)
 		}
 	}
 	for table := range restoreTables {
-		if _, ok := tablesMap[table]; !ok {
+		tableLName := strings.ToLower(table)
+		if _, ok := tablesMap[tableLName]; !ok {
 			return errors.Annotatef(berrors.ErrUndefinedRestoreDbOrTable,
 				"[table: %v] has not been backup, please ensure you has input a correct table name", table)
 		}
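
The precheck above now compares names case-insensitively: the backup-side schema and table names go into the maps in their lower-cased (.L) form, and the user-supplied filters are lower-cased before the lookup. A standalone sketch of that normalize-then-look-up idea, with made-up names (the real code builds the maps from the backup meta and from cfg.Schemas/cfg.Tables):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Keys are stored in their lower-cased, enclosed form, mirroring
	// utils.EncloseDBAndTable(db.Info.Name.L, table.Info.Name.L).
	backedUp := map[string]struct{}{
		"`test`.`t1`": {},
	}
	// A filter written in any case matches once it is lower-cased too.
	filter := "`TEST`.`T1`"
	if _, ok := backedUp[strings.ToLower(filter)]; ok {
		fmt.Println("table found in backup")
	} else {
		fmt.Println("would report ErrUndefinedRestoreDbOrTable")
	}
}
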
194 changes: 194 additions & 0 deletions br/pkg/task/restore_test.go
@@ -4,10 +4,21 @@ package task
 
 import (
 	"context"
+	"encoding/json"
+	"fmt"
 	"testing"
 
+	"github.com/golang/protobuf/proto"
+	backuppb "github.com/pingcap/kvproto/pkg/brpb"
+	"github.com/pingcap/kvproto/pkg/encryptionpb"
 	"github.com/pingcap/tidb/br/pkg/conn"
+	"github.com/pingcap/tidb/br/pkg/metautil"
 	"github.com/pingcap/tidb/br/pkg/restore"
+	"github.com/pingcap/tidb/br/pkg/storage"
+	"github.com/pingcap/tidb/br/pkg/utils"
+	"github.com/pingcap/tidb/parser/model"
+	"github.com/pingcap/tidb/statistics/handle"
+	"github.com/pingcap/tidb/tablecodec"
 	"github.com/stretchr/testify/require"
 )
 
@@ -41,3 +52,186 @@ func TestconfigureRestoreClient(t *testing.T) {
 	require.Equal(t, client.GetBatchDdlSize(), 128)
 	require.True(t, true, client.IsOnline())
 }
+
+func TestCheckRestoreDBAndTable(t *testing.T) {
+	cases := []struct {
+		cfgSchemas map[string]struct{}
+		cfgTables  map[string]struct{}
+		backupDBs  map[string]*utils.Database
+	}{
+		{
+			cfgSchemas: map[string]struct{}{
+				utils.EncloseName("test"): {},
+			},
+			cfgTables: map[string]struct{}{
+				utils.EncloseDBAndTable("test", "t"):  {},
+				utils.EncloseDBAndTable("test", "t2"): {},
+			},
+			backupDBs: mockReadSchemasFromBackupMeta(t, map[string][]string{
+				"test": {"T", "T2"},
+			}),
+		},
+		{
+			cfgSchemas: map[string]struct{}{
+				utils.EncloseName("mysql"): {},
+			},
+			cfgTables: map[string]struct{}{
+				utils.EncloseDBAndTable("mysql", "t"):  {},
+				utils.EncloseDBAndTable("mysql", "t2"): {},
+			},
+			backupDBs: mockReadSchemasFromBackupMeta(t, map[string][]string{
+				"__TiDB_BR_Temporary_mysql": {"T", "T2"},
+			}),
+		},
+		{
+			cfgSchemas: map[string]struct{}{
+				utils.EncloseName("test"): {},
+			},
+			cfgTables: map[string]struct{}{
+				utils.EncloseDBAndTable("test", "T"):  {},
+				utils.EncloseDBAndTable("test", "T2"): {},
+			},
+			backupDBs: mockReadSchemasFromBackupMeta(t, map[string][]string{
+				"test": {"t", "t2"},
+			}),
+		},
+		{
+			cfgSchemas: map[string]struct{}{
+				utils.EncloseName("TEST"): {},
+			},
+			cfgTables: map[string]struct{}{
+				utils.EncloseDBAndTable("TEST", "t"):  {},
+				utils.EncloseDBAndTable("TEST", "T2"): {},
+			},
+			backupDBs: mockReadSchemasFromBackupMeta(t, map[string][]string{
+				"test": {"t", "t2"},
+			}),
+		},
+		{
+			cfgSchemas: map[string]struct{}{
+				utils.EncloseName("TeSt"): {},
+			},
+			cfgTables: map[string]struct{}{
+				utils.EncloseDBAndTable("TeSt", "tabLe"):  {},
+				utils.EncloseDBAndTable("TeSt", "taBle2"): {},
+			},
+			backupDBs: mockReadSchemasFromBackupMeta(t, map[string][]string{
+				"TesT": {"TablE", "taBle2"},
+			}),
+		},
+		{
+			cfgSchemas: map[string]struct{}{
+				utils.EncloseName("TeSt"):  {},
+				utils.EncloseName("MYSQL"): {},
+			},
+			cfgTables: map[string]struct{}{
+				utils.EncloseDBAndTable("TeSt", "tabLe"):  {},
+				utils.EncloseDBAndTable("TeSt", "taBle2"): {},
+				utils.EncloseDBAndTable("MYSQL", "taBle"): {},
+			},
+			backupDBs: mockReadSchemasFromBackupMeta(t, map[string][]string{
+				"TesT":                      {"table", "TaBLE2"},
+				"__TiDB_BR_Temporary_mysql": {"tablE"},
+			}),
+		},
+	}
+
+	cfg := &RestoreConfig{}
+	for _, ca := range cases {
+		cfg.Schemas = ca.cfgSchemas
+		cfg.Tables = ca.cfgTables
+		client := restore.MockClient(ca.backupDBs)
+
+		err := CheckRestoreDBAndTable(client, cfg)
+		require.NoError(t, err)
+	}
+}
+
+func mockReadSchemasFromBackupMeta(t *testing.T, db2Tables map[string][]string) map[string]*utils.Database {
+	testDir := t.TempDir()
+	store, err := storage.NewLocalStorage(testDir)
+	require.NoError(t, err)
+
+	mockSchemas := make([]*backuppb.Schema, 0)
+	var dbID int64 = 1
+	for db, tables := range db2Tables {
+		dbName := model.NewCIStr(db)
+		mockTblList := make([]*model.TableInfo, 0)
+		tblBytesList, statsBytesList := make([][]byte, 0), make([][]byte, 0)
+
+		for i, table := range tables {
+			tblName := model.NewCIStr(table)
+			mockTbl := &model.TableInfo{
+				ID:   dbID*100 + int64(i),
+				Name: tblName,
+			}
+			mockTblList = append(mockTblList, mockTbl)
+
+			mockStats := handle.JSONTable{
+				DatabaseName: dbName.String(),
+				TableName:    tblName.String(),
+			}
+
+			tblBytes, err := json.Marshal(mockTbl)
+			require.NoError(t, err)
+			tblBytesList = append(tblBytesList, tblBytes)
+
+			statsBytes, err := json.Marshal(mockStats)
+			require.NoError(t, err)
+			statsBytesList = append(statsBytesList, statsBytes)
+		}
+
+		mockDB := model.DBInfo{
+			ID:     dbID,
+			Name:   dbName,
+			Tables: mockTblList,
+		}
+		dbID++
+		dbBytes, err := json.Marshal(mockDB)
+		require.NoError(t, err)
+
+		for i := 0; i < len(tblBytesList); i++ {
+			mockSchemas = append(mockSchemas, &backuppb.Schema{
+				Db:    dbBytes,
+				Table: tblBytesList[i],
+				Stats: statsBytesList[i],
+			},
+			)
+		}
+	}
+
+	mockFiles := []*backuppb.File{
+		{
+			Name:     fmt.Sprintf("%p.sst", &mockSchemas),
+			StartKey: tablecodec.EncodeRowKey(1, []byte("a")),
+			EndKey:   tablecodec.EncodeRowKey(2, []byte("a")),
+		},
+	}
+
+	meta := mockBackupMeta(mockSchemas, mockFiles)
+	data, err := proto.Marshal(meta)
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	err = store.WriteFile(ctx, metautil.MetaFile, data)
+	require.NoError(t, err)
+
+	dbs, err := utils.LoadBackupTables(
+		ctx,
+		metautil.NewMetaReader(
+			meta,
+			store,
+			&backuppb.CipherInfo{
+				CipherType: encryptionpb.EncryptionMethod_PLAINTEXT,
+			}),
+	)
+	require.NoError(t, err)
+	return dbs
+}
+
+func mockBackupMeta(mockSchemas []*backuppb.Schema, mockFiles []*backuppb.File) *backuppb.BackupMeta {
+	return &backuppb.BackupMeta{
+		Files:   mockFiles,
+		Schemas: mockSchemas,
+	}
+}
10 changes: 10 additions & 0 deletions br/pkg/utils/schema.go
@@ -114,3 +114,13 @@ func GetSysDBName(tempDB model.CIStr) (string, bool) {
 	}
 	return tempDB.O[len(temporaryDBNamePrefix):], true
 }
+
+// GetSysDBCIStrName get the CIStr name of system DB
+func GetSysDBCIStrName(tempDB model.CIStr) (model.CIStr, bool) {
+	if ok := strings.HasPrefix(tempDB.O, temporaryDBNamePrefix); !ok {
+		return tempDB, false
+	}
+	tempDB.O = tempDB.O[len(temporaryDBNamePrefix):]
+	tempDB.L = tempDB.L[len(temporaryDBNamePrefix):]
+	return tempDB, true
+}
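
The new helper behaves like GetSysDBName above, but it trims the temporary prefix from both the original-cased (.O) and lower-cased (.L) forms of the name, which is what the case-insensitive restore check in br/pkg/task relies on. A hedged usage sketch (it assumes temporaryDBNamePrefix is the "__TiDB_BR_Temporary_" prefix that appears in the restore_test.go fixtures above):

package main

import (
	"fmt"

	"github.com/pingcap/tidb/br/pkg/utils"
	"github.com/pingcap/tidb/parser/model"
)

func main() {
	// A system database backed up under the temporary prefix maps back to "mysql"
	// in both its original-cased and lower-cased forms.
	name, ok := utils.GetSysDBCIStrName(model.NewCIStr("__TiDB_BR_Temporary_mysql"))
	fmt.Println(ok, name.O, name.L) // true mysql mysql

	// A name without the prefix is returned unchanged.
	name, ok = utils.GetSysDBCIStrName(model.NewCIStr("test"))
	fmt.Println(ok, name.O, name.L) // false test test
}
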
22 changes: 13 additions & 9 deletions cmd/explaintest/r/explain_easy.result
@@ -511,15 +511,19 @@ PRIMARY KEY (`id`)
 explain format = 'brief' SELECT COUNT(1) FROM (SELECT COALESCE(b.region_name, '不详') region_name, SUM(a.registration_num) registration_num FROM (SELECT stat_date, show_date, region_id, 0 registration_num FROM test01 WHERE period = 1 AND stat_date >= 20191202 AND stat_date <= 20191202 UNION ALL SELECT stat_date, show_date, region_id, registration_num registration_num FROM test01 WHERE period = 1 AND stat_date >= 20191202 AND stat_date <= 20191202) a LEFT JOIN test02 b ON a.region_id = b.id WHERE registration_num > 0 AND a.stat_date >= '20191202' AND a.stat_date <= '20191202' GROUP BY a.stat_date , a.show_date , COALESCE(b.region_name, '不详') ) JLS;
 id estRows task access object operator info
 StreamAgg 1.00 root funcs:count(1)->Column#22
-└─HashAgg 1.00 root group by:Column#32, Column#33, Column#34, funcs:count(1)->Column#31
-└─Projection 0.01 root Column#14, Column#15, coalesce(test.test02.region_name, 不详)->Column#34
-└─IndexJoin 0.01 root left outer join, inner:TableReader, outer key:Column#16, inner key:test.test02.id, equal cond:eq(Column#16, test.test02.id)
-├─Union(Build) 0.01 root
-│ ├─TableDual 0.00 root rows:0
-│ └─Projection 0.01 root test.test01.stat_date, test.test01.show_date, test.test01.region_id
-│ └─TableReader 0.01 root data:Selection
-│ └─Selection 0.01 cop[tikv] eq(test.test01.period, 1), ge(test.test01.stat_date, 20191202), gt(cast(test.test01.registration_num, bigint(20) BINARY), 0), le(test.test01.stat_date, 20191202)
-│ └─TableFullScan 10000.00 cop[tikv] table:test01 keep order:false, stats:pseudo
+└─HashAgg 2.50 root group by:Column#47, Column#48, Column#49, funcs:count(1)->Column#36
+└─Projection 2.50 root Column#14, Column#15, coalesce(test.test02.region_name, 不详)->Column#49
+└─IndexJoin 2.50 root left outer join, inner:TableReader, outer key:Column#16, inner key:test.test02.id, equal cond:eq(Column#16, test.test02.id)
+├─HashAgg(Build) 2.00 root group by:Column#14, Column#15, Column#16, funcs:firstrow(Column#33)->Column#14, funcs:firstrow(Column#34)->Column#15, funcs:firstrow(Column#35)->Column#16, funcs:count(1)->Column#37
+│ └─Union 2.00 root
+│ ├─Projection 1.00 root Column#33, Column#34, Column#35, Column#14, Column#15, Column#16
+│ │ └─HashAgg 1.00 root group by:test.test01.region_id, test.test01.show_date, test.test01.stat_date, funcs:firstrow(test.test01.stat_date)->Column#33, funcs:firstrow(test.test01.show_date)->Column#34, funcs:firstrow(test.test01.region_id)->Column#35, funcs:firstrow(test.test01.stat_date)->Column#14, funcs:firstrow(test.test01.show_date)->Column#15, funcs:firstrow(test.test01.region_id)->Column#16, funcs:count(1)->Column#38
+│ │ └─TableDual 0.00 root rows:0
+│ └─Projection 1.00 root Column#33, Column#34, Column#35, Column#14, Column#15, Column#16
+│ └─HashAgg 1.00 root group by:test.test01.region_id, test.test01.show_date, test.test01.stat_date, funcs:firstrow(test.test01.stat_date)->Column#33, funcs:firstrow(test.test01.show_date)->Column#34, funcs:firstrow(test.test01.region_id)->Column#35, funcs:firstrow(test.test01.stat_date)->Column#14, funcs:firstrow(test.test01.show_date)->Column#15, funcs:firstrow(test.test01.region_id)->Column#16, funcs:count(1)->Column#39
+│ └─TableReader 0.01 root data:Selection
+│ └─Selection 0.01 cop[tikv] eq(test.test01.period, 1), ge(test.test01.stat_date, 20191202), gt(cast(test.test01.registration_num, bigint(20) BINARY), 0), le(test.test01.stat_date, 20191202)
+│ └─TableFullScan 10000.00 cop[tikv] table:test01 keep order:false, stats:pseudo
 └─TableReader(Probe) 1.00 root data:TableRangeScan
 └─TableRangeScan 1.00 cop[tikv] table:b range: decided by [Column#16], keep order:false, stats:pseudo
 drop table if exists t;