From a31f1517e42d5aaca50ef774be0ef12704b2d9a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B1=B1=E5=B2=9A?= <36239017+YuJuncen@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:52:42 +0800 Subject: [PATCH 01/17] br/operator: added some commands for managing migrations (#56857) close pingcap/tidb#56760 --- br/cmd/br/operator.go | 57 ++++++++ br/pkg/task/operator/BUILD.bazel | 10 +- br/pkg/task/operator/base64ify.go | 38 +++++ br/pkg/task/operator/config.go | 138 ++++++++++++++++++ br/pkg/task/operator/list_migration.go | 53 +++++++ br/pkg/task/operator/migrate_to.go | 131 +++++++++++++++++ .../task/operator/{cmd.go => prepare_snap.go} | 0 7 files changed, 426 insertions(+), 1 deletion(-) create mode 100644 br/pkg/task/operator/base64ify.go create mode 100644 br/pkg/task/operator/list_migration.go create mode 100644 br/pkg/task/operator/migrate_to.go rename br/pkg/task/operator/{cmd.go => prepare_snap.go} (100%) diff --git a/br/cmd/br/operator.go b/br/cmd/br/operator.go index 027013717293c..4e41adeab329f 100644 --- a/br/cmd/br/operator.go +++ b/br/cmd/br/operator.go @@ -32,6 +32,9 @@ func newOperatorCommand() *cobra.Command { cmd.AddCommand(newPrepareForSnapshotBackupCommand( "prepare-for-snapshot-backup", "pause gc, schedulers and importing until the program exits, for snapshot backup.")) + cmd.AddCommand(newBase64ifyCommand()) + cmd.AddCommand(newListMigrationsCommand()) + cmd.AddCommand(newMigrateToCommand()) return cmd } @@ -52,3 +55,57 @@ func newPrepareForSnapshotBackupCommand(use string, short string) *cobra.Command operator.DefineFlagsForPrepareSnapBackup(cmd.Flags()) return cmd } + +func newBase64ifyCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "base64ify [-r] -s ", + Short: "generate base64 for a storage. this may be passed to `tikv-ctl compact-log-backup`.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := operator.Base64ifyConfig{} + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err + } + ctx := GetDefaultContext() + return operator.Base64ify(ctx, cfg) + }, + } + operator.DefineFlagsForBase64ifyConfig(cmd.Flags()) + return cmd +} + +func newListMigrationsCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-migrations", + Short: "list all migrations", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := operator.ListMigrationConfig{} + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err + } + ctx := GetDefaultContext() + return operator.RunListMigrations(ctx, cfg) + }, + } + operator.DefineFlagsForListMigrationConfig(cmd.Flags()) + return cmd +} + +func newMigrateToCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate-to", + Short: "migrate to a specific version", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := operator.MigrateToConfig{} + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err + } + ctx := GetDefaultContext() + return operator.RunMigrateTo(ctx, cfg) + }, + } + operator.DefineFlagsForMigrateToConfig(cmd.Flags()) + return cmd +} diff --git a/br/pkg/task/operator/BUILD.bazel b/br/pkg/task/operator/BUILD.bazel index c7b8bbeb4ea23..14760027a49b8 100644 --- a/br/pkg/task/operator/BUILD.bazel +++ b/br/pkg/task/operator/BUILD.bazel @@ -3,20 +3,28 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "operator", srcs = [ - "cmd.go", + "base64ify.go", "config.go", + "list_migration.go", + "migrate_to.go", + "prepare_snap.go", ], importpath = 
"github.com/pingcap/tidb/br/pkg/task/operator", visibility = ["//visibility:public"], deps = [ "//br/pkg/backup/prepare_snap", "//br/pkg/errors", + "//br/pkg/glue", "//br/pkg/logutil", "//br/pkg/pdutil", + "//br/pkg/storage", + "//br/pkg/stream", "//br/pkg/task", "//br/pkg/utils", + "@com_github_fatih_color//:color", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_log//:log", "@com_github_spf13_pflag//:pflag", "@com_github_tikv_client_go_v2//tikv", diff --git a/br/pkg/task/operator/base64ify.go b/br/pkg/task/operator/base64ify.go new file mode 100644 index 0000000000000..ae715e5a69412 --- /dev/null +++ b/br/pkg/task/operator/base64ify.go @@ -0,0 +1,38 @@ +package operator + +import ( + "context" + "encoding/base64" + "fmt" + "os" + + "github.com/fatih/color" + "github.com/pingcap/tidb/br/pkg/storage" +) + +func Base64ify(ctx context.Context, cfg Base64ifyConfig) error { + return runEncode(ctx, cfg) // Assuming runEncode will be similarly modified to accept Base64ifyConfig +} + +func runEncode(ctx context.Context, cfg Base64ifyConfig) error { + s, err := storage.ParseBackend(cfg.StorageURI, &cfg.BackendOptions) + if err != nil { + return err + } + if cfg.LoadCerd { + _, err := storage.New(ctx, s, &storage.ExternalStorageOptions{ + SendCredentials: true, + }) + if err != nil { + return err + } + fmt.Fprintln(os.Stderr, color.HiRedString("Credientials are encoded to the base64 string. DON'T share this with untrusted people!")) + } + + sBytes, err := s.Marshal() + if err != nil { + return err + } + fmt.Println(base64.StdEncoding.EncodeToString(sBytes)) + return nil +} diff --git a/br/pkg/task/operator/config.go b/br/pkg/task/operator/config.go index 693d4908bdee6..c42382abe504d 100644 --- a/br/pkg/task/operator/config.go +++ b/br/pkg/task/operator/config.go @@ -5,6 +5,9 @@ package operator import ( "time" + "github.com/pingcap/errors" + berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/task" "github.com/spf13/pflag" ) @@ -42,3 +45,138 @@ func (cfg *PauseGcConfig) ParseFromFlags(flags *pflag.FlagSet) error { return nil } + +type Base64ifyConfig struct { + storage.BackendOptions + StorageURI string + LoadCerd bool +} + +func DefineFlagsForBase64ifyConfig(flags *pflag.FlagSet) { + storage.DefineFlags(flags) + flags.StringP("storage", "s", "", "The external storage input.") + flags.Bool("load-creds", false, "whether loading the credientials from current environment and marshal them to the base64 string. 
[!]") +} + +func (cfg *Base64ifyConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + err = cfg.BackendOptions.ParseFromFlags(flags) + if err != nil { + return err + } + cfg.StorageURI, err = flags.GetString("storage") + if err != nil { + return err + } + cfg.LoadCerd, err = flags.GetBool("load-creds") + if err != nil { + return err + } + return nil +} + +type ListMigrationConfig struct { + storage.BackendOptions + StorageURI string + JSONOutput bool +} + +func DefineFlagsForListMigrationConfig(flags *pflag.FlagSet) { + storage.DefineFlags(flags) + flags.StringP("storage", "s", "", "the external storage input.") + flags.Bool("json", false, "output the result in json format.") +} + +func (cfg *ListMigrationConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + err = cfg.BackendOptions.ParseFromFlags(flags) + if err != nil { + return err + } + cfg.StorageURI, err = flags.GetString("storage") + if err != nil { + return err + } + cfg.JSONOutput, err = flags.GetBool("json") + if err != nil { + return err + } + return nil +} + +type MigrateToConfig struct { + storage.BackendOptions + StorageURI string + Recent bool + MigrateTo int + Base bool + + Yes bool + DryRun bool +} + +const ( + flagStorage = "storage" + flagRecent = "recent" + flagTo = "to" + flagBase = "base" + flagYes = "yes" + flagDryRun = "dry-run" +) + +func DefineFlagsForMigrateToConfig(flags *pflag.FlagSet) { + storage.DefineFlags(flags) + flags.StringP(flagStorage, "s", "", "the external storage input.") + flags.Bool(flagRecent, true, "migrate to the most recent migration and BASE.") + flags.Int(flagTo, 0, "migrate all migrations from the BASE to the specified sequence number.") + flags.Bool(flagBase, false, "don't merge any migrations, just retry run pending operations in BASE.") + flags.BoolP(flagYes, "y", false, "skip all effect estimating and confirming. 
execute directly.") + flags.Bool(flagDryRun, false, "do not actually perform the migration, just print the effect.") +} + +func (cfg *MigrateToConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + err = cfg.BackendOptions.ParseFromFlags(flags) + if err != nil { + return err + } + cfg.StorageURI, err = flags.GetString(flagStorage) + if err != nil { + return err + } + cfg.Recent, err = flags.GetBool(flagRecent) + if err != nil { + return err + } + cfg.MigrateTo, err = flags.GetInt(flagTo) + if err != nil { + return err + } + cfg.Base, err = flags.GetBool(flagBase) + if err != nil { + return err + } + cfg.Yes, err = flags.GetBool(flagYes) + if err != nil { + return err + } + cfg.DryRun, err = flags.GetBool(flagDryRun) + if err != nil { + return err + } + return nil +} + +func (cfg *MigrateToConfig) Verify() error { + if cfg.Recent && cfg.MigrateTo != 0 { + return errors.Annotatef(berrors.ErrInvalidArgument, + "the --%s and --%s flag cannot be used at the same time", + flagRecent, flagTo) + } + if cfg.Base && (cfg.Recent || cfg.MigrateTo != 0) { + return errors.Annotatef(berrors.ErrInvalidArgument, + "the --%s and ( --%s or --%s ) flag cannot be used at the same time", + flagBase, flagTo, flagRecent) + } + return nil +} diff --git a/br/pkg/task/operator/list_migration.go b/br/pkg/task/operator/list_migration.go new file mode 100644 index 0000000000000..d6c7efd57197a --- /dev/null +++ b/br/pkg/task/operator/list_migration.go @@ -0,0 +1,53 @@ +package operator + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/fatih/color" + "github.com/pingcap/tidb/br/pkg/glue" + "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/br/pkg/stream" +) + +// statusOK make a string like {message} +func statusOK(message string) string { + return color.GreenString("●") + color.New(color.Bold).Sprintf(" %s", message) +} + +func RunListMigrations(ctx context.Context, cfg ListMigrationConfig) error { + backend, err := storage.ParseBackend(cfg.StorageURI, &cfg.BackendOptions) + if err != nil { + return err + } + st, err := storage.Create(ctx, backend, false) + if err != nil { + return err + } + ext := stream.MigerationExtension(st) + migs, err := ext.Load(ctx) + if err != nil { + return err + } + if cfg.JSONOutput { + if err := json.NewEncoder(os.Stdout).Encode(migs); err != nil { + return err + } + } else { + console := glue.ConsoleOperations{ConsoleGlue: glue.StdIOGlue{}} + console.Println(statusOK(fmt.Sprintf("Total %d Migrations.", len(migs.Layers)+1))) + console.Printf("> BASE <\n") + tbl := console.CreateTable() + stream.AddMigrationToTable(migs.Base, tbl) + tbl.Print() + for _, t := range migs.Layers { + console.Printf("> %08d <\n", t.SeqNum) + tbl := console.CreateTable() + stream.AddMigrationToTable(&t.Content, tbl) + tbl.Print() + } + } + return nil +} diff --git a/br/pkg/task/operator/migrate_to.go b/br/pkg/task/operator/migrate_to.go new file mode 100644 index 0000000000000..20f76b0f86967 --- /dev/null +++ b/br/pkg/task/operator/migrate_to.go @@ -0,0 +1,131 @@ +package operator + +import ( + "context" + + "github.com/fatih/color" + "github.com/pingcap/errors" + backup "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/tidb/br/pkg/glue" + "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/br/pkg/stream" +) + +func (cfg *MigrateToConfig) getTargetVersion(migs stream.Migrations) (int, bool) { + if cfg.Recent { + if len(migs.Layers) == 0 { + return 0, false + } + return migs.Layers[0].SeqNum, true + } + if cfg.Base { + return 0, true 
+ } + return cfg.MigrateTo, true +} + +type migrateToCtx struct { + cfg MigrateToConfig + console glue.ConsoleOperations + est stream.MigrationExt +} + +func (cx migrateToCtx) printErr(errs []error, msg string) { + if len(errs) > 0 { + cx.console.Println(msg) + for _, w := range errs { + cx.console.Printf("- %s\n", color.HiRedString(w.Error())) + } + } +} + +func (cx migrateToCtx) askForContinue(targetMig *backup.Migration) bool { + tbl := cx.console.CreateTable() + stream.AddMigrationToTable(targetMig, tbl) + cx.console.Println("The migration going to be executed will be like: ") + tbl.Print() + + return cx.console.PromptBool("Continue? ") +} + +func (cx migrateToCtx) dryRun(f func(stream.MigrationExt) stream.MergeAndMigratedTo) error { + var ( + est = cx.est + console = cx.console + estBase stream.MergeAndMigratedTo + effects []storage.Effect + ) + effects = est.DryRun(func(me stream.MigrationExt) { + estBase = f(me) + }) + + tbl := console.CreateTable() + stream.AddMigrationToTable(estBase.NewBase, tbl) + console.Println("The new BASE migration will be like: ") + tbl.Print() + file, err := storage.SaveJSONEffectsToTmp(effects) + if err != nil { + return errors.Trace(err) + } + console.Printf("%s effects will happen in the external storage, you may check them in %s\n", + color.HiRedString("%d", len(effects)), + color.New(color.Bold).Sprint(file)) + cx.printErr(estBase.Warnings, "The following errors happened during estimating: ") + return nil +} + +func RunMigrateTo(ctx context.Context, cfg MigrateToConfig) error { + if err := cfg.Verify(); err != nil { + return err + } + + backend, err := storage.ParseBackend(cfg.StorageURI, &cfg.BackendOptions) + if err != nil { + return err + } + st, err := storage.Create(context.Background(), backend, false) + if err != nil { + return err + } + + console := glue.ConsoleOperations{ConsoleGlue: glue.StdIOGlue{}} + + est := stream.MigerationExtension(st) + est.Hooks = stream.NewProgressBarHooks(console) + migs, err := est.Load(ctx) + if err != nil { + return err + } + + cx := migrateToCtx{ + cfg: cfg, + console: console, + est: est, + } + + targetVersion, ok := cfg.getTargetVersion(migs) + if !ok { + console.Printf("No recent migration found. 
Skipping.") + return nil + } + + run := func(f func(stream.MigrationExt) stream.MergeAndMigratedTo) error { + result := f(est) + if len(result.Warnings) > 0 { + console.Printf("The following errors happened, you may re-execute to retry: ") + for _, w := range result.Warnings { + console.Printf("- %s\n", color.HiRedString(w.Error())) + } + } + return nil + } + if cfg.DryRun { + run = cx.dryRun + } + + return run(func(est stream.MigrationExt) stream.MergeAndMigratedTo { + return est.MergeAndMigrateTo(ctx, targetVersion, stream.MMOptInteractiveCheck(func(ctx context.Context, m *backup.Migration) bool { + return cfg.Yes || cx.askForContinue(m) + })) + }) +} diff --git a/br/pkg/task/operator/cmd.go b/br/pkg/task/operator/prepare_snap.go similarity index 100% rename from br/pkg/task/operator/cmd.go rename to br/pkg/task/operator/prepare_snap.go From ca99dedad3d42f28dd7385cf0c5d64c02d5646e6 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 7 Nov 2024 16:42:21 +0800 Subject: [PATCH 02/17] Restore: refine split code for both compacted sst files and logs (#56917) ref pingcap/tidb#56522 --- br/pkg/conn/BUILD.bazel | 2 +- br/pkg/conn/conn_test.go | 14 +- br/pkg/restore/BUILD.bazel | 2 +- br/pkg/restore/import_mode_switcher_test.go | 18 +- br/pkg/restore/internal/log_split/BUILD.bazel | 54 -- .../restore/internal/log_split/export_test.go | 29 -- br/pkg/restore/internal/log_split/split.go | 376 -------------- .../restore/internal/log_split/split_test.go | 230 --------- .../restore/internal/snap_split/BUILD.bazel | 31 -- br/pkg/restore/internal/snap_split/split.go | 127 ----- .../restore/internal/snap_split/split_test.go | 158 ------ br/pkg/restore/log_client/BUILD.bazel | 1 - br/pkg/restore/log_client/client.go | 5 +- br/pkg/restore/log_client/client_test.go | 7 +- .../restore/log_client/import_retry_test.go | 5 +- br/pkg/restore/misc_test.go | 8 +- br/pkg/restore/snap_client/BUILD.bazel | 3 +- br/pkg/restore/snap_client/client.go | 3 +- br/pkg/restore/snap_client/client_test.go | 16 +- br/pkg/restore/snap_client/import_test.go | 6 +- .../placement_rule_manager_test.go | 10 +- .../snap_client/systable_restore_test.go | 4 +- br/pkg/restore/snap_client/tikv_sender.go | 5 +- br/pkg/restore/split/BUILD.bazel | 15 +- br/pkg/restore/split/mock_pd_client.go | 424 ++++++++++++++++ br/pkg/restore/split/split.go | 462 ++++++++++++++++++ br/pkg/restore/split/split_test.go | 355 ++++++++++++++ .../log_split => split}/sum_sorted.go | 2 +- .../log_split => split}/sum_sorted_test.go | 40 +- br/pkg/task/BUILD.bazel | 1 + br/pkg/task/restore_test.go | 3 +- br/pkg/utiltest/BUILD.bazel | 12 +- br/pkg/utiltest/fake.go | 296 ----------- pkg/lightning/backend/local/BUILD.bazel | 1 - .../backend/local/local_check_test.go | 4 +- 35 files changed, 1335 insertions(+), 1394 deletions(-) delete mode 100644 br/pkg/restore/internal/log_split/BUILD.bazel delete mode 100644 br/pkg/restore/internal/log_split/export_test.go delete mode 100644 br/pkg/restore/internal/log_split/split.go delete mode 100644 br/pkg/restore/internal/log_split/split_test.go delete mode 100644 br/pkg/restore/internal/snap_split/BUILD.bazel delete mode 100644 br/pkg/restore/internal/snap_split/split.go delete mode 100644 br/pkg/restore/internal/snap_split/split_test.go rename br/pkg/restore/{internal/log_split => split}/sum_sorted.go (99%) rename br/pkg/restore/{internal/log_split => split}/sum_sorted_test.go (87%) delete mode 100644 br/pkg/utiltest/fake.go diff --git a/br/pkg/conn/BUILD.bazel b/br/pkg/conn/BUILD.bazel index b7d5e72579e78..c6f7b693d6e3d 100644 
--- a/br/pkg/conn/BUILD.bazel +++ b/br/pkg/conn/BUILD.bazel @@ -52,7 +52,7 @@ go_test( "//br/pkg/config", "//br/pkg/conn/util", "//br/pkg/pdutil", - "//br/pkg/utiltest", + "//br/pkg/restore/split", "//pkg/testkit/testsetup", "@com_github_docker_go_units//:go-units", "@com_github_pingcap_errors//:errors", diff --git a/br/pkg/conn/conn_test.go b/br/pkg/conn/conn_test.go index e15fc747eb837..43b0e7f9f0eb3 100644 --- a/br/pkg/conn/conn_test.go +++ b/br/pkg/conn/conn_test.go @@ -18,7 +18,7 @@ import ( "github.com/pingcap/tidb/br/pkg/conn" "github.com/pingcap/tidb/br/pkg/conn/util" "github.com/pingcap/tidb/br/pkg/pdutil" - "github.com/pingcap/tidb/br/pkg/utiltest" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/stretchr/testify/require" "go.uber.org/multierr" "google.golang.org/grpc/codes" @@ -62,7 +62,7 @@ func TestGetAllTiKVStoresWithRetryCancel(t *testing.T) { }, } - fpdc := utiltest.NewFakePDClient(stores, false, nil) + fpdc := split.NewFakePDClient(stores, false, nil) _, err = conn.GetAllTiKVStoresWithRetry(ctx, fpdc, util.SkipTiFlash) require.Error(t, err) @@ -108,7 +108,7 @@ func TestGetAllTiKVStoresWithUnknown(t *testing.T) { }, } - fpdc := utiltest.NewFakePDClient(stores, false, nil) + fpdc := split.NewFakePDClient(stores, false, nil) _, err = conn.GetAllTiKVStoresWithRetry(ctx, fpdc, util.SkipTiFlash) require.Error(t, err) @@ -164,7 +164,7 @@ func TestCheckStoresAlive(t *testing.T) { }, } - fpdc := utiltest.NewFakePDClient(stores, false, nil) + fpdc := split.NewFakePDClient(stores, false, nil) kvStores, err := conn.GetAllTiKVStoresWithRetry(ctx, fpdc, util.SkipTiFlash) require.NoError(t, err) @@ -251,7 +251,7 @@ func TestGetAllTiKVStores(t *testing.T) { } for _, testCase := range testCases { - pdClient := utiltest.NewFakePDClient(testCase.stores, false, nil) + pdClient := split.NewFakePDClient(testCase.stores, false, nil) stores, err := util.GetAllTiKVStores(context.Background(), pdClient, testCase.storeBehavior) if len(testCase.expectedError) != 0 { require.Error(t, err) @@ -421,7 +421,7 @@ func TestGetMergeRegionSizeAndCount(t *testing.T) { pctx := context.Background() for _, ca := range cases { ctx, cancel := context.WithCancel(pctx) - pdCli := utiltest.NewFakePDClient(ca.stores, false, nil) + pdCli := split.NewFakePDClient(ca.stores, false, nil) require.Equal(t, len(ca.content), len(ca.stores)) count := 0 mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -586,7 +586,7 @@ func TestIsLogBackupEnabled(t *testing.T) { pctx := context.Background() for _, ca := range cases { ctx, cancel := context.WithCancel(pctx) - pdCli := utiltest.NewFakePDClient(ca.stores, false, nil) + pdCli := split.NewFakePDClient(ca.stores, false, nil) require.Equal(t, len(ca.content), len(ca.stores)) count := 0 mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/br/pkg/restore/BUILD.bazel b/br/pkg/restore/BUILD.bazel index 0b571e4889f13..5351b6ee4178d 100644 --- a/br/pkg/restore/BUILD.bazel +++ b/br/pkg/restore/BUILD.bazel @@ -52,7 +52,7 @@ go_test( "//br/pkg/conn", "//br/pkg/mock", "//br/pkg/pdutil", - "//br/pkg/utiltest", + "//br/pkg/restore/split", "//pkg/kv", "//pkg/parser/model", "//pkg/session", diff --git a/br/pkg/restore/import_mode_switcher_test.go b/br/pkg/restore/import_mode_switcher_test.go index 75d08c11a9033..41a2cc78f08e9 100644 --- a/br/pkg/restore/import_mode_switcher_test.go +++ b/br/pkg/restore/import_mode_switcher_test.go @@ -28,7 +28,7 @@ import ( 
"github.com/pingcap/tidb/br/pkg/conn" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/restore" - "github.com/pingcap/tidb/br/pkg/utiltest" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/stretchr/testify/require" "google.golang.org/grpc" ) @@ -74,13 +74,13 @@ func TestRestorePreWork(t *testing.T) { require.NoError(t, err) }() - pdClient := utiltest.NewFakePDClient([]*metapb.Store{ + pdClient := split.NewFakePDClient([]*metapb.Store{ { Id: 1, Address: fmt.Sprintf(":%d", 51111+port), }, }, false, nil) - pdHTTPCli := utiltest.NewFakePDHTTPClient() + pdHTTPCli := split.NewFakePDHTTPClient() mgr := &conn.Mgr{ PdController: pdutil.NewPdControllerWithPDClient( pdClient, pdHTTPCli, &semver.Version{Major: 4, Minor: 0, Patch: 9}), @@ -96,17 +96,17 @@ func TestRestorePreWork(t *testing.T) { _, ok := pdutil.Schedulers[key] require.True(t, ok) } - require.Equal(t, len(utiltest.ExistPDCfgGeneratorBefore), len(cfg.ScheduleCfg)) + require.Equal(t, len(split.ExistPDCfgGeneratorBefore), len(cfg.ScheduleCfg)) for key, value := range cfg.ScheduleCfg { - expectValue, ok := utiltest.ExistPDCfgGeneratorBefore[key] + expectValue, ok := split.ExistPDCfgGeneratorBefore[key] require.True(t, ok) require.Equal(t, expectValue, value) } cfgs, err := pdHTTPCli.GetConfig(context.TODO()) require.NoError(t, err) - require.Equal(t, len(utiltest.ExpectPDCfgGeneratorsResult), len(cfg.ScheduleCfg)) + require.Equal(t, len(split.ExpectPDCfgGeneratorsResult), len(cfg.ScheduleCfg)) for key, value := range cfgs { - expectValue, ok := utiltest.ExpectPDCfgGeneratorsResult[key[len("schedule."):]] + expectValue, ok := split.ExpectPDCfgGeneratorsResult[key[len("schedule."):]] require.True(t, ok) require.Equal(t, expectValue, value) } @@ -123,9 +123,9 @@ func TestRestorePreWork(t *testing.T) { { cfgs, err := pdHTTPCli.GetConfig(context.TODO()) require.NoError(t, err) - require.Equal(t, len(utiltest.ExistPDCfgGeneratorBefore), len(cfg.ScheduleCfg)) + require.Equal(t, len(split.ExistPDCfgGeneratorBefore), len(cfg.ScheduleCfg)) for key, value := range cfgs { - expectValue, ok := utiltest.ExistPDCfgGeneratorBefore[key[len("schedule."):]] + expectValue, ok := split.ExistPDCfgGeneratorBefore[key[len("schedule."):]] require.True(t, ok) require.Equal(t, expectValue, value) } diff --git a/br/pkg/restore/internal/log_split/BUILD.bazel b/br/pkg/restore/internal/log_split/BUILD.bazel deleted file mode 100644 index d929b04c003ad..0000000000000 --- a/br/pkg/restore/internal/log_split/BUILD.bazel +++ /dev/null @@ -1,54 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "log_split", - srcs = [ - "split.go", - "sum_sorted.go", - ], - importpath = "github.com/pingcap/tidb/br/pkg/restore/internal/log_split", - visibility = ["//br/pkg/restore:__subpackages__"], - deps = [ - "//br/pkg/logutil", - "//br/pkg/restore/internal/snap_split", - "//br/pkg/restore/split", - "//br/pkg/restore/utils", - "//br/pkg/utils", - "//pkg/kv", - "//pkg/tablecodec", - "//pkg/util", - "//pkg/util/codec", - "@com_github_google_btree//:btree", - "@com_github_pingcap_errors//:errors", - "@com_github_pingcap_kvproto//pkg/brpb", - "@com_github_pingcap_log//:log", - "@org_golang_x_sync//errgroup", - "@org_uber_go_zap//:zap", - ], -) - -go_test( - name = "log_split_test", - timeout = "short", - srcs = [ - "export_test.go", - "split_test.go", - "sum_sorted_test.go", - ], - embed = [":log_split"], - flaky = True, - shard_count = 4, - deps = [ - "//br/pkg/restore/internal/snap_split", - 
"//br/pkg/restore/split", - "//br/pkg/restore/utils", - "//br/pkg/utiltest", - "//pkg/kv", - "//pkg/tablecodec", - "//pkg/util/codec", - "@com_github_docker_go_units//:go-units", - "@com_github_pingcap_kvproto//pkg/brpb", - "@com_github_pingcap_kvproto//pkg/import_sstpb", - "@com_github_stretchr_testify//require", - ], -) diff --git a/br/pkg/restore/internal/log_split/export_test.go b/br/pkg/restore/internal/log_split/export_test.go deleted file mode 100644 index bca3eada27bbb..0000000000000 --- a/br/pkg/restore/internal/log_split/export_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logsplit - -import restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" - -func NewSplitHelperIteratorForTest(helper *SplitHelper, tableID int64, rule *restoreutils.RewriteRules) *splitHelperIterator { - return &splitHelperIterator{ - tableSplitters: []*rewriteSplitter{ - { - tableID: tableID, - rule: rule, - splitter: helper, - }, - }, - } -} diff --git a/br/pkg/restore/internal/log_split/split.go b/br/pkg/restore/internal/log_split/split.go deleted file mode 100644 index eb9b3165ce761..0000000000000 --- a/br/pkg/restore/internal/log_split/split.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package logsplit - -import ( - "bytes" - "context" - "sort" - "sync" - "time" - - "github.com/pingcap/errors" - backuppb "github.com/pingcap/kvproto/pkg/brpb" - "github.com/pingcap/log" - snapsplit "github.com/pingcap/tidb/br/pkg/restore/internal/snap_split" - "github.com/pingcap/tidb/br/pkg/restore/split" - restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" - "github.com/pingcap/tidb/pkg/tablecodec" - "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/codec" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -type rewriteSplitter struct { - rewriteKey []byte - tableID int64 - rule *restoreutils.RewriteRules - splitter *SplitHelper -} - -type splitHelperIterator struct { - tableSplitters []*rewriteSplitter -} - -func (iter *splitHelperIterator) Traverse(fn func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool) { - for _, entry := range iter.tableSplitters { - endKey := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(entry.tableID+1)) - rule := entry.rule - entry.splitter.Traverse(func(v Valued) bool { - return fn(v, endKey, rule) - }) - } -} - -type LogSplitHelper struct { - tableSplitter map[int64]*SplitHelper - rules map[int64]*restoreutils.RewriteRules - client split.SplitClient - pool *util.WorkerPool - eg *errgroup.Group - regionsCh chan []*split.RegionInfo - - splitThreSholdSize uint64 - splitThreSholdKeys int64 -} - -func NewLogSplitHelper(rules map[int64]*restoreutils.RewriteRules, client split.SplitClient, splitSize uint64, splitKeys int64) *LogSplitHelper { - return &LogSplitHelper{ - tableSplitter: make(map[int64]*SplitHelper), - rules: rules, - client: client, - pool: util.NewWorkerPool(128, "split region"), - eg: nil, - - splitThreSholdSize: splitSize, - splitThreSholdKeys: splitKeys, - } -} - -func (helper *LogSplitHelper) iterator() *splitHelperIterator { - tableSplitters := make([]*rewriteSplitter, 0, len(helper.tableSplitter)) - for tableID, splitter := range helper.tableSplitter { - delete(helper.tableSplitter, tableID) - rewriteRule, exists := helper.rules[tableID] - if !exists { - log.Info("skip splitting due to no table id matched", zap.Int64("tableID", tableID)) - continue - } - newTableID := restoreutils.GetRewriteTableID(tableID, rewriteRule) - if newTableID == 0 { - log.Warn("failed to get the rewrite table id", zap.Int64("tableID", tableID)) - continue - } - tableSplitters = append(tableSplitters, &rewriteSplitter{ - rewriteKey: codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(newTableID)), - tableID: newTableID, - rule: rewriteRule, - splitter: splitter, - }) - } - sort.Slice(tableSplitters, func(i, j int) bool { - return bytes.Compare(tableSplitters[i].rewriteKey, tableSplitters[j].rewriteKey) < 0 - }) - return &splitHelperIterator{ - tableSplitters: tableSplitters, - } -} - -const splitFileThreshold = 1024 * 1024 // 1 MB - -func (helper *LogSplitHelper) skipFile(file *backuppb.DataFileInfo) bool { - _, exist := helper.rules[file.TableId] - return file.Length < splitFileThreshold || file.IsMeta || !exist -} - -func (helper *LogSplitHelper) Merge(file *backuppb.DataFileInfo) { - if helper.skipFile(file) { - return - } - splitHelper, exist := helper.tableSplitter[file.TableId] - if !exist { - splitHelper = NewSplitHelper() - helper.tableSplitter[file.TableId] = splitHelper - } - - splitHelper.Merge(Valued{ - Key: Span{ - StartKey: file.StartKey, - EndKey: file.EndKey, - }, - Value: Value{ - Size: file.Length, - Number: file.NumberOfEntries, - }, - }) -} - -type splitFunc = func(context.Context, 
*snapsplit.RegionSplitter, uint64, int64, *split.RegionInfo, []Valued) error - -func (helper *LogSplitHelper) splitRegionByPoints( - ctx context.Context, - regionSplitter *snapsplit.RegionSplitter, - initialLength uint64, - initialNumber int64, - region *split.RegionInfo, - valueds []Valued, -) error { - var ( - splitPoints [][]byte = make([][]byte, 0) - lastKey []byte = region.Region.StartKey - length uint64 = initialLength - number int64 = initialNumber - ) - for _, v := range valueds { - // decode will discard ts behind the key, which results in the same key for consecutive ranges - if !bytes.Equal(lastKey, v.GetStartKey()) && (v.Value.Size+length > helper.splitThreSholdSize || v.Value.Number+number > helper.splitThreSholdKeys) { - _, rawKey, _ := codec.DecodeBytes(v.GetStartKey(), nil) - splitPoints = append(splitPoints, rawKey) - length = 0 - number = 0 - } - lastKey = v.GetStartKey() - length += v.Value.Size - number += v.Value.Number - } - - if len(splitPoints) == 0 { - return nil - } - - helper.pool.ApplyOnErrorGroup(helper.eg, func() error { - newRegions, errSplit := regionSplitter.SplitWaitAndScatter(ctx, region, splitPoints) - if errSplit != nil { - log.Warn("failed to split the scaned region", zap.Error(errSplit)) - sort.Slice(splitPoints, func(i, j int) bool { - return bytes.Compare(splitPoints[i], splitPoints[j]) < 0 - }) - return regionSplitter.ExecuteSplit(ctx, splitPoints) - } - select { - case <-ctx.Done(): - return nil - case helper.regionsCh <- newRegions: - } - log.Info("split the region", zap.Uint64("region-id", region.Region.Id), zap.Int("split-point-number", len(splitPoints))) - return nil - }) - return nil -} - -// SplitPoint selects ranges overlapped with each region, and calls `splitF` to split the region -func SplitPoint( - ctx context.Context, - iter *splitHelperIterator, - client split.SplitClient, - splitF splitFunc, -) (err error) { - // common status - var ( - regionSplitter *snapsplit.RegionSplitter = snapsplit.NewRegionSplitter(client) - ) - // region traverse status - var ( - // the region buffer of each scan - regions []*split.RegionInfo = nil - regionIndex int = 0 - ) - // region split status - var ( - // range span +----------------+------+---+-------------+ - // region span +------------------------------------+ - // +initial length+ +end valued+ - // regionValueds is the ranges array overlapped with `regionInfo` - regionValueds []Valued = nil - // regionInfo is the region to be split - regionInfo *split.RegionInfo = nil - // intialLength is the length of the part of the first range overlapped with the region - initialLength uint64 = 0 - initialNumber int64 = 0 - ) - // range status - var ( - // regionOverCount is the number of regions overlapped with the range - regionOverCount uint64 = 0 - ) - - iter.Traverse(func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool { - if v.Value.Number == 0 || v.Value.Size == 0 { - return true - } - var ( - vStartKey []byte - vEndKey []byte - ) - // use `vStartKey` and `vEndKey` to compare with region's key - vStartKey, vEndKey, err = restoreutils.GetRewriteEncodedKeys(v, rule) - if err != nil { - return false - } - // traverse to the first region overlapped with the range - for ; regionIndex < len(regions); regionIndex++ { - if bytes.Compare(vStartKey, regions[regionIndex].Region.EndKey) < 0 { - break - } - } - // cannot find any regions overlapped with the range - // need to scan regions again - if regionIndex == len(regions) { - regions = nil - } - regionOverCount = 0 - for { - if regionIndex >= 
len(regions) { - var startKey []byte - if len(regions) > 0 { - // has traversed over the region buffer, should scan from the last region's end-key of the region buffer - startKey = regions[len(regions)-1].Region.EndKey - } else { - // scan from the range's start-key - startKey = vStartKey - } - // scan at most 64 regions into the region buffer - regions, err = split.ScanRegionsWithRetry(ctx, client, startKey, endKey, 64) - if err != nil { - return false - } - regionIndex = 0 - } - - region := regions[regionIndex] - // this region must be overlapped with the range - regionOverCount++ - // the region is the last one overlapped with the range, - // should split the last recorded region, - // and then record this region as the region to be split - if bytes.Compare(vEndKey, region.Region.EndKey) < 0 { - endLength := v.Value.Size / regionOverCount - endNumber := v.Value.Number / int64(regionOverCount) - if len(regionValueds) > 0 && regionInfo != region { - // add a part of the range as the end part - if bytes.Compare(vStartKey, regionInfo.Region.EndKey) < 0 { - regionValueds = append(regionValueds, NewValued(vStartKey, regionInfo.Region.EndKey, Value{Size: endLength, Number: endNumber})) - } - // try to split the region - err = splitF(ctx, regionSplitter, initialLength, initialNumber, regionInfo, regionValueds) - if err != nil { - return false - } - regionValueds = make([]Valued, 0) - } - if regionOverCount == 1 { - // the region completely contains the range - regionValueds = append(regionValueds, Valued{ - Key: Span{ - StartKey: vStartKey, - EndKey: vEndKey, - }, - Value: v.Value, - }) - } else { - // the region is overlapped with the last part of the range - initialLength = endLength - initialNumber = endNumber - } - regionInfo = region - // try the next range - return true - } - - // try the next region - regionIndex++ - } - }) - - if err != nil { - return errors.Trace(err) - } - if len(regionValueds) > 0 { - // try to split the region - err = splitF(ctx, regionSplitter, initialLength, initialNumber, regionInfo, regionValueds) - if err != nil { - return errors.Trace(err) - } - } - - return nil -} - -func (helper *LogSplitHelper) Split(ctx context.Context) error { - var ectx context.Context - var wg sync.WaitGroup - helper.eg, ectx = errgroup.WithContext(ctx) - helper.regionsCh = make(chan []*split.RegionInfo, 1024) - wg.Add(1) - go func() { - defer wg.Done() - scatterRegions := make([]*split.RegionInfo, 0) - receiveNewRegions: - for { - select { - case <-ctx.Done(): - return - case newRegions, ok := <-helper.regionsCh: - if !ok { - break receiveNewRegions - } - - scatterRegions = append(scatterRegions, newRegions...) - } - } - - regionSplitter := snapsplit.NewRegionSplitter(helper.client) - // It is too expensive to stop recovery and wait for a small number of regions - // to complete scatter, so the maximum waiting time is reduced to 1 minute. 
- _ = regionSplitter.WaitForScatterRegionsTimeout(ctx, scatterRegions, time.Minute) - }() - - iter := helper.iterator() - if err := SplitPoint(ectx, iter, helper.client, helper.splitRegionByPoints); err != nil { - return errors.Trace(err) - } - - // wait for completion of splitting regions - if err := helper.eg.Wait(); err != nil { - return errors.Trace(err) - } - - // wait for completion of scattering regions - close(helper.regionsCh) - wg.Wait() - - return nil -} diff --git a/br/pkg/restore/internal/log_split/split_test.go b/br/pkg/restore/internal/log_split/split_test.go deleted file mode 100644 index acbf61ae12f29..0000000000000 --- a/br/pkg/restore/internal/log_split/split_test.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logsplit_test - -import ( - "context" - "fmt" - "testing" - - "github.com/docker/go-units" - backuppb "github.com/pingcap/kvproto/pkg/brpb" - "github.com/pingcap/kvproto/pkg/import_sstpb" - logsplit "github.com/pingcap/tidb/br/pkg/restore/internal/log_split" - snapsplit "github.com/pingcap/tidb/br/pkg/restore/internal/snap_split" - "github.com/pingcap/tidb/br/pkg/restore/split" - restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" - "github.com/pingcap/tidb/br/pkg/utiltest" - "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/tablecodec" - "github.com/pingcap/tidb/pkg/util/codec" - "github.com/stretchr/testify/require" -) - -func keyWithTablePrefix(tableID int64, key string) []byte { - rawKey := append(tablecodec.GenTableRecordPrefix(tableID), []byte(key)...) 
- return codec.EncodeBytes([]byte{}, rawKey) -} - -func TestSplitPoint(t *testing.T) { - ctx := context.Background() - var oldTableID int64 = 50 - var tableID int64 = 100 - rewriteRules := &restoreutils.RewriteRules{ - Data: []*import_sstpb.RewriteRule{ - { - OldKeyPrefix: tablecodec.EncodeTablePrefix(oldTableID), - NewKeyPrefix: tablecodec.EncodeTablePrefix(tableID), - }, - }, - } - - // range: b c d e g i - // +---+ +---+ +---------+ - // +-------------+----------+---------+ - // region: a f h j - splitHelper := logsplit.NewSplitHelper() - splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "b"), EndKey: keyWithTablePrefix(oldTableID, "c")}, Value: logsplit.Value{Size: 100, Number: 100}}) - splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "d"), EndKey: keyWithTablePrefix(oldTableID, "e")}, Value: logsplit.Value{Size: 200, Number: 200}}) - splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "g"), EndKey: keyWithTablePrefix(oldTableID, "i")}, Value: logsplit.Value{Size: 300, Number: 300}}) - client := utiltest.NewFakeSplitClient() - client.AppendRegion(keyWithTablePrefix(tableID, "a"), keyWithTablePrefix(tableID, "f")) - client.AppendRegion(keyWithTablePrefix(tableID, "f"), keyWithTablePrefix(tableID, "h")) - client.AppendRegion(keyWithTablePrefix(tableID, "h"), keyWithTablePrefix(tableID, "j")) - client.AppendRegion(keyWithTablePrefix(tableID, "j"), keyWithTablePrefix(tableID+1, "a")) - - iter := logsplit.NewSplitHelperIteratorForTest(splitHelper, tableID, rewriteRules) - err := logsplit.SplitPoint(ctx, iter, client, func(ctx context.Context, rs *snapsplit.RegionSplitter, u uint64, o int64, ri *split.RegionInfo, v []logsplit.Valued) error { - require.Equal(t, u, uint64(0)) - require.Equal(t, o, int64(0)) - require.Equal(t, ri.Region.StartKey, keyWithTablePrefix(tableID, "a")) - require.Equal(t, ri.Region.EndKey, keyWithTablePrefix(tableID, "f")) - require.EqualValues(t, v[0].Key.StartKey, keyWithTablePrefix(tableID, "b")) - require.EqualValues(t, v[0].Key.EndKey, keyWithTablePrefix(tableID, "c")) - require.EqualValues(t, v[1].Key.StartKey, keyWithTablePrefix(tableID, "d")) - require.EqualValues(t, v[1].Key.EndKey, keyWithTablePrefix(tableID, "e")) - require.Equal(t, len(v), 2) - return nil - }) - require.NoError(t, err) -} - -func getCharFromNumber(prefix string, i int) string { - c := '1' + (i % 10) - b := '1' + (i%100)/10 - a := '1' + i/100 - return fmt.Sprintf("%s%c%c%c", prefix, a, b, c) -} - -func TestSplitPoint2(t *testing.T) { - ctx := context.Background() - var oldTableID int64 = 50 - var tableID int64 = 100 - rewriteRules := &restoreutils.RewriteRules{ - Data: []*import_sstpb.RewriteRule{ - { - OldKeyPrefix: tablecodec.EncodeTablePrefix(oldTableID), - NewKeyPrefix: tablecodec.EncodeTablePrefix(tableID), - }, - }, - } - - // range: b c d e f i j k l n - // +---+ +---+ +-----------------+ +----+ +--------+ - // +---------------+--+.....+----+------------+---------+ - // region: a g >128 h m o - splitHelper := logsplit.NewSplitHelper() - splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "b"), EndKey: keyWithTablePrefix(oldTableID, "c")}, Value: logsplit.Value{Size: 100, Number: 100}}) - splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "d"), EndKey: keyWithTablePrefix(oldTableID, "e")}, Value: logsplit.Value{Size: 200, Number: 200}}) - 
splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "f"), EndKey: keyWithTablePrefix(oldTableID, "i")}, Value: logsplit.Value{Size: 300, Number: 300}}) - splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "j"), EndKey: keyWithTablePrefix(oldTableID, "k")}, Value: logsplit.Value{Size: 200, Number: 200}}) - splitHelper.Merge(logsplit.Valued{Key: logsplit.Span{StartKey: keyWithTablePrefix(oldTableID, "l"), EndKey: keyWithTablePrefix(oldTableID, "n")}, Value: logsplit.Value{Size: 200, Number: 200}}) - client := utiltest.NewFakeSplitClient() - client.AppendRegion(keyWithTablePrefix(tableID, "a"), keyWithTablePrefix(tableID, "g")) - client.AppendRegion(keyWithTablePrefix(tableID, "g"), keyWithTablePrefix(tableID, getCharFromNumber("g", 0))) - for i := 0; i < 256; i++ { - client.AppendRegion(keyWithTablePrefix(tableID, getCharFromNumber("g", i)), keyWithTablePrefix(tableID, getCharFromNumber("g", i+1))) - } - client.AppendRegion(keyWithTablePrefix(tableID, getCharFromNumber("g", 256)), keyWithTablePrefix(tableID, "h")) - client.AppendRegion(keyWithTablePrefix(tableID, "h"), keyWithTablePrefix(tableID, "m")) - client.AppendRegion(keyWithTablePrefix(tableID, "m"), keyWithTablePrefix(tableID, "o")) - client.AppendRegion(keyWithTablePrefix(tableID, "o"), keyWithTablePrefix(tableID+1, "a")) - - firstSplit := true - iter := logsplit.NewSplitHelperIteratorForTest(splitHelper, tableID, rewriteRules) - err := logsplit.SplitPoint(ctx, iter, client, func(ctx context.Context, rs *snapsplit.RegionSplitter, u uint64, o int64, ri *split.RegionInfo, v []logsplit.Valued) error { - if firstSplit { - require.Equal(t, u, uint64(0)) - require.Equal(t, o, int64(0)) - require.Equal(t, ri.Region.StartKey, keyWithTablePrefix(tableID, "a")) - require.Equal(t, ri.Region.EndKey, keyWithTablePrefix(tableID, "g")) - require.EqualValues(t, v[0].Key.StartKey, keyWithTablePrefix(tableID, "b")) - require.EqualValues(t, v[0].Key.EndKey, keyWithTablePrefix(tableID, "c")) - require.EqualValues(t, v[1].Key.StartKey, keyWithTablePrefix(tableID, "d")) - require.EqualValues(t, v[1].Key.EndKey, keyWithTablePrefix(tableID, "e")) - require.EqualValues(t, v[2].Key.StartKey, keyWithTablePrefix(tableID, "f")) - require.EqualValues(t, v[2].Key.EndKey, keyWithTablePrefix(tableID, "g")) - require.Equal(t, v[2].Value.Size, uint64(1)) - require.Equal(t, v[2].Value.Number, int64(1)) - require.Equal(t, len(v), 3) - firstSplit = false - } else { - require.Equal(t, u, uint64(1)) - require.Equal(t, o, int64(1)) - require.Equal(t, ri.Region.StartKey, keyWithTablePrefix(tableID, "h")) - require.Equal(t, ri.Region.EndKey, keyWithTablePrefix(tableID, "m")) - require.EqualValues(t, v[0].Key.StartKey, keyWithTablePrefix(tableID, "j")) - require.EqualValues(t, v[0].Key.EndKey, keyWithTablePrefix(tableID, "k")) - require.EqualValues(t, v[1].Key.StartKey, keyWithTablePrefix(tableID, "l")) - require.EqualValues(t, v[1].Key.EndKey, keyWithTablePrefix(tableID, "m")) - require.Equal(t, v[1].Value.Size, uint64(100)) - require.Equal(t, v[1].Value.Number, int64(100)) - require.Equal(t, len(v), 2) - } - return nil - }) - require.NoError(t, err) -} - -func fakeFile(tableID, rowID int64, length uint64, num int64) *backuppb.DataFileInfo { - return &backuppb.DataFileInfo{ - StartKey: fakeRowKey(tableID, rowID), - EndKey: fakeRowKey(tableID, rowID+1), - TableId: tableID, - Length: length, - NumberOfEntries: num, - } -} - -func fakeRowKey(tableID, rowID int64) kv.Key { - return 
codec.EncodeBytes(nil, tablecodec.EncodeRecordKey(tablecodec.GenTableRecordPrefix(tableID), kv.IntHandle(rowID))) -} - -func TestLogSplitHelper(t *testing.T) { - ctx := context.Background() - rules := map[int64]*restoreutils.RewriteRules{ - 1: { - Data: []*import_sstpb.RewriteRule{ - { - OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), - NewKeyPrefix: tablecodec.GenTableRecordPrefix(100), - }, - }, - }, - 2: { - Data: []*import_sstpb.RewriteRule{ - { - OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), - NewKeyPrefix: tablecodec.GenTableRecordPrefix(200), - }, - }, - }, - } - oriRegions := [][]byte{ - {}, - codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), - codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), - codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), - } - mockPDCli := split.NewMockPDClientForSplit() - mockPDCli.SetRegions(oriRegions) - client := split.NewClient(mockPDCli, nil, nil, 100, 4) - helper := logsplit.NewLogSplitHelper(rules, client, 4*units.MiB, 400) - - helper.Merge(fakeFile(1, 100, 100, 100)) - helper.Merge(fakeFile(1, 200, 2*units.MiB, 200)) - helper.Merge(fakeFile(2, 100, 3*units.MiB, 300)) - helper.Merge(fakeFile(3, 100, 10*units.MiB, 100000)) - // different regions, no split happens - err := helper.Split(ctx) - require.NoError(t, err) - regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) - require.NoError(t, err) - require.Len(t, regions, 3) - require.Equal(t, []byte{}, regions[0].Meta.StartKey) - require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), regions[1].Meta.StartKey) - require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), regions[2].Meta.StartKey) - require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), regions[2].Meta.EndKey) - - helper.Merge(fakeFile(1, 300, 3*units.MiB, 10)) - helper.Merge(fakeFile(1, 400, 4*units.MiB, 10)) - // trigger to split regions for table 1 - err = helper.Split(ctx) - require.NoError(t, err) - regions, err = mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) - require.NoError(t, err) - require.Len(t, regions, 4) - require.Equal(t, fakeRowKey(100, 400), kv.Key(regions[1].Meta.EndKey)) -} diff --git a/br/pkg/restore/internal/snap_split/BUILD.bazel b/br/pkg/restore/internal/snap_split/BUILD.bazel deleted file mode 100644 index ab6df360220d6..0000000000000 --- a/br/pkg/restore/internal/snap_split/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "snap_split", - srcs = ["split.go"], - importpath = "github.com/pingcap/tidb/br/pkg/restore/internal/snap_split", - visibility = ["//br/pkg/restore:__subpackages__"], - deps = [ - "//br/pkg/restore/split", - "@com_github_pingcap_errors//:errors", - "@com_github_pingcap_log//:log", - "@org_uber_go_zap//:zap", - ], -) - -go_test( - name = "snap_split_test", - timeout = "short", - srcs = ["split_test.go"], - flaky = True, - shard_count = 4, - deps = [ - ":snap_split", - "//br/pkg/restore/split", - "//br/pkg/restore/utils", - "//br/pkg/rtree", - "//pkg/util/codec", - "@com_github_pingcap_kvproto//pkg/import_sstpb", - "@com_github_stretchr_testify//require", - ], -) diff --git a/br/pkg/restore/internal/snap_split/split.go b/br/pkg/restore/internal/snap_split/split.go deleted file mode 100644 index fca4a69cb5e6b..0000000000000 --- a/br/pkg/restore/internal/snap_split/split.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
- -package snapsplit - -import ( - "context" - "time" - - "github.com/pingcap/errors" - "github.com/pingcap/log" - "github.com/pingcap/tidb/br/pkg/restore/split" - "go.uber.org/zap" -) - -// RegionSplitter is a executor of region split by rules. -type RegionSplitter struct { - client split.SplitClient -} - -// NewRegionSplitter returns a new RegionSplitter. -func NewRegionSplitter(client split.SplitClient) *RegionSplitter { - return &RegionSplitter{ - client: client, - } -} - -// SplitWaitAndScatter expose the function `SplitWaitAndScatter` of split client. -func (rs *RegionSplitter) SplitWaitAndScatter(ctx context.Context, region *split.RegionInfo, keys [][]byte) ([]*split.RegionInfo, error) { - return rs.client.SplitWaitAndScatter(ctx, region, keys) -} - -// ExecuteSplit executes regions split and make sure new splitted regions are balance. -// It will split regions by the rewrite rules, -// then it will split regions by the end key of each range. -// tableRules includes the prefix of a table, since some ranges may have -// a prefix with record sequence or index sequence. -// note: all ranges and rewrite rules must have raw key. -func (rs *RegionSplitter) ExecuteSplit( - ctx context.Context, - sortedSplitKeys [][]byte, -) error { - if len(sortedSplitKeys) == 0 { - log.Info("skip split regions, no split keys") - return nil - } - - log.Info("execute split sorted keys", zap.Int("keys count", len(sortedSplitKeys))) - return rs.executeSplitByRanges(ctx, sortedSplitKeys) -} - -func (rs *RegionSplitter) executeSplitByRanges( - ctx context.Context, - sortedKeys [][]byte, -) error { - startTime := time.Now() - // Choose the rough region split keys, - // each splited region contains 128 regions to be splitted. - const regionIndexStep = 128 - - roughSortedSplitKeys := make([][]byte, 0, len(sortedKeys)/regionIndexStep+1) - for curRegionIndex := regionIndexStep; curRegionIndex < len(sortedKeys); curRegionIndex += regionIndexStep { - roughSortedSplitKeys = append(roughSortedSplitKeys, sortedKeys[curRegionIndex]) - } - if len(roughSortedSplitKeys) > 0 { - if err := rs.executeSplitByKeys(ctx, roughSortedSplitKeys); err != nil { - return errors.Trace(err) - } - } - log.Info("finish spliting regions roughly", zap.Duration("take", time.Since(startTime))) - - // Then send split requests to each TiKV. - if err := rs.executeSplitByKeys(ctx, sortedKeys); err != nil { - return errors.Trace(err) - } - - log.Info("finish spliting and scattering regions", zap.Duration("take", time.Since(startTime))) - return nil -} - -// executeSplitByKeys will split regions by **sorted** keys with following steps. -// 1. locate regions with correspond keys. -// 2. split these regions with correspond keys. -// 3. make sure new split regions are balanced. -func (rs *RegionSplitter) executeSplitByKeys( - ctx context.Context, - sortedKeys [][]byte, -) error { - startTime := time.Now() - scatterRegions, err := rs.client.SplitKeysAndScatter(ctx, sortedKeys) - if err != nil { - return errors.Trace(err) - } - if len(scatterRegions) > 0 { - log.Info("finish splitting and scattering regions. and starts to wait", zap.Int("regions", len(scatterRegions)), - zap.Duration("take", time.Since(startTime))) - rs.waitRegionsScattered(ctx, scatterRegions, split.ScatterWaitUpperInterval) - } else { - log.Info("finish splitting regions.", zap.Duration("take", time.Since(startTime))) - } - return nil -} - -// waitRegionsScattered try to wait mutilple regions scatterd in 3 minutes. 
-// this could timeout, but if many regions scatterd the restore could continue -// so we don't wait long time here. -func (rs *RegionSplitter) waitRegionsScattered(ctx context.Context, scatterRegions []*split.RegionInfo, timeout time.Duration) { - log.Info("start to wait for scattering regions", zap.Int("regions", len(scatterRegions))) - startTime := time.Now() - leftCnt := rs.WaitForScatterRegionsTimeout(ctx, scatterRegions, timeout) - if leftCnt == 0 { - log.Info("waiting for scattering regions done", - zap.Int("regions", len(scatterRegions)), - zap.Duration("take", time.Since(startTime))) - } else { - log.Warn("waiting for scattering regions timeout", - zap.Int("not scattered Count", leftCnt), - zap.Int("regions", len(scatterRegions)), - zap.Duration("take", time.Since(startTime))) - } -} - -func (rs *RegionSplitter) WaitForScatterRegionsTimeout(ctx context.Context, regionInfos []*split.RegionInfo, timeout time.Duration) int { - ctx2, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - leftRegions, _ := rs.client.WaitRegionsScattered(ctx2, regionInfos) - return leftRegions -} diff --git a/br/pkg/restore/internal/snap_split/split_test.go b/br/pkg/restore/internal/snap_split/split_test.go deleted file mode 100644 index 0507950d589c5..0000000000000 --- a/br/pkg/restore/internal/snap_split/split_test.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. - -package snapsplit_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/pingcap/kvproto/pkg/import_sstpb" - snapsplit "github.com/pingcap/tidb/br/pkg/restore/internal/snap_split" - "github.com/pingcap/tidb/br/pkg/restore/split" - restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" - "github.com/pingcap/tidb/br/pkg/rtree" - "github.com/pingcap/tidb/pkg/util/codec" - "github.com/stretchr/testify/require" -) - -func TestScanEmptyRegion(t *testing.T) { - mockPDCli := split.NewMockPDClientForSplit() - mockPDCli.SetRegions([][]byte{{}, {12}, {34}, {}}) - client := split.NewClient(mockPDCli, nil, nil, 100, 4) - keys := initKeys() - // make keys has only one - keys = keys[0:1] - regionSplitter := snapsplit.NewRegionSplitter(client) - - ctx := context.Background() - err := regionSplitter.ExecuteSplit(ctx, keys) - // should not return error with only one range entry - require.NoError(t, err) -} - -func TestSplitEmptyRegion(t *testing.T) { - mockPDCli := split.NewMockPDClientForSplit() - mockPDCli.SetRegions([][]byte{{}, {12}, {34}, {}}) - client := split.NewClient(mockPDCli, nil, nil, 100, 4) - regionSplitter := snapsplit.NewRegionSplitter(client) - err := regionSplitter.ExecuteSplit(context.Background(), nil) - require.NoError(t, err) -} - -// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) -// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) -// rewrite rules: aa -> xx, cc -> bb -// expected regions after split: -// -// [, aay), [aay, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), -// [bbj, cca), [cca, xxe), [xxe, xxz), [xxz, ) -func TestSplitAndScatter(t *testing.T) { - rangeBoundaries := [][]byte{[]byte(""), []byte("aay"), []byte("bba"), []byte("bbh"), []byte("cca"), []byte("")} - encodeBytes(rangeBoundaries) - mockPDCli := split.NewMockPDClientForSplit() - mockPDCli.SetRegions(rangeBoundaries) - client := split.NewClient(mockPDCli, nil, nil, 100, 4) - regionSplitter := snapsplit.NewRegionSplitter(client) - ctx := context.Background() - - ranges := initRanges() - rules := initRewriteRules() - splitKeys := make([][]byte, 0, len(ranges)) - for _, rg := 
range ranges { - tmp, err := restoreutils.RewriteRange(&rg, rules) - require.NoError(t, err) - splitKeys = append(splitKeys, tmp.EndKey) - } - sort.Slice(splitKeys, func(i, j int) bool { - return bytes.Compare(splitKeys[i], splitKeys[j]) < 0 - }) - err := regionSplitter.ExecuteSplit(ctx, splitKeys) - require.NoError(t, err) - regions := mockPDCli.Regions.ScanRange(nil, nil, 100) - expected := [][]byte{[]byte(""), []byte("aay"), []byte("bba"), []byte("bbf"), []byte("bbh"), []byte("bbj"), []byte("cca"), []byte("xxe"), []byte("xxz"), []byte("")} - encodeBytes(expected) - require.Len(t, regions, len(expected)-1) - for i, region := range regions { - require.Equal(t, expected[i], region.Meta.StartKey) - require.Equal(t, expected[i+1], region.Meta.EndKey) - } -} - -func encodeBytes(keys [][]byte) { - for i := range keys { - if len(keys[i]) == 0 { - continue - } - keys[i] = codec.EncodeBytes(nil, keys[i]) - } -} - -func TestRawSplit(t *testing.T) { - // Fix issue #36490. - splitKeys := [][]byte{{}} - ctx := context.Background() - rangeBoundaries := [][]byte{[]byte(""), []byte("aay"), []byte("bba"), []byte("bbh"), []byte("cca"), []byte("")} - mockPDCli := split.NewMockPDClientForSplit() - mockPDCli.SetRegions(rangeBoundaries) - client := split.NewClient(mockPDCli, nil, nil, 100, 4, split.WithRawKV()) - - regionSplitter := snapsplit.NewRegionSplitter(client) - err := regionSplitter.ExecuteSplit(ctx, splitKeys) - require.NoError(t, err) - - regions := mockPDCli.Regions.ScanRange(nil, nil, 100) - require.Len(t, regions, len(rangeBoundaries)-1) - for i, region := range regions { - require.Equal(t, rangeBoundaries[i], region.Meta.StartKey) - require.Equal(t, rangeBoundaries[i+1], region.Meta.EndKey) - } -} - -// keys: aae, aaz, ccf, ccj -func initKeys() [][]byte { - return [][]byte{ - []byte("aae"), - []byte("aaz"), - []byte("ccf"), - []byte("ccj"), - } -} - -// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) -func initRanges() []rtree.Range { - var ranges [4]rtree.Range - ranges[0] = rtree.Range{ - StartKey: []byte("aaa"), - EndKey: []byte("aae"), - } - ranges[1] = rtree.Range{ - StartKey: []byte("aae"), - EndKey: []byte("aaz"), - } - ranges[2] = rtree.Range{ - StartKey: []byte("ccd"), - EndKey: []byte("ccf"), - } - ranges[3] = rtree.Range{ - StartKey: []byte("ccf"), - EndKey: []byte("ccj"), - } - return ranges[:] -} - -func initRewriteRules() *restoreutils.RewriteRules { - var rules [2]*import_sstpb.RewriteRule - rules[0] = &import_sstpb.RewriteRule{ - OldKeyPrefix: []byte("aa"), - NewKeyPrefix: []byte("xx"), - } - rules[1] = &import_sstpb.RewriteRule{ - OldKeyPrefix: []byte("cc"), - NewKeyPrefix: []byte("bb"), - } - return &restoreutils.RewriteRules{ - Data: rules[:], - } -} diff --git a/br/pkg/restore/log_client/BUILD.bazel b/br/pkg/restore/log_client/BUILD.bazel index 5975b0726aa1d..d55c8066514f3 100644 --- a/br/pkg/restore/log_client/BUILD.bazel +++ b/br/pkg/restore/log_client/BUILD.bazel @@ -24,7 +24,6 @@ go_library( "//br/pkg/restore", "//br/pkg/restore/ingestrec", "//br/pkg/restore/internal/import_client", - "//br/pkg/restore/internal/log_split", "//br/pkg/restore/internal/rawkv", "//br/pkg/restore/split", "//br/pkg/restore/tiflashrec", diff --git a/br/pkg/restore/log_client/client.go b/br/pkg/restore/log_client/client.go index d208b58bb15d2..4160aa86a6048 100644 --- a/br/pkg/restore/log_client/client.go +++ b/br/pkg/restore/log_client/client.go @@ -46,7 +46,6 @@ import ( "github.com/pingcap/tidb/br/pkg/restore" "github.com/pingcap/tidb/br/pkg/restore/ingestrec" importclient 
"github.com/pingcap/tidb/br/pkg/restore/internal/import_client" - logsplit "github.com/pingcap/tidb/br/pkg/restore/internal/log_split" "github.com/pingcap/tidb/br/pkg/restore/internal/rawkv" "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/restore/tiflashrec" @@ -1719,7 +1718,7 @@ func (rc *LogClient) FailpointDoChecksumForLogRestore( type LogFilesIterWithSplitHelper struct { iter LogIter - helper *logsplit.LogSplitHelper + helper *split.LogSplitHelper buffer []*LogDataFileInfo next int } @@ -1729,7 +1728,7 @@ const SplitFilesBufferSize = 4096 func NewLogFilesIterWithSplitHelper(iter LogIter, rules map[int64]*restoreutils.RewriteRules, client split.SplitClient, splitSize uint64, splitKeys int64) LogIter { return &LogFilesIterWithSplitHelper{ iter: iter, - helper: logsplit.NewLogSplitHelper(rules, client, splitSize, splitKeys), + helper: split.NewLogSplitHelper(rules, client, splitSize, splitKeys), buffer: nil, next: 0, } diff --git a/br/pkg/restore/log_client/client_test.go b/br/pkg/restore/log_client/client_test.go index 0b1baf7af5675..6b16ec34c28ba 100644 --- a/br/pkg/restore/log_client/client_test.go +++ b/br/pkg/restore/log_client/client_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/br/pkg/gluetidb" "github.com/pingcap/tidb/br/pkg/mock" logclient "github.com/pingcap/tidb/br/pkg/restore/log_client" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/stream" "github.com/pingcap/tidb/br/pkg/utils/iter" @@ -90,7 +91,7 @@ func TestDeleteRangeQueryExec(t *testing.T) { m := mc g := gluetidb.New() client := logclient.NewRestoreClient( - utiltest.NewFakePDClient(nil, false, nil), nil, nil, keepalive.ClientParameters{}) + split.NewFakePDClient(nil, false, nil), nil, nil, keepalive.ClientParameters{}) err := client.Init(g, m.Storage) require.NoError(t, err) @@ -109,7 +110,7 @@ func TestDeleteRangeQuery(t *testing.T) { g := gluetidb.New() client := logclient.NewRestoreClient( - utiltest.NewFakePDClient(nil, false, nil), nil, nil, keepalive.ClientParameters{}) + split.NewFakePDClient(nil, false, nil), nil, nil, keepalive.ClientParameters{}) err := client.Init(g, m.Storage) require.NoError(t, err) @@ -1338,7 +1339,7 @@ func TestLogFilesIterWithSplitHelper(t *testing.T) { } mockIter := &mockLogIter{} ctx := context.Background() - logIter := logclient.NewLogFilesIterWithSplitHelper(mockIter, rewriteRulesMap, utiltest.NewFakeSplitClient(), 144*1024*1024, 1440000) + logIter := logclient.NewLogFilesIterWithSplitHelper(mockIter, rewriteRulesMap, split.NewFakeSplitClient(), 144*1024*1024, 1440000) next := 0 for r := logIter.TryNext(ctx); !r.Finished; r = logIter.TryNext(ctx) { require.NoError(t, r.Err) diff --git a/br/pkg/restore/log_client/import_retry_test.go b/br/pkg/restore/log_client/import_retry_test.go index 5c47f1f3acb27..bcde03c69c1ed 100644 --- a/br/pkg/restore/log_client/import_retry_test.go +++ b/br/pkg/restore/log_client/import_retry_test.go @@ -22,7 +22,6 @@ import ( logclient "github.com/pingcap/tidb/br/pkg/restore/log_client" "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/br/pkg/utiltest" "github.com/pingcap/tidb/pkg/store/pdtypes" "github.com/pingcap/tidb/pkg/util/codec" "github.com/stretchr/testify/require" @@ -92,12 +91,12 @@ func (c *TestClient) GetAllRegions() map[uint64]*split.RegionInfo { return c.regions } -func (c *TestClient) GetPDClient() *utiltest.FakePDClient { +func (c *TestClient) 
GetPDClient() *split.FakePDClient { stores := make([]*metapb.Store, 0, len(c.stores)) for _, store := range c.stores { stores = append(stores, store) } - return utiltest.NewFakePDClient(stores, false, nil) + return split.NewFakePDClient(stores, false, nil) } func (c *TestClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { diff --git a/br/pkg/restore/misc_test.go b/br/pkg/restore/misc_test.go index b461b3e395ebd..37fe2c4544859 100644 --- a/br/pkg/restore/misc_test.go +++ b/br/pkg/restore/misc_test.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/br/pkg/mock" "github.com/pingcap/tidb/br/pkg/restore" - "github.com/pingcap/tidb/br/pkg/utiltest" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/session" @@ -107,7 +107,7 @@ func TestAssertUserDBsEmpty(t *testing.T) { func TestGetTSWithRetry(t *testing.T) { t.Run("PD leader is healthy:", func(t *testing.T) { retryTimes := -1000 - pDClient := utiltest.NewFakePDClient(nil, false, &retryTimes) + pDClient := split.NewFakePDClient(nil, false, &retryTimes) _, err := restore.GetTSWithRetry(context.Background(), pDClient) require.NoError(t, err) }) @@ -118,14 +118,14 @@ func TestGetTSWithRetry(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/br/pkg/utils/set-attempt-to-one")) }() retryTimes := -1000 - pDClient := utiltest.NewFakePDClient(nil, true, &retryTimes) + pDClient := split.NewFakePDClient(nil, true, &retryTimes) _, err := restore.GetTSWithRetry(context.Background(), pDClient) require.Error(t, err) }) t.Run("PD leader switch successfully", func(t *testing.T) { retryTimes := 0 - pDClient := utiltest.NewFakePDClient(nil, true, &retryTimes) + pDClient := split.NewFakePDClient(nil, true, &retryTimes) _, err := restore.GetTSWithRetry(context.Background(), pDClient) require.NoError(t, err) }) diff --git a/br/pkg/restore/snap_client/BUILD.bazel b/br/pkg/restore/snap_client/BUILD.bazel index e10541b3b4d00..d77985e92b808 100644 --- a/br/pkg/restore/snap_client/BUILD.bazel +++ b/br/pkg/restore/snap_client/BUILD.bazel @@ -26,7 +26,6 @@ go_library( "//br/pkg/restore/internal/import_client", "//br/pkg/restore/internal/prealloc_db", "//br/pkg/restore/internal/prealloc_table_id", - "//br/pkg/restore/internal/snap_split", "//br/pkg/restore/split", "//br/pkg/restore/utils", "//br/pkg/storage", @@ -93,9 +92,9 @@ go_test( "//br/pkg/mock", "//br/pkg/restore", "//br/pkg/restore/internal/import_client", + "//br/pkg/restore/split", "//br/pkg/restore/utils", "//br/pkg/utils", - "//br/pkg/utiltest", "//pkg/domain", "//pkg/kv", "//pkg/meta/model", diff --git a/br/pkg/restore/snap_client/client.go b/br/pkg/restore/snap_client/client.go index 8747278baaf99..957ec300cff94 100644 --- a/br/pkg/restore/snap_client/client.go +++ b/br/pkg/restore/snap_client/client.go @@ -183,7 +183,7 @@ func (rc *SnapClient) Close() { rc.closeConn() if err := rc.fileImporter.Close(); err != nil { - log.Warn("failed to close file improter") + log.Warn("failed to close file importer") } log.Info("Restore client closed") @@ -457,6 +457,7 @@ func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.Storage if isRawKvMode { splitClientOpts = append(splitClientOpts, split.WithRawKV()) } + metaClient := split.NewClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, maxSplitKeysOnce, rc.storeCount+1, splitClientOpts...) 
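+	// The import client below reuses metaClient, so importing and region splitting go through the same split client.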
importCli := importclient.NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf) rc.fileImporter, err = NewSnapFileImporter(ctx, metaClient, importCli, backend, isRawKvMode, isTxnKvMode, stores, rc.rewriteMode, rc.concurrencyPerStore) diff --git a/br/pkg/restore/snap_client/client_test.go b/br/pkg/restore/snap_client/client_test.go index 380e4421b68fd..dd919646ddf61 100644 --- a/br/pkg/restore/snap_client/client_test.go +++ b/br/pkg/restore/snap_client/client_test.go @@ -34,7 +34,7 @@ import ( "github.com/pingcap/tidb/br/pkg/mock" importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" - "github.com/pingcap/tidb/br/pkg/utiltest" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/pkg/meta/model" pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" @@ -48,7 +48,7 @@ var mc *mock.Cluster func TestCreateTables(t *testing.T) { m := mc g := gluetidb.New() - client := snapclient.NewRestoreClient(m.PDClient, m.PDHTTPCli, nil, utiltest.DefaultTestKeepaliveCfg) + client := snapclient.NewRestoreClient(m.PDClient, m.PDHTTPCli, nil, split.DefaultTestKeepaliveCfg) err := client.Init(g, m.Storage) require.NoError(t, err) @@ -119,7 +119,7 @@ func TestNeedCheckTargetClusterFresh(t *testing.T) { defer cluster.Stop() g := gluetidb.New() - client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, utiltest.DefaultTestKeepaliveCfg) + client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, split.DefaultTestKeepaliveCfg) err := client.Init(g, cluster.Storage) require.NoError(t, err) @@ -149,7 +149,7 @@ func TestCheckTargetClusterFresh(t *testing.T) { defer cluster.Stop() g := gluetidb.New() - client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, utiltest.DefaultTestKeepaliveCfg) + client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, split.DefaultTestKeepaliveCfg) err := client.Init(g, cluster.Storage) require.NoError(t, err) @@ -166,7 +166,7 @@ func TestCheckTargetClusterFreshWithTable(t *testing.T) { defer cluster.Stop() g := gluetidb.New() - client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, utiltest.DefaultTestKeepaliveCfg) + client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, split.DefaultTestKeepaliveCfg) err := client.Init(g, cluster.Storage) require.NoError(t, err) @@ -201,7 +201,7 @@ func TestCheckTargetClusterFreshWithTable(t *testing.T) { func TestInitFullClusterRestore(t *testing.T) { cluster := mc g := gluetidb.New() - client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, utiltest.DefaultTestKeepaliveCfg) + client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, split.DefaultTestKeepaliveCfg) err := client.Init(g, cluster.Storage) require.NoError(t, err) @@ -309,7 +309,7 @@ func TestSetSpeedLimit(t *testing.T) { // 1. The cost of concurrent communication is expected to be less than the cost of serial communication. 
client := snapclient.NewRestoreClient( - utiltest.NewFakePDClient(mockStores, false, nil), nil, nil, utiltest.DefaultTestKeepaliveCfg) + split.NewFakePDClient(mockStores, false, nil), nil, nil, split.DefaultTestKeepaliveCfg) ctx := context.Background() recordStores = NewRecordStores() @@ -334,7 +334,7 @@ func TestSetSpeedLimit(t *testing.T) { recordStores = NewRecordStores() mockStores[5].Id = SET_SPEED_LIMIT_ERROR // setting a fault store client = snapclient.NewRestoreClient( - utiltest.NewFakePDClient(mockStores, false, nil), nil, nil, utiltest.DefaultTestKeepaliveCfg) + split.NewFakePDClient(mockStores, false, nil), nil, nil, split.DefaultTestKeepaliveCfg) // Concurrency needs to be less than the number of stores err = snapclient.MockCallSetSpeedLimit(ctx, FakeImporterClient{}, client, 2) diff --git a/br/pkg/restore/snap_client/import_test.go b/br/pkg/restore/snap_client/import_test.go index 762beb3784d22..324b2ec9a007a 100644 --- a/br/pkg/restore/snap_client/import_test.go +++ b/br/pkg/restore/snap_client/import_test.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" + "github.com/pingcap/tidb/br/pkg/restore/split" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" - "github.com/pingcap/tidb/br/pkg/utiltest" "github.com/pingcap/tidb/pkg/util/codec" "github.com/stretchr/testify/require" ) @@ -156,7 +156,7 @@ func (client *fakeImporterClient) MultiIngest( func TestSnapImporter(t *testing.T) { ctx := context.Background() - splitClient := utiltest.NewFakeSplitClient() + splitClient := split.NewFakeSplitClient() for _, region := range generateRegions() { splitClient.AppendPdRegion(region) } @@ -180,7 +180,7 @@ func TestSnapImporter(t *testing.T) { func TestSnapImporterRaw(t *testing.T) { ctx := context.Background() - splitClient := utiltest.NewFakeSplitClient() + splitClient := split.NewFakeSplitClient() for _, region := range generateRegions() { splitClient.AppendPdRegion(region) } diff --git a/br/pkg/restore/snap_client/placement_rule_manager_test.go b/br/pkg/restore/snap_client/placement_rule_manager_test.go index c078ebd6e48c4..1e60aaa5b93d1 100644 --- a/br/pkg/restore/snap_client/placement_rule_manager_test.go +++ b/br/pkg/restore/snap_client/placement_rule_manager_test.go @@ -24,8 +24,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/metautil" snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" + "github.com/pingcap/tidb/br/pkg/restore/split" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" - "github.com/pingcap/tidb/br/pkg/utiltest" "github.com/pingcap/tidb/pkg/meta/model" pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/tablecodec" @@ -93,8 +93,8 @@ func TestContextManagerOnlineNoStores(t *testing.T) { }, } - pdClient := utiltest.NewFakePDClient(stores, false, nil) - pdHTTPCli := utiltest.NewFakePDHTTPClient() + pdClient := split.NewFakePDClient(stores, false, nil) + pdHTTPCli := split.NewFakePDHTTPClient() placementRuleManager, err := snapclient.NewPlacementRuleManager(ctx, pdClient, pdHTTPCli, nil, true) require.NoError(t, err) tables := generateTables() @@ -234,9 +234,9 @@ func TestContextManagerOnlineLeave(t *testing.T) { stores := generateStores() regions := generateRegions() - pdClient := utiltest.NewFakePDClient(stores, false, nil) + pdClient := split.NewFakePDClient(stores, false, nil) pdClient.SetRegions(regions) - 
pdHTTPCli := utiltest.NewFakePDHTTPClient() + pdHTTPCli := split.NewFakePDHTTPClient() placementRuleManager, err := snapclient.NewPlacementRuleManager(ctx, pdClient, pdHTTPCli, nil, true) require.NoError(t, err) tables := generateTables() diff --git a/br/pkg/restore/snap_client/systable_restore_test.go b/br/pkg/restore/snap_client/systable_restore_test.go index 3759d3d9c8a6a..2f37ffacee830 100644 --- a/br/pkg/restore/snap_client/systable_restore_test.go +++ b/br/pkg/restore/snap_client/systable_restore_test.go @@ -23,8 +23,8 @@ import ( "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/restore" snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/utils" - "github.com/pingcap/tidb/br/pkg/utiltest" "github.com/pingcap/tidb/pkg/meta/model" pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" @@ -35,7 +35,7 @@ import ( func TestCheckSysTableCompatibility(t *testing.T) { cluster := mc g := gluetidb.New() - client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, utiltest.DefaultTestKeepaliveCfg) + client := snapclient.NewRestoreClient(cluster.PDClient, cluster.PDHTTPCli, nil, split.DefaultTestKeepaliveCfg) err := client.Init(g, cluster.Storage) require.NoError(t, err) diff --git a/br/pkg/restore/snap_client/tikv_sender.go b/br/pkg/restore/snap_client/tikv_sender.go index a5f27a87c4050..85aeac0d76f24 100644 --- a/br/pkg/restore/snap_client/tikv_sender.go +++ b/br/pkg/restore/snap_client/tikv_sender.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/tidb/br/pkg/checkpoint" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" - snapsplit "github.com/pingcap/tidb/br/pkg/restore/internal/snap_split" "github.com/pingcap/tidb/br/pkg/restore/split" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/summary" @@ -341,7 +340,7 @@ func (rc *SnapClient) SplitPoints( splitClientOpts = append(splitClientOpts, split.WithRawKV()) } - splitter := snapsplit.NewRegionSplitter(split.NewClient( + splitter := split.NewRegionSplitter(split.NewClient( rc.pdClient, rc.pdHTTPClient, rc.tlsConf, @@ -350,7 +349,7 @@ func (rc *SnapClient) SplitPoints( splitClientOpts..., )) - return splitter.ExecuteSplit(ctx, sortedSplitKeys) + return splitter.ExecuteSortedKeys(ctx, sortedSplitKeys) } func getFileRangeKey(f string) string { diff --git a/br/pkg/restore/split/BUILD.bazel b/br/pkg/restore/split/BUILD.bazel index 2d7002b493ad2..e27bb1834d7ac 100644 --- a/br/pkg/restore/split/BUILD.bazel +++ b/br/pkg/restore/split/BUILD.bazel @@ -7,6 +7,7 @@ go_library( "mock_pd_client.go", "region.go", "split.go", + "sum_sorted.go", ], importpath = "github.com/pingcap/tidb/br/pkg/restore/split", visibility = ["//visibility:public"], @@ -14,19 +15,24 @@ go_library( "//br/pkg/conn/util", "//br/pkg/errors", "//br/pkg/logutil", + "//br/pkg/pdutil", + "//br/pkg/restore/utils", "//br/pkg/utils", "//pkg/kv", "//pkg/lightning/common", "//pkg/lightning/config", "//pkg/lightning/log", "//pkg/store/pdtypes", + "//pkg/tablecodec", "//pkg/util", "//pkg/util/codec", "//pkg/util/intest", "//pkg/util/redact", "@com_github_docker_go_units//:go-units", + "@com_github_google_btree//:btree", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/errorpb", "@com_github_pingcap_kvproto//pkg/kvrpcpb", 
"@com_github_pingcap_kvproto//pkg/metapb", @@ -39,6 +45,7 @@ go_library( "@org_golang_google_grpc//codes", "@org_golang_google_grpc//credentials", "@org_golang_google_grpc//credentials/insecure", + "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", "@org_golang_x_sync//errgroup", "@org_uber_go_multierr//:multierr", @@ -52,12 +59,15 @@ go_test( srcs = [ "client_test.go", "split_test.go", + "sum_sorted_test.go", ], embed = [":split"], flaky = True, - shard_count = 19, + shard_count = 27, deps = [ "//br/pkg/errors", + "//br/pkg/restore/utils", + "//br/pkg/rtree", "//br/pkg/utils", "//pkg/kv", "//pkg/sessionctx/stmtctx", @@ -65,8 +75,11 @@ go_test( "//pkg/tablecodec", "//pkg/types", "//pkg/util/codec", + "@com_github_docker_go_units//:go-units", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_kvproto//pkg/brpb", + "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_pingcap_kvproto//pkg/metapb", "@com_github_pingcap_kvproto//pkg/pdpb", diff --git a/br/pkg/restore/split/mock_pd_client.go b/br/pkg/restore/split/mock_pd_client.go index 92cd055939926..6df6cd56f94b0 100644 --- a/br/pkg/restore/split/mock_pd_client.go +++ b/br/pkg/restore/split/mock_pd_client.go @@ -5,25 +5,172 @@ package split import ( "bytes" "context" + "math" "sync" + "time" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/pkg/store/pdtypes" "github.com/pingcap/tidb/pkg/util/codec" pd "github.com/tikv/pd/client" + pdhttp "github.com/tikv/pd/client/http" "google.golang.org/grpc/codes" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" ) +type TestClient struct { + SplitClient + pd.Client + + mu sync.RWMutex + stores map[uint64]*metapb.Store + Regions map[uint64]*RegionInfo + RegionsInfo *pdtypes.RegionTree // For now it's only used in ScanRegions + nextRegionID uint64 + + scattered map[uint64]bool + InjectErr bool + InjectTimes int32 +} + +func NewTestClient( + stores map[uint64]*metapb.Store, + regions map[uint64]*RegionInfo, + nextRegionID uint64, +) *TestClient { + regionsInfo := &pdtypes.RegionTree{} + for _, regionInfo := range regions { + regionsInfo.SetRegion(pdtypes.NewRegionInfo(regionInfo.Region, regionInfo.Leader)) + } + return &TestClient{ + stores: stores, + Regions: regions, + RegionsInfo: regionsInfo, + nextRegionID: nextRegionID, + scattered: map[uint64]bool{}, + } +} + +func (c *TestClient) GetAllRegions() map[uint64]*RegionInfo { + c.mu.RLock() + defer c.mu.RUnlock() + return c.Regions +} + +func (c *TestClient) GetPDClient() *FakePDClient { + stores := make([]*metapb.Store, 0, len(c.stores)) + for _, store := range c.stores { + stores = append(stores, store) + } + return NewFakePDClient(stores, false, nil) +} + +func (c *TestClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.RLock() + defer c.mu.RUnlock() + store, ok := c.stores[storeID] + if !ok { + return nil, errors.Errorf("store not found") + } + return store, nil +} + +func (c *TestClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + for _, region := range c.Regions { + if bytes.Compare(key, region.Region.StartKey) >= 0 && + (len(region.Region.EndKey) == 0 || bytes.Compare(key, region.Region.EndKey) < 0) { + return region, nil + } + } + return nil, 
errors.Errorf("region not found: key=%s", string(key)) +} + +func (c *TestClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + region, ok := c.Regions[regionID] + if !ok { + return nil, errors.Errorf("region not found: id=%d", regionID) + } + return region, nil +} + +func (c *TestClient) SplitWaitAndScatter(_ context.Context, _ *RegionInfo, keys [][]byte) ([]*RegionInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + newRegions := make([]*RegionInfo, 0) + for _, key := range keys { + var target *RegionInfo + splitKey := codec.EncodeBytes([]byte{}, key) + for _, region := range c.Regions { + if region.ContainsInterior(splitKey) { + target = region + } + } + if target == nil { + continue + } + newRegion := &RegionInfo{ + Region: &metapb.Region{ + Peers: target.Region.Peers, + Id: c.nextRegionID, + StartKey: target.Region.StartKey, + EndKey: splitKey, + }, + } + c.Regions[c.nextRegionID] = newRegion + c.nextRegionID++ + target.Region.StartKey = splitKey + c.Regions[target.Region.Id] = target + newRegions = append(newRegions, newRegion) + } + return newRegions, nil +} + +func (c *TestClient) GetOperator(context.Context, uint64) (*pdpb.GetOperatorResponse, error) { + return &pdpb.GetOperatorResponse{ + Header: new(pdpb.ResponseHeader), + }, nil +} + +func (c *TestClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + if c.InjectErr && c.InjectTimes > 0 { + c.InjectTimes -= 1 + return nil, status.Error(codes.Unavailable, "not leader") + } + if len(key) != 0 && bytes.Equal(key, endKey) { + return nil, status.Error(codes.Internal, "key and endKey are the same") + } + + infos := c.RegionsInfo.ScanRange(key, endKey, limit) + regions := make([]*RegionInfo, 0, len(infos)) + for _, info := range infos { + regions = append(regions, &RegionInfo{ + Region: info.Meta, + Leader: info.Leader, + }) + } + return regions, nil +} + +func (c *TestClient) WaitRegionsScattered(context.Context, []*RegionInfo) (int, error) { + return 0, nil +} + // MockPDClientForSplit is a mock PD client for testing split and scatter. 
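+// A typical test seeds it with encoded region boundaries before building a
+// split client, for example (mirroring TestScanEmptyRegion in this package):
+//
+//	mockPDCli := NewMockPDClientForSplit()
+//	mockPDCli.SetRegions([][]byte{{}, {12}, {34}, {}})
+//	client := NewClient(mockPDCli, nil, nil, 100, 4)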
type MockPDClientForSplit struct { pd.Client mu sync.Mutex + stores map[uint64]*metapb.Store Regions *pdtypes.RegionTree lastRegionID uint64 scanRegions struct { @@ -66,6 +213,13 @@ func (c *MockPDClientForSplit) SetRegions(boundaries [][]byte) []*metapb.Region return c.setRegions(boundaries) } +func (c *MockPDClientForSplit) SetStores(stores map[uint64]*metapb.Store) { + c.mu.Lock() + defer c.mu.Unlock() + + c.stores = stores +} + func (c *MockPDClientForSplit) setRegions(boundaries [][]byte) []*metapb.Region { ret := make([]*metapb.Region, 0, len(boundaries)-1) for i := 1; i < len(boundaries); i++ { @@ -236,3 +390,273 @@ func (c *MockPDClientForSplit) GetOperator(_ context.Context, regionID uint64) ( c.getOperator.responses[regionID] = c.getOperator.responses[regionID][1:] return ret, nil } + +func (c *MockPDClientForSplit) GetStore(_ context.Context, storeID uint64) (*metapb.Store, error) { + return c.stores[storeID], nil +} + +var DefaultTestKeepaliveCfg = keepalive.ClientParameters{ + Time: 3 * time.Second, + Timeout: 10 * time.Second, +} + +var ( + ExpectPDCfgGeneratorsResult = map[string]any{ + "merge-schedule-limit": 0, + "leader-schedule-limit": float64(40), + "region-schedule-limit": float64(40), + "max-snapshot-count": float64(40), + "enable-location-replacement": "false", + "max-pending-peer-count": uint64(math.MaxInt32), + } + + ExistPDCfgGeneratorBefore = map[string]any{ + "merge-schedule-limit": 100, + "leader-schedule-limit": float64(100), + "region-schedule-limit": float64(100), + "max-snapshot-count": float64(100), + "enable-location-replacement": "true", + "max-pending-peer-count": 100, + } +) + +type FakePDHTTPClient struct { + pdhttp.Client + + expireSchedulers map[string]time.Time + cfgs map[string]any + + rules map[string]*pdhttp.Rule +} + +func NewFakePDHTTPClient() *FakePDHTTPClient { + return &FakePDHTTPClient{ + expireSchedulers: make(map[string]time.Time), + cfgs: make(map[string]any), + + rules: make(map[string]*pdhttp.Rule), + } +} + +func (fpdh *FakePDHTTPClient) GetScheduleConfig(_ context.Context) (map[string]any, error) { + return ExistPDCfgGeneratorBefore, nil +} + +func (fpdh *FakePDHTTPClient) GetSchedulers(_ context.Context) ([]string, error) { + schedulers := make([]string, 0, len(pdutil.Schedulers)) + for scheduler := range pdutil.Schedulers { + schedulers = append(schedulers, scheduler) + } + return schedulers, nil +} + +func (fpdh *FakePDHTTPClient) SetSchedulerDelay(_ context.Context, key string, delay int64) error { + expireTime, ok := fpdh.expireSchedulers[key] + if ok { + if time.Now().Compare(expireTime) > 0 { + return errors.Errorf("the scheduler config set is expired") + } + if delay == 0 { + delete(fpdh.expireSchedulers, key) + } + } + if !ok && delay == 0 { + return errors.Errorf("set the nonexistent scheduler") + } + expireTime = time.Now().Add(time.Second * time.Duration(delay)) + fpdh.expireSchedulers[key] = expireTime + return nil +} + +func (fpdh *FakePDHTTPClient) SetConfig(_ context.Context, config map[string]any, ttl ...float64) error { + for key, value := range config { + fpdh.cfgs[key] = value + } + return nil +} + +func (fpdh *FakePDHTTPClient) GetConfig(_ context.Context) (map[string]any, error) { + return fpdh.cfgs, nil +} + +func (fpdh *FakePDHTTPClient) GetDelaySchedulers() map[string]struct{} { + delaySchedulers := make(map[string]struct{}) + for key, t := range fpdh.expireSchedulers { + now := time.Now() + if now.Compare(t) < 0 { + delaySchedulers[key] = struct{}{} + } + } + return delaySchedulers +} + +func (fpdh 
*FakePDHTTPClient) GetPlacementRule(_ context.Context, groupID string, ruleID string) (*pdhttp.Rule, error) { + rule, ok := fpdh.rules[ruleID] + if !ok { + rule = &pdhttp.Rule{ + GroupID: groupID, + ID: ruleID, + } + fpdh.rules[ruleID] = rule + } + return rule, nil +} + +func (fpdh *FakePDHTTPClient) SetPlacementRule(_ context.Context, rule *pdhttp.Rule) error { + fpdh.rules[rule.ID] = rule + return nil +} + +func (fpdh *FakePDHTTPClient) DeletePlacementRule(_ context.Context, groupID string, ruleID string) error { + delete(fpdh.rules, ruleID) + return nil +} + +type FakePDClient struct { + pd.Client + stores []*metapb.Store + regions []*pd.Region + + notLeader bool + retryTimes *int + + peerStoreId uint64 +} + +func NewFakePDClient(stores []*metapb.Store, notLeader bool, retryTime *int) *FakePDClient { + var retryTimeInternal int + if retryTime == nil { + retryTime = &retryTimeInternal + } + return &FakePDClient{ + stores: stores, + + notLeader: notLeader, + retryTimes: retryTime, + + peerStoreId: 0, + } +} + +func (fpdc *FakePDClient) SetRegions(regions []*pd.Region) { + fpdc.regions = regions +} + +func (fpdc *FakePDClient) GetAllStores(context.Context, ...pd.GetStoreOption) ([]*metapb.Store, error) { + return append([]*metapb.Store{}, fpdc.stores...), nil +} + +func (fpdc *FakePDClient) ScanRegions( + ctx context.Context, + key, endKey []byte, + limit int, + opts ...pd.GetRegionOption, +) ([]*pd.Region, error) { + regions := make([]*pd.Region, 0, len(fpdc.regions)) + fpdc.peerStoreId = fpdc.peerStoreId + 1 + peerStoreId := (fpdc.peerStoreId + 1) / 2 + for _, region := range fpdc.regions { + if len(endKey) != 0 && bytes.Compare(region.Meta.StartKey, endKey) >= 0 { + continue + } + if len(region.Meta.EndKey) != 0 && bytes.Compare(region.Meta.EndKey, key) <= 0 { + continue + } + region.Meta.Peers = []*metapb.Peer{{StoreId: peerStoreId}} + regions = append(regions, region) + } + return regions, nil +} + +func (fpdc *FakePDClient) BatchScanRegions( + ctx context.Context, + ranges []pd.KeyRange, + limit int, + opts ...pd.GetRegionOption, +) ([]*pd.Region, error) { + regions := make([]*pd.Region, 0, len(fpdc.regions)) + fpdc.peerStoreId = fpdc.peerStoreId + 1 + peerStoreId := (fpdc.peerStoreId + 1) / 2 + for _, region := range fpdc.regions { + inRange := false + for _, keyRange := range ranges { + if len(keyRange.EndKey) != 0 && bytes.Compare(region.Meta.StartKey, keyRange.EndKey) >= 0 { + continue + } + if len(region.Meta.EndKey) != 0 && bytes.Compare(region.Meta.EndKey, keyRange.StartKey) <= 0 { + continue + } + inRange = true + } + if inRange { + region.Meta.Peers = []*metapb.Peer{{StoreId: peerStoreId}} + regions = append(regions, region) + } + } + return nil, nil +} + +func (fpdc *FakePDClient) GetTS(ctx context.Context) (int64, int64, error) { + (*fpdc.retryTimes)++ + if *fpdc.retryTimes >= 3 { // the mock PD leader switched successfully + fpdc.notLeader = false + } + + if fpdc.notLeader { + return 0, 0, errors.Errorf( + "rpc error: code = Unknown desc = [PD:tso:ErrGenerateTimestamp]generate timestamp failed, " + + "requested pd is not leader of cluster", + ) + } + return 1, 1, nil +} + +type FakeSplitClient struct { + SplitClient + regions []*RegionInfo +} + +func NewFakeSplitClient() *FakeSplitClient { + return &FakeSplitClient{ + regions: make([]*RegionInfo, 0), + } +} + +func (f *FakeSplitClient) AppendRegion(startKey, endKey []byte) { + f.regions = append(f.regions, &RegionInfo{ + Region: &metapb.Region{ + StartKey: startKey, + EndKey: endKey, + }, + }) +} + +func (f 
*FakeSplitClient) AppendPdRegion(region *pd.Region) { + f.regions = append(f.regions, &RegionInfo{ + Region: region.Meta, + Leader: region.Leader, + }) +} + +func (f *FakeSplitClient) ScanRegions( + ctx context.Context, + startKey, endKey []byte, + limit int, +) ([]*RegionInfo, error) { + result := make([]*RegionInfo, 0) + count := 0 + for _, rng := range f.regions { + if bytes.Compare(rng.Region.StartKey, endKey) <= 0 && bytes.Compare(rng.Region.EndKey, startKey) > 0 { + result = append(result, rng) + count++ + } + if count >= limit { + break + } + } + return result, nil +} + +func (f *FakeSplitClient) WaitRegionsScattered(context.Context, []*RegionInfo) (int, error) { + return 0, nil +} diff --git a/br/pkg/restore/split/split.go b/br/pkg/restore/split/split.go index ce6faa90b209c..f7df83cd2e3e9 100644 --- a/br/pkg/restore/split/split.go +++ b/br/pkg/restore/split/split.go @@ -7,18 +7,25 @@ import ( "context" "encoding/hex" goerrors "errors" + "sort" + "sync" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" + restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/lightning/config" + "github.com/pingcap/tidb/pkg/tablecodec" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/redact" "go.uber.org/zap" + "golang.org/x/sync/errgroup" ) var ( @@ -37,6 +44,461 @@ const ( ScanRegionPaginationLimit = 128 ) +type rewriteSplitter struct { + rewriteKey []byte + tableID int64 + rule *restoreutils.RewriteRules + splitter *SplitHelper +} + +type splitHelperIterator struct { + tableSplitters []*rewriteSplitter +} + +func (iter *splitHelperIterator) Traverse(fn func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool) { + for _, entry := range iter.tableSplitters { + endKey := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(entry.tableID+1)) + rule := entry.rule + entry.splitter.Traverse(func(v Valued) bool { + return fn(v, endKey, rule) + }) + } +} + +type LogSplitHelper struct { + tableSplitter map[int64]*SplitHelper + rules map[int64]*restoreutils.RewriteRules + client SplitClient + pool *util.WorkerPool + eg *errgroup.Group + regionsCh chan []*RegionInfo + + splitThresholdSize uint64 + splitThresholdKeys int64 +} + +func NewLogSplitHelper(rules map[int64]*restoreutils.RewriteRules, client SplitClient, splitSize uint64, splitKeys int64) *LogSplitHelper { + return &LogSplitHelper{ + tableSplitter: make(map[int64]*SplitHelper), + rules: rules, + client: client, + pool: util.NewWorkerPool(128, "split region"), + eg: nil, + + splitThresholdSize: splitSize, + splitThresholdKeys: splitKeys, + } +} + +func (helper *LogSplitHelper) iterator() *splitHelperIterator { + tableSplitters := make([]*rewriteSplitter, 0, len(helper.tableSplitter)) + for tableID, splitter := range helper.tableSplitter { + delete(helper.tableSplitter, tableID) + rewriteRule, exists := helper.rules[tableID] + if !exists { + log.Info("skip splitting due to no table id matched", zap.Int64("tableID", tableID)) + continue + } + newTableID := restoreutils.GetRewriteTableID(tableID, rewriteRule) + if newTableID == 0 { + log.Warn("failed to get the rewrite table id", zap.Int64("tableID", tableID)) + continue + } + tableSplitters = append(tableSplitters, &rewriteSplitter{ + rewriteKey: codec.EncodeBytes([]byte{}, 
tablecodec.EncodeTablePrefix(newTableID)),
+			tableID:    newTableID,
+			rule:       rewriteRule,
+			splitter:   splitter,
+		})
+	}
+	sort.Slice(tableSplitters, func(i, j int) bool {
+		return bytes.Compare(tableSplitters[i].rewriteKey, tableSplitters[j].rewriteKey) < 0
+	})
+	return &splitHelperIterator{
+		tableSplitters: tableSplitters,
+	}
+}
+
+const splitFileThreshold = 1024 * 1024 // 1 MB
+
+func (helper *LogSplitHelper) skipFile(file *backuppb.DataFileInfo) bool {
+	_, exist := helper.rules[file.TableId]
+	return file.Length < splitFileThreshold || file.IsMeta || !exist
+}
+
+func (helper *LogSplitHelper) Merge(file *backuppb.DataFileInfo) {
+	if helper.skipFile(file) {
+		return
+	}
+	splitHelper, exist := helper.tableSplitter[file.TableId]
+	if !exist {
+		splitHelper = NewSplitHelper()
+		helper.tableSplitter[file.TableId] = splitHelper
+	}
+
+	splitHelper.Merge(Valued{
+		Key: Span{
+			StartKey: file.StartKey,
+			EndKey:   file.EndKey,
+		},
+		Value: Value{
+			Size:   file.Length,
+			Number: file.NumberOfEntries,
+		},
+	})
+}
+
+type splitFunc = func(context.Context, *RegionSplitter, uint64, int64, *RegionInfo, []Valued) error
+
+func (helper *LogSplitHelper) splitRegionByPoints(
+	ctx context.Context,
+	regionSplitter *RegionSplitter,
+	initialLength uint64,
+	initialNumber int64,
+	region *RegionInfo,
+	valueds []Valued,
+) error {
+	var (
+		splitPoints [][]byte = make([][]byte, 0)
+		lastKey     []byte   = region.Region.StartKey
+		length      uint64   = initialLength
+		number      int64    = initialNumber
+	)
+	for _, v := range valueds {
+		// decode will discard ts behind the key, which results in the same key for consecutive ranges
+		if !bytes.Equal(lastKey, v.GetStartKey()) && (v.Value.Size+length > helper.splitThresholdSize || v.Value.Number+number > helper.splitThresholdKeys) {
+			_, rawKey, _ := codec.DecodeBytes(v.GetStartKey(), nil)
+			splitPoints = append(splitPoints, rawKey)
+			length = 0
+			number = 0
+		}
+		lastKey = v.GetStartKey()
+		length += v.Value.Size
+		number += v.Value.Number
+	}
+
+	if len(splitPoints) == 0 {
+		return nil
+	}
+
+	helper.pool.ApplyOnErrorGroup(helper.eg, func() error {
+		newRegions, errSplit := regionSplitter.ExecuteOneRegion(ctx, region, splitPoints)
+		if errSplit != nil {
+			log.Warn("failed to split the scanned region", zap.Error(errSplit))
+			sort.Slice(splitPoints, func(i, j int) bool {
+				return bytes.Compare(splitPoints[i], splitPoints[j]) < 0
+			})
+			return regionSplitter.ExecuteSortedKeys(ctx, splitPoints)
+		}
+		select {
+		case <-ctx.Done():
+			return nil
+		case helper.regionsCh <- newRegions:
+		}
+		log.Info("split the region", zap.Uint64("region-id", region.Region.Id), zap.Int("split-point-number", len(splitPoints)))
+		return nil
+	})
+	return nil
+}
+
+// SplitPoint selects ranges overlapped with each region, and calls `splitF` to split the region
+func SplitPoint(
+	ctx context.Context,
+	iter *splitHelperIterator,
+	client SplitClient,
+	splitF splitFunc,
+) (err error) {
+	// common status
+	var (
+		regionSplitter *RegionSplitter = NewRegionSplitter(client)
+	)
+	// region traverse status
+	var (
+		// the region buffer of each scan
+		regions     []*RegionInfo = nil
+		regionIndex int           = 0
+	)
+	// region split status
+	var (
+		// range span   +----------------+------+---+-------------+
+		// region span    +------------------------------------+
+		//                +initial length+          +end valued+
+		// regionValueds is the ranges array overlapped with `regionInfo`
+		regionValueds []Valued = nil
+		// regionInfo is the region to be split
+		regionInfo *RegionInfo = nil
+		// initialLength is the length of the part of the 
first range overlapped with the region + initialLength uint64 = 0 + initialNumber int64 = 0 + ) + // range status + var ( + // regionOverCount is the number of regions overlapped with the range + regionOverCount uint64 = 0 + ) + + iter.Traverse(func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool { + if v.Value.Number == 0 || v.Value.Size == 0 { + return true + } + var ( + vStartKey []byte + vEndKey []byte + ) + // use `vStartKey` and `vEndKey` to compare with region's key + vStartKey, vEndKey, err = restoreutils.GetRewriteEncodedKeys(v, rule) + if err != nil { + return false + } + // traverse to the first region overlapped with the range + for ; regionIndex < len(regions); regionIndex++ { + if bytes.Compare(vStartKey, regions[regionIndex].Region.EndKey) < 0 { + break + } + } + // cannot find any regions overlapped with the range + // need to scan regions again + if regionIndex == len(regions) { + regions = nil + } + regionOverCount = 0 + for { + if regionIndex >= len(regions) { + var startKey []byte + if len(regions) > 0 { + // has traversed over the region buffer, should scan from the last region's end-key of the region buffer + startKey = regions[len(regions)-1].Region.EndKey + } else { + // scan from the range's start-key + startKey = vStartKey + } + // scan at most 64 regions into the region buffer + regions, err = ScanRegionsWithRetry(ctx, client, startKey, endKey, 64) + if err != nil { + return false + } + regionIndex = 0 + } + + region := regions[regionIndex] + // this region must be overlapped with the range + regionOverCount++ + // the region is the last one overlapped with the range, + // should split the last recorded region, + // and then record this region as the region to be split + if bytes.Compare(vEndKey, region.Region.EndKey) < 0 { + endLength := v.Value.Size / regionOverCount + endNumber := v.Value.Number / int64(regionOverCount) + if len(regionValueds) > 0 && regionInfo != region { + // add a part of the range as the end part + if bytes.Compare(vStartKey, regionInfo.Region.EndKey) < 0 { + regionValueds = append(regionValueds, NewValued(vStartKey, regionInfo.Region.EndKey, Value{Size: endLength, Number: endNumber})) + } + // try to split the region + err = splitF(ctx, regionSplitter, initialLength, initialNumber, regionInfo, regionValueds) + if err != nil { + return false + } + regionValueds = make([]Valued, 0) + } + if regionOverCount == 1 { + // the region completely contains the range + regionValueds = append(regionValueds, Valued{ + Key: Span{ + StartKey: vStartKey, + EndKey: vEndKey, + }, + Value: v.Value, + }) + } else { + // the region is overlapped with the last part of the range + initialLength = endLength + initialNumber = endNumber + } + regionInfo = region + // try the next range + return true + } + + // try the next region + regionIndex++ + } + }) + + if err != nil { + return errors.Trace(err) + } + if len(regionValueds) > 0 { + // try to split the region + err = splitF(ctx, regionSplitter, initialLength, initialNumber, regionInfo, regionValueds) + if err != nil { + return errors.Trace(err) + } + } + + return nil +} + +func (helper *LogSplitHelper) Split(ctx context.Context) error { + var ectx context.Context + var wg sync.WaitGroup + helper.eg, ectx = errgroup.WithContext(ctx) + helper.regionsCh = make(chan []*RegionInfo, 1024) + wg.Add(1) + go func() { + defer wg.Done() + scatterRegions := make([]*RegionInfo, 0) + receiveNewRegions: + for { + select { + case <-ctx.Done(): + return + case newRegions, ok := <-helper.regionsCh: + if !ok { + 
break receiveNewRegions
+				}
+
+				scatterRegions = append(scatterRegions, newRegions...)
+			}
+		}
+
+		regionSplitter := NewRegionSplitter(helper.client)
+		// It is too expensive to stop recovery and wait for a small number of regions
+		// to complete scatter, so the maximum waiting time is reduced to 1 minute.
+		_ = regionSplitter.WaitForScatterRegionsTimeout(ctx, scatterRegions, time.Minute)
+	}()
+
+	iter := helper.iterator()
+	if err := SplitPoint(ectx, iter, helper.client, helper.splitRegionByPoints); err != nil {
+		return errors.Trace(err)
+	}
+
+	// wait for completion of splitting regions
+	if err := helper.eg.Wait(); err != nil {
+		return errors.Trace(err)
+	}
+
+	// wait for completion of scattering regions
+	close(helper.regionsCh)
+	wg.Wait()
+
+	return nil
+}
+
+// RegionSplitter is an executor of region splitting by rules.
+type RegionSplitter struct {
+	client SplitClient
+}
+
+// NewRegionSplitter returns a new RegionSplitter.
+func NewRegionSplitter(client SplitClient) *RegionSplitter {
+	return &RegionSplitter{
+		client: client,
+	}
+}
+
+// ExecuteOneRegion exposes the split client's `SplitWaitAndScatter` function.
+func (rs *RegionSplitter) ExecuteOneRegion(ctx context.Context, region *RegionInfo, keys [][]byte) ([]*RegionInfo, error) {
+	return rs.client.SplitWaitAndScatter(ctx, region, keys)
+}
+
+// ExecuteSortedKeys executes region splits and makes sure the newly split regions are balanced.
+// It will split regions by the rewrite rules,
+// then it will split regions by the end key of each range.
+// tableRules includes the prefix of a table, since some ranges may have
+// a prefix with record sequence or index sequence.
+// note: all ranges and rewrite rules must have raw key.
+func (rs *RegionSplitter) ExecuteSortedKeys(
+	ctx context.Context,
+	sortedSplitKeys [][]byte,
+) error {
+	if len(sortedSplitKeys) == 0 {
+		log.Info("skip split regions, no split keys")
+		return nil
+	}
+
+	log.Info("execute split sorted keys", zap.Int("keys count", len(sortedSplitKeys)))
+	return rs.executeSplitByRanges(ctx, sortedSplitKeys)
+}
+
+func (rs *RegionSplitter) executeSplitByRanges(
+	ctx context.Context,
+	sortedKeys [][]byte,
+) error {
+	startTime := time.Now()
+	// Choose the rough region split keys: take every 128th key,
+	// so each roughly split region will later be split into 128 regions.
+	const regionIndexStep = 128
+
+	roughSortedSplitKeys := make([][]byte, 0, len(sortedKeys)/regionIndexStep+1)
+	for curRegionIndex := regionIndexStep; curRegionIndex < len(sortedKeys); curRegionIndex += regionIndexStep {
+		roughSortedSplitKeys = append(roughSortedSplitKeys, sortedKeys[curRegionIndex])
+	}
+	if len(roughSortedSplitKeys) > 0 {
+		if err := rs.executeSplitByKeys(ctx, roughSortedSplitKeys); err != nil {
+			return errors.Trace(err)
+		}
+	}
+	log.Info("finish splitting regions roughly", zap.Duration("take", time.Since(startTime)))
+
+	// Then send split requests to each TiKV.
+	if err := rs.executeSplitByKeys(ctx, sortedKeys); err != nil {
+		return errors.Trace(err)
+	}
+
+	log.Info("finish splitting and scattering regions", zap.Duration("take", time.Since(startTime)))
+	return nil
+}
+
+// executeSplitByKeys will split regions by **sorted** keys with the following steps.
+// 1. locate regions with corresponding keys.
+// 2. split these regions with corresponding keys.
+// 3. make sure new split regions are balanced.
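+//
+// Scattering is only awaited for up to ScatterWaitUpperInterval; a timeout is
+// logged as a warning but does not fail the split.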
+func (rs *RegionSplitter) executeSplitByKeys(
+	ctx context.Context,
+	sortedKeys [][]byte,
+) error {
+	startTime := time.Now()
+	scatterRegions, err := rs.client.SplitKeysAndScatter(ctx, sortedKeys)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if len(scatterRegions) > 0 {
+		log.Info("finish splitting and scattering regions, start to wait", zap.Int("regions", len(scatterRegions)),
+			zap.Duration("take", time.Since(startTime)))
+		rs.waitRegionsScattered(ctx, scatterRegions, ScatterWaitUpperInterval)
+	} else {
+		log.Info("finish splitting regions.", zap.Duration("take", time.Since(startTime)))
+	}
+	return nil
+}
+
+// waitRegionsScattered tries to wait for multiple regions to be scattered within 3 minutes.
+// This could time out, but if many regions are already scattered the restore could continue,
+// so we don't wait for a long time here.
+func (rs *RegionSplitter) waitRegionsScattered(ctx context.Context, scatterRegions []*RegionInfo, timeout time.Duration) {
+	log.Info("start to wait for scattering regions", zap.Int("regions", len(scatterRegions)))
+	startTime := time.Now()
+	leftCnt := rs.WaitForScatterRegionsTimeout(ctx, scatterRegions, timeout)
+	if leftCnt == 0 {
+		log.Info("waiting for scattering regions done",
+			zap.Int("regions", len(scatterRegions)),
+			zap.Duration("take", time.Since(startTime)))
+	} else {
+		log.Warn("waiting for scattering regions timeout",
+			zap.Int("not scattered count", leftCnt),
+			zap.Int("regions", len(scatterRegions)),
+			zap.Duration("take", time.Since(startTime)))
+	}
+}
+
+func (rs *RegionSplitter) WaitForScatterRegionsTimeout(ctx context.Context, regionInfos []*RegionInfo, timeout time.Duration) int {
+	ctx2, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+	leftRegions, _ := rs.client.WaitRegionsScattered(ctx2, regionInfos)
+	return leftRegions
+}
+
 func checkRegionConsistency(startKey, endKey []byte, regions []*RegionInfo) error {
 	// current pd can't guarantee the consistency of returned regions
 	if len(regions) == 0 {
diff --git a/br/pkg/restore/split/split_test.go b/br/pkg/restore/split/split_test.go
index 2250f7a96635c..4acef5dac84b7 100644
--- a/br/pkg/restore/split/split_test.go
+++ b/br/pkg/restore/split/split_test.go
@@ -5,16 +5,23 @@ import (
 	"bytes"
 	"context"
 	goerrors "errors"
+	"fmt"
 	"slices"
+	"sort"
 	"testing"
 	"time"
 
+	"github.com/docker/go-units"
 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
+	backuppb "github.com/pingcap/kvproto/pkg/brpb"
+	"github.com/pingcap/kvproto/pkg/import_sstpb"
 	"github.com/pingcap/kvproto/pkg/kvrpcpb"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
 	berrors "github.com/pingcap/tidb/br/pkg/errors"
+	restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils"
+	"github.com/pingcap/tidb/br/pkg/rtree"
 	"github.com/pingcap/tidb/br/pkg/utils"
 	"github.com/pingcap/tidb/pkg/kv"
 	"github.com/pingcap/tidb/pkg/store/pdtypes"
@@ -756,3 +763,351 @@ func TestScanRegionsWithRetry(t *testing.T) {
 		require.Equal(t, []byte("2"), regions[1].Region.StartKey)
 	}
 }
+
+func TestScanEmptyRegion(t *testing.T) {
+	mockPDCli := NewMockPDClientForSplit()
+	mockPDCli.SetRegions([][]byte{{}, {12}, {34}, {}})
+	client := NewClient(mockPDCli, nil, nil, 100, 4)
+	keys := initKeys()
+	// keep only one key
+	keys = keys[0:1]
+	regionSplitter := NewRegionSplitter(client)
+
+	ctx := context.Background()
+	err := regionSplitter.ExecuteSortedKeys(ctx, keys)
+	// should not return an error with only one range entry
+	require.NoError(t, err)
+}
+
+func TestSplitEmptyRegion(t *testing.T) {
+	mockPDCli := 
NewMockPDClientForSplit() + mockPDCli.SetRegions([][]byte{{}, {12}, {34}, {}}) + client := NewClient(mockPDCli, nil, nil, 100, 4) + regionSplitter := NewRegionSplitter(client) + err := regionSplitter.ExecuteSortedKeys(context.Background(), nil) + require.NoError(t, err) +} + +// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +// rewrite rules: aa -> xx, cc -> bb +// expected regions after split: +// +// [, aay), [aay, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), +// [bbj, cca), [cca, xxe), [xxe, xxz), [xxz, ) +func TestSplitAndScatter(t *testing.T) { + rangeBoundaries := [][]byte{[]byte(""), []byte("aay"), []byte("bba"), []byte("bbh"), []byte("cca"), []byte("")} + encodeBytes(rangeBoundaries) + mockPDCli := NewMockPDClientForSplit() + mockPDCli.SetRegions(rangeBoundaries) + client := NewClient(mockPDCli, nil, nil, 100, 4) + regionSplitter := NewRegionSplitter(client) + ctx := context.Background() + + ranges := initRanges() + rules := initRewriteRules() + splitKeys := make([][]byte, 0, len(ranges)) + for _, rg := range ranges { + tmp, err := restoreutils.RewriteRange(&rg, rules) + require.NoError(t, err) + splitKeys = append(splitKeys, tmp.EndKey) + } + sort.Slice(splitKeys, func(i, j int) bool { + return bytes.Compare(splitKeys[i], splitKeys[j]) < 0 + }) + err := regionSplitter.ExecuteSortedKeys(ctx, splitKeys) + require.NoError(t, err) + regions := mockPDCli.Regions.ScanRange(nil, nil, 100) + expected := [][]byte{[]byte(""), []byte("aay"), []byte("bba"), []byte("bbf"), []byte("bbh"), []byte("bbj"), []byte("cca"), []byte("xxe"), []byte("xxz"), []byte("")} + encodeBytes(expected) + require.Len(t, regions, len(expected)-1) + for i, region := range regions { + require.Equal(t, expected[i], region.Meta.StartKey) + require.Equal(t, expected[i+1], region.Meta.EndKey) + } +} + +func encodeBytes(keys [][]byte) { + for i := range keys { + if len(keys[i]) == 0 { + continue + } + keys[i] = codec.EncodeBytes(nil, keys[i]) + } +} + +func TestRawSplit(t *testing.T) { + // Fix issue #36490. 
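+	// The only split key is empty, so the split must be a no-op and the
+	// original raw-KV region boundaries must be preserved.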
+ splitKeys := [][]byte{{}} + ctx := context.Background() + rangeBoundaries := [][]byte{[]byte(""), []byte("aay"), []byte("bba"), []byte("bbh"), []byte("cca"), []byte("")} + mockPDCli := NewMockPDClientForSplit() + mockPDCli.SetRegions(rangeBoundaries) + client := NewClient(mockPDCli, nil, nil, 100, 4, WithRawKV()) + + regionSplitter := NewRegionSplitter(client) + err := regionSplitter.ExecuteSortedKeys(ctx, splitKeys) + require.NoError(t, err) + + regions := mockPDCli.Regions.ScanRange(nil, nil, 100) + require.Len(t, regions, len(rangeBoundaries)-1) + for i, region := range regions { + require.Equal(t, rangeBoundaries[i], region.Meta.StartKey) + require.Equal(t, rangeBoundaries[i+1], region.Meta.EndKey) + } +} + +// keys: aae, aaz, ccf, ccj +func initKeys() [][]byte { + return [][]byte{ + []byte("aae"), + []byte("aaz"), + []byte("ccf"), + []byte("ccj"), + } +} + +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +func initRanges() []rtree.Range { + var ranges [4]rtree.Range + ranges[0] = rtree.Range{ + StartKey: []byte("aaa"), + EndKey: []byte("aae"), + } + ranges[1] = rtree.Range{ + StartKey: []byte("aae"), + EndKey: []byte("aaz"), + } + ranges[2] = rtree.Range{ + StartKey: []byte("ccd"), + EndKey: []byte("ccf"), + } + ranges[3] = rtree.Range{ + StartKey: []byte("ccf"), + EndKey: []byte("ccj"), + } + return ranges[:] +} + +func initRewriteRules() *restoreutils.RewriteRules { + var rules [2]*import_sstpb.RewriteRule + rules[0] = &import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("aa"), + NewKeyPrefix: []byte("xx"), + } + rules[1] = &import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("cc"), + NewKeyPrefix: []byte("bb"), + } + return &restoreutils.RewriteRules{ + Data: rules[:], + } +} + +func keyWithTablePrefix(tableID int64, key string) []byte { + rawKey := append(tablecodec.GenTableRecordPrefix(tableID), []byte(key)...) 
+ return codec.EncodeBytes([]byte{}, rawKey) +} + +func TestSplitPoint(t *testing.T) { + ctx := context.Background() + var oldTableID int64 = 50 + var tableID int64 = 100 + rewriteRules := &restoreutils.RewriteRules{ + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.EncodeTablePrefix(oldTableID), + NewKeyPrefix: tablecodec.EncodeTablePrefix(tableID), + }, + }, + } + + // range: b c d e g i + // +---+ +---+ +---------+ + // +-------------+----------+---------+ + // region: a f h j + splitHelper := NewSplitHelper() + splitHelper.Merge(Valued{Key: Span{StartKey: keyWithTablePrefix(oldTableID, "b"), EndKey: keyWithTablePrefix(oldTableID, "c")}, Value: Value{Size: 100, Number: 100}}) + splitHelper.Merge(Valued{Key: Span{StartKey: keyWithTablePrefix(oldTableID, "d"), EndKey: keyWithTablePrefix(oldTableID, "e")}, Value: Value{Size: 200, Number: 200}}) + splitHelper.Merge(Valued{Key: Span{StartKey: keyWithTablePrefix(oldTableID, "g"), EndKey: keyWithTablePrefix(oldTableID, "i")}, Value: Value{Size: 300, Number: 300}}) + client := NewFakeSplitClient() + client.AppendRegion(keyWithTablePrefix(tableID, "a"), keyWithTablePrefix(tableID, "f")) + client.AppendRegion(keyWithTablePrefix(tableID, "f"), keyWithTablePrefix(tableID, "h")) + client.AppendRegion(keyWithTablePrefix(tableID, "h"), keyWithTablePrefix(tableID, "j")) + client.AppendRegion(keyWithTablePrefix(tableID, "j"), keyWithTablePrefix(tableID+1, "a")) + + iter := NewSplitHelperIteratorForTest(splitHelper, tableID, rewriteRules) + err := SplitPoint(ctx, iter, client, func(ctx context.Context, rs *RegionSplitter, u uint64, o int64, ri *RegionInfo, v []Valued) error { + require.Equal(t, u, uint64(0)) + require.Equal(t, o, int64(0)) + require.Equal(t, ri.Region.StartKey, keyWithTablePrefix(tableID, "a")) + require.Equal(t, ri.Region.EndKey, keyWithTablePrefix(tableID, "f")) + require.EqualValues(t, v[0].Key.StartKey, keyWithTablePrefix(tableID, "b")) + require.EqualValues(t, v[0].Key.EndKey, keyWithTablePrefix(tableID, "c")) + require.EqualValues(t, v[1].Key.StartKey, keyWithTablePrefix(tableID, "d")) + require.EqualValues(t, v[1].Key.EndKey, keyWithTablePrefix(tableID, "e")) + require.Equal(t, len(v), 2) + return nil + }) + require.NoError(t, err) +} + +func getCharFromNumber(prefix string, i int) string { + c := '1' + (i % 10) + b := '1' + (i%100)/10 + a := '1' + i/100 + return fmt.Sprintf("%s%c%c%c", prefix, a, b, c) +} + +func TestSplitPoint2(t *testing.T) { + ctx := context.Background() + var oldTableID int64 = 50 + var tableID int64 = 100 + rewriteRules := &restoreutils.RewriteRules{ + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.EncodeTablePrefix(oldTableID), + NewKeyPrefix: tablecodec.EncodeTablePrefix(tableID), + }, + }, + } + + // range: b c d e f i j k l n + // +---+ +---+ +-----------------+ +----+ +--------+ + // +---------------+--+.....+----+------------+---------+ + // region: a g >128 h m o + splitHelper := NewSplitHelper() + splitHelper.Merge(Valued{Key: Span{StartKey: keyWithTablePrefix(oldTableID, "b"), EndKey: keyWithTablePrefix(oldTableID, "c")}, Value: Value{Size: 100, Number: 100}}) + splitHelper.Merge(Valued{Key: Span{StartKey: keyWithTablePrefix(oldTableID, "d"), EndKey: keyWithTablePrefix(oldTableID, "e")}, Value: Value{Size: 200, Number: 200}}) + splitHelper.Merge(Valued{Key: Span{StartKey: keyWithTablePrefix(oldTableID, "f"), EndKey: keyWithTablePrefix(oldTableID, "i")}, Value: Value{Size: 300, Number: 300}}) + splitHelper.Merge(Valued{Key: Span{StartKey: 
keyWithTablePrefix(oldTableID, "j"), EndKey: keyWithTablePrefix(oldTableID, "k")}, Value: Value{Size: 200, Number: 200}}) + splitHelper.Merge(Valued{Key: Span{StartKey: keyWithTablePrefix(oldTableID, "l"), EndKey: keyWithTablePrefix(oldTableID, "n")}, Value: Value{Size: 200, Number: 200}}) + client := NewFakeSplitClient() + client.AppendRegion(keyWithTablePrefix(tableID, "a"), keyWithTablePrefix(tableID, "g")) + client.AppendRegion(keyWithTablePrefix(tableID, "g"), keyWithTablePrefix(tableID, getCharFromNumber("g", 0))) + for i := 0; i < 256; i++ { + client.AppendRegion(keyWithTablePrefix(tableID, getCharFromNumber("g", i)), keyWithTablePrefix(tableID, getCharFromNumber("g", i+1))) + } + client.AppendRegion(keyWithTablePrefix(tableID, getCharFromNumber("g", 256)), keyWithTablePrefix(tableID, "h")) + client.AppendRegion(keyWithTablePrefix(tableID, "h"), keyWithTablePrefix(tableID, "m")) + client.AppendRegion(keyWithTablePrefix(tableID, "m"), keyWithTablePrefix(tableID, "o")) + client.AppendRegion(keyWithTablePrefix(tableID, "o"), keyWithTablePrefix(tableID+1, "a")) + + firstSplit := true + iter := NewSplitHelperIteratorForTest(splitHelper, tableID, rewriteRules) + err := SplitPoint(ctx, iter, client, func(ctx context.Context, rs *RegionSplitter, u uint64, o int64, ri *RegionInfo, v []Valued) error { + if firstSplit { + require.Equal(t, u, uint64(0)) + require.Equal(t, o, int64(0)) + require.Equal(t, ri.Region.StartKey, keyWithTablePrefix(tableID, "a")) + require.Equal(t, ri.Region.EndKey, keyWithTablePrefix(tableID, "g")) + require.EqualValues(t, v[0].Key.StartKey, keyWithTablePrefix(tableID, "b")) + require.EqualValues(t, v[0].Key.EndKey, keyWithTablePrefix(tableID, "c")) + require.EqualValues(t, v[1].Key.StartKey, keyWithTablePrefix(tableID, "d")) + require.EqualValues(t, v[1].Key.EndKey, keyWithTablePrefix(tableID, "e")) + require.EqualValues(t, v[2].Key.StartKey, keyWithTablePrefix(tableID, "f")) + require.EqualValues(t, v[2].Key.EndKey, keyWithTablePrefix(tableID, "g")) + require.Equal(t, v[2].Value.Size, uint64(1)) + require.Equal(t, v[2].Value.Number, int64(1)) + require.Equal(t, len(v), 3) + firstSplit = false + } else { + require.Equal(t, u, uint64(1)) + require.Equal(t, o, int64(1)) + require.Equal(t, ri.Region.StartKey, keyWithTablePrefix(tableID, "h")) + require.Equal(t, ri.Region.EndKey, keyWithTablePrefix(tableID, "m")) + require.EqualValues(t, v[0].Key.StartKey, keyWithTablePrefix(tableID, "j")) + require.EqualValues(t, v[0].Key.EndKey, keyWithTablePrefix(tableID, "k")) + require.EqualValues(t, v[1].Key.StartKey, keyWithTablePrefix(tableID, "l")) + require.EqualValues(t, v[1].Key.EndKey, keyWithTablePrefix(tableID, "m")) + require.Equal(t, v[1].Value.Size, uint64(100)) + require.Equal(t, v[1].Value.Number, int64(100)) + require.Equal(t, len(v), 2) + } + return nil + }) + require.NoError(t, err) +} + +func fakeFile(tableID, rowID int64, length uint64, num int64) *backuppb.DataFileInfo { + return &backuppb.DataFileInfo{ + StartKey: fakeRowKey(tableID, rowID), + EndKey: fakeRowKey(tableID, rowID+1), + TableId: tableID, + Length: length, + NumberOfEntries: num, + } +} + +func fakeRowKey(tableID, rowID int64) kv.Key { + return codec.EncodeBytes(nil, tablecodec.EncodeRecordKey(tablecodec.GenTableRecordPrefix(tableID), kv.IntHandle(rowID))) +} + +func TestLogSplitHelper(t *testing.T) { + ctx := context.Background() + rules := map[int64]*restoreutils.RewriteRules{ + 1: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), + NewKeyPrefix: 
tablecodec.GenTableRecordPrefix(100), + }, + }, + }, + 2: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), + NewKeyPrefix: tablecodec.GenTableRecordPrefix(200), + }, + }, + }, + } + oriRegions := [][]byte{ + {}, + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + } + mockPDCli := NewMockPDClientForSplit() + mockPDCli.SetRegions(oriRegions) + client := NewClient(mockPDCli, nil, nil, 100, 4) + helper := NewLogSplitHelper(rules, client, 4*units.MiB, 400) + + helper.Merge(fakeFile(1, 100, 100, 100)) + helper.Merge(fakeFile(1, 200, 2*units.MiB, 200)) + helper.Merge(fakeFile(2, 100, 3*units.MiB, 300)) + helper.Merge(fakeFile(3, 100, 10*units.MiB, 100000)) + // different regions, no split happens + err := helper.Split(ctx) + require.NoError(t, err) + regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) + require.NoError(t, err) + require.Len(t, regions, 3) + require.Equal(t, []byte{}, regions[0].Meta.StartKey) + require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), regions[1].Meta.StartKey) + require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), regions[2].Meta.StartKey) + require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), regions[2].Meta.EndKey) + + helper.Merge(fakeFile(1, 300, 3*units.MiB, 10)) + helper.Merge(fakeFile(1, 400, 4*units.MiB, 10)) + // trigger to split regions for table 1 + err = helper.Split(ctx) + require.NoError(t, err) + regions, err = mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) + require.NoError(t, err) + require.Len(t, regions, 4) + require.Equal(t, fakeRowKey(100, 400), kv.Key(regions[1].Meta.EndKey)) +} + +func NewSplitHelperIteratorForTest(helper *SplitHelper, tableID int64, rule *restoreutils.RewriteRules) *splitHelperIterator { + return &splitHelperIterator{ + tableSplitters: []*rewriteSplitter{ + { + tableID: tableID, + rule: rule, + splitter: helper, + }, + }, + } +} diff --git a/br/pkg/restore/internal/log_split/sum_sorted.go b/br/pkg/restore/split/sum_sorted.go similarity index 99% rename from br/pkg/restore/internal/log_split/sum_sorted.go rename to br/pkg/restore/split/sum_sorted.go index fb5d3d8f9a0a2..1ab51588ba6ca 100644 --- a/br/pkg/restore/internal/log_split/sum_sorted.go +++ b/br/pkg/restore/split/sum_sorted.go @@ -1,5 +1,5 @@ // Copyright 2022 PingCAP, Inc. Licensed under Apache-2.0. -package logsplit +package split import ( "bytes" diff --git a/br/pkg/restore/internal/log_split/sum_sorted_test.go b/br/pkg/restore/split/sum_sorted_test.go similarity index 87% rename from br/pkg/restore/internal/log_split/sum_sorted_test.go rename to br/pkg/restore/split/sum_sorted_test.go index 634ed93f003b1..965a39d6242c4 100644 --- a/br/pkg/restore/internal/log_split/sum_sorted_test.go +++ b/br/pkg/restore/split/sum_sorted_test.go @@ -1,17 +1,17 @@ // Copyright 2022 PingCAP, Inc. Licensed under Apache-2.0. 
-package logsplit_test +package split_test import ( "fmt" "testing" - logsplit "github.com/pingcap/tidb/br/pkg/restore/internal/log_split" + split "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/stretchr/testify/require" ) -func v(s, e string, val logsplit.Value) logsplit.Valued { - return logsplit.Valued{ - Key: logsplit.Span{ +func v(s, e string, val split.Value) split.Valued { + return split.Valued{ + Key: split.Span{ StartKey: []byte(s), EndKey: []byte(e), }, @@ -19,8 +19,8 @@ func v(s, e string, val logsplit.Value) logsplit.Valued { } } -func mb(b uint64) logsplit.Value { - return logsplit.Value{ +func mb(b uint64) split.Value { + return split.Value{ Size: b * 1024 * 1024, Number: int64(b), } @@ -32,12 +32,12 @@ func exportString(startKey, endKey, size string, number int) string { func TestSumSorted(t *testing.T) { cases := []struct { - values []logsplit.Valued + values []split.Valued result []uint64 strs []string }{ { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("d", "g", mb(100)), @@ -50,7 +50,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("d", "f", mb(100)), @@ -63,7 +63,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -76,7 +76,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -91,7 +91,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -108,7 +108,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -125,7 +125,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -142,7 +142,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -159,7 +159,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -176,7 +176,7 @@ func TestSumSorted(t *testing.T) { }, }, { - values: []logsplit.Valued{ + values: []split.Valued{ v("a", "f", mb(100)), v("a", "c", mb(200)), v("c", "f", mb(100)), @@ -195,14 +195,14 @@ func TestSumSorted(t *testing.T) { } for _, ca := range cases { - full := logsplit.NewSplitHelper() + full := split.NewSplitHelper() for i, v := range ca.values { require.Equal(t, ca.strs[i], v.String()) full.Merge(v) } i := 0 - full.Traverse(func(v logsplit.Valued) bool { + full.Traverse(func(v split.Valued) bool { require.Equal(t, mb(ca.result[i]), v.Value) i++ return true diff --git a/br/pkg/task/BUILD.bazel b/br/pkg/task/BUILD.bazel index 163d28fd6b0e2..77aba10ac0fcf 100644 --- a/br/pkg/task/BUILD.bazel +++ b/br/pkg/task/BUILD.bazel @@ -126,6 +126,7 @@ go_test( "//br/pkg/metautil", "//br/pkg/mock", "//br/pkg/restore/snap_client", + "//br/pkg/restore/split", "//br/pkg/restore/tiflashrec", "//br/pkg/storage", "//br/pkg/stream", diff --git a/br/pkg/task/restore_test.go b/br/pkg/task/restore_test.go 
index 4713e5a540ab7..86ceb3755ee09 100644 --- a/br/pkg/task/restore_test.go +++ b/br/pkg/task/restore_test.go @@ -19,6 +19,7 @@ import ( gluemock "github.com/pingcap/tidb/br/pkg/gluetidb/mock" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/mock" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/restore/tiflashrec" "github.com/pingcap/tidb/br/pkg/task" utiltest "github.com/pingcap/tidb/br/pkg/utiltest" @@ -58,7 +59,7 @@ func TestPreCheckTableTiFlashReplicas(t *testing.T) { }, } - pdClient := utiltest.NewFakePDClient(mockStores, false, nil) + pdClient := split.NewFakePDClient(mockStores, false, nil) tables := make([]*metautil.Table, 4) for i := 0; i < len(tables); i++ { diff --git a/br/pkg/utiltest/BUILD.bazel b/br/pkg/utiltest/BUILD.bazel index c8c70993f1488..97781c99c589d 100644 --- a/br/pkg/utiltest/BUILD.bazel +++ b/br/pkg/utiltest/BUILD.bazel @@ -2,23 +2,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "utiltest", - srcs = [ - "fake.go", - "suite.go", - ], + srcs = ["suite.go"], importpath = "github.com/pingcap/tidb/br/pkg/utiltest", visibility = ["//visibility:public"], deps = [ "//br/pkg/gluetidb/mock", "//br/pkg/mock", - "//br/pkg/pdutil", - "//br/pkg/restore/split", "//br/pkg/storage", - "@com_github_pingcap_kvproto//pkg/metapb", - "@com_github_pkg_errors//:errors", "@com_github_stretchr_testify//require", - "@com_github_tikv_pd_client//:client", - "@com_github_tikv_pd_client//http", - "@org_golang_google_grpc//keepalive", ], ) diff --git a/br/pkg/utiltest/fake.go b/br/pkg/utiltest/fake.go deleted file mode 100644 index 2d4cbaaef29ea..0000000000000 --- a/br/pkg/utiltest/fake.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package utiltest - -import ( - "bytes" - "context" - "math" - "time" - - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/br/pkg/pdutil" - "github.com/pingcap/tidb/br/pkg/restore/split" - "github.com/pkg/errors" - pd "github.com/tikv/pd/client" - pdhttp "github.com/tikv/pd/client/http" - "google.golang.org/grpc/keepalive" -) - -var DefaultTestKeepaliveCfg = keepalive.ClientParameters{ - Time: 3 * time.Second, - Timeout: 10 * time.Second, -} - -var ( - ExpectPDCfgGeneratorsResult = map[string]any{ - "merge-schedule-limit": 0, - "leader-schedule-limit": float64(40), - "region-schedule-limit": float64(40), - "max-snapshot-count": float64(40), - "enable-location-replacement": "false", - "max-pending-peer-count": uint64(math.MaxInt32), - } - - ExistPDCfgGeneratorBefore = map[string]any{ - "merge-schedule-limit": 100, - "leader-schedule-limit": float64(100), - "region-schedule-limit": float64(100), - "max-snapshot-count": float64(100), - "enable-location-replacement": "true", - "max-pending-peer-count": 100, - } -) - -type FakePDHTTPClient struct { - pdhttp.Client - - expireSchedulers map[string]time.Time - cfgs map[string]any - - rules map[string]*pdhttp.Rule -} - -func NewFakePDHTTPClient() *FakePDHTTPClient { - return &FakePDHTTPClient{ - expireSchedulers: make(map[string]time.Time), - cfgs: make(map[string]any), - - rules: make(map[string]*pdhttp.Rule), - } -} - -func (fpdh *FakePDHTTPClient) GetScheduleConfig(_ context.Context) (map[string]any, error) { - return ExistPDCfgGeneratorBefore, nil -} - -func (fpdh *FakePDHTTPClient) GetSchedulers(_ context.Context) ([]string, error) { - schedulers := make([]string, 0, len(pdutil.Schedulers)) - for scheduler := range pdutil.Schedulers { - schedulers = append(schedulers, scheduler) - } - return schedulers, nil -} - -func (fpdh *FakePDHTTPClient) SetSchedulerDelay(_ context.Context, key string, delay int64) error { - expireTime, ok := fpdh.expireSchedulers[key] - if ok { - if time.Now().Compare(expireTime) > 0 { - return errors.Errorf("the scheduler config set is expired") - } - if delay == 0 { - delete(fpdh.expireSchedulers, key) - } - } - if !ok && delay == 0 { - return errors.Errorf("set the nonexistent scheduler") - } - expireTime = time.Now().Add(time.Second * time.Duration(delay)) - fpdh.expireSchedulers[key] = expireTime - return nil -} - -func (fpdh *FakePDHTTPClient) SetConfig(_ context.Context, config map[string]any, ttl ...float64) error { - for key, value := range config { - fpdh.cfgs[key] = value - } - return nil -} - -func (fpdh *FakePDHTTPClient) GetConfig(_ context.Context) (map[string]any, error) { - return fpdh.cfgs, nil -} - -func (fpdh *FakePDHTTPClient) GetDelaySchedulers() map[string]struct{} { - delaySchedulers := make(map[string]struct{}) - for key, t := range fpdh.expireSchedulers { - now := time.Now() - if now.Compare(t) < 0 { - delaySchedulers[key] = struct{}{} - } - } - return delaySchedulers -} - -func (fpdh *FakePDHTTPClient) GetPlacementRule(_ context.Context, groupID string, ruleID string) (*pdhttp.Rule, error) { - rule, ok := fpdh.rules[ruleID] - if !ok { - rule = &pdhttp.Rule{ - GroupID: groupID, - ID: ruleID, - } - fpdh.rules[ruleID] = rule - } - return rule, nil -} - -func (fpdh *FakePDHTTPClient) SetPlacementRule(_ context.Context, rule *pdhttp.Rule) error { - fpdh.rules[rule.ID] = rule - return nil -} - -func (fpdh *FakePDHTTPClient) DeletePlacementRule(_ context.Context, groupID string, ruleID string) error { - delete(fpdh.rules, ruleID) - return nil -} - -type FakePDClient struct { - 
pd.Client - stores []*metapb.Store - regions []*pd.Region - - notLeader bool - retryTimes *int - - peerStoreId uint64 -} - -func NewFakePDClient(stores []*metapb.Store, notLeader bool, retryTime *int) *FakePDClient { - var retryTimeInternal int - if retryTime == nil { - retryTime = &retryTimeInternal - } - return &FakePDClient{ - stores: stores, - - notLeader: notLeader, - retryTimes: retryTime, - - peerStoreId: 0, - } -} - -func (fpdc *FakePDClient) SetRegions(regions []*pd.Region) { - fpdc.regions = regions -} - -func (fpdc *FakePDClient) GetAllStores(context.Context, ...pd.GetStoreOption) ([]*metapb.Store, error) { - return append([]*metapb.Store{}, fpdc.stores...), nil -} - -func (fpdc *FakePDClient) ScanRegions( - ctx context.Context, - key, endKey []byte, - limit int, - opts ...pd.GetRegionOption, -) ([]*pd.Region, error) { - regions := make([]*pd.Region, 0, len(fpdc.regions)) - fpdc.peerStoreId = fpdc.peerStoreId + 1 - peerStoreId := (fpdc.peerStoreId + 1) / 2 - for _, region := range fpdc.regions { - if len(endKey) != 0 && bytes.Compare(region.Meta.StartKey, endKey) >= 0 { - continue - } - if len(region.Meta.EndKey) != 0 && bytes.Compare(region.Meta.EndKey, key) <= 0 { - continue - } - region.Meta.Peers = []*metapb.Peer{{StoreId: peerStoreId}} - regions = append(regions, region) - } - return regions, nil -} - -func (fpdc *FakePDClient) BatchScanRegions( - ctx context.Context, - ranges []pd.KeyRange, - limit int, - opts ...pd.GetRegionOption, -) ([]*pd.Region, error) { - regions := make([]*pd.Region, 0, len(fpdc.regions)) - fpdc.peerStoreId = fpdc.peerStoreId + 1 - peerStoreId := (fpdc.peerStoreId + 1) / 2 - for _, region := range fpdc.regions { - inRange := false - for _, keyRange := range ranges { - if len(keyRange.EndKey) != 0 && bytes.Compare(region.Meta.StartKey, keyRange.EndKey) >= 0 { - continue - } - if len(region.Meta.EndKey) != 0 && bytes.Compare(region.Meta.EndKey, keyRange.StartKey) <= 0 { - continue - } - inRange = true - } - if inRange { - region.Meta.Peers = []*metapb.Peer{{StoreId: peerStoreId}} - regions = append(regions, region) - } - } - return nil, nil -} - -func (fpdc *FakePDClient) GetTS(ctx context.Context) (int64, int64, error) { - (*fpdc.retryTimes)++ - if *fpdc.retryTimes >= 3 { // the mock PD leader switched successfully - fpdc.notLeader = false - } - - if fpdc.notLeader { - return 0, 0, errors.Errorf( - "rpc error: code = Unknown desc = [PD:tso:ErrGenerateTimestamp]generate timestamp failed, " + - "requested pd is not leader of cluster", - ) - } - return 1, 1, nil -} - -type FakeSplitClient struct { - split.SplitClient - regions []*split.RegionInfo -} - -func NewFakeSplitClient() *FakeSplitClient { - return &FakeSplitClient{ - regions: make([]*split.RegionInfo, 0), - } -} - -func (f *FakeSplitClient) AppendRegion(startKey, endKey []byte) { - f.regions = append(f.regions, &split.RegionInfo{ - Region: &metapb.Region{ - StartKey: startKey, - EndKey: endKey, - }, - }) -} - -func (f *FakeSplitClient) AppendPdRegion(region *pd.Region) { - f.regions = append(f.regions, &split.RegionInfo{ - Region: region.Meta, - Leader: region.Leader, - }) -} - -func (f *FakeSplitClient) ScanRegions( - ctx context.Context, - startKey, endKey []byte, - limit int, -) ([]*split.RegionInfo, error) { - result := make([]*split.RegionInfo, 0) - count := 0 - for _, rng := range f.regions { - if bytes.Compare(rng.Region.StartKey, endKey) <= 0 && bytes.Compare(rng.Region.EndKey, startKey) > 0 { - result = append(result, rng) - count++ - } - if count >= limit { - break - } - } - return 
result, nil -} - -func (f *FakeSplitClient) WaitRegionsScattered(context.Context, []*split.RegionInfo) (int, error) { - return 0, nil -} diff --git a/pkg/lightning/backend/local/BUILD.bazel b/pkg/lightning/backend/local/BUILD.bazel index 0c2f1811a34c9..80fd7691d13cc 100644 --- a/pkg/lightning/backend/local/BUILD.bazel +++ b/pkg/lightning/backend/local/BUILD.bazel @@ -129,7 +129,6 @@ go_test( "//br/pkg/mock/mocklocal", "//br/pkg/restore/split", "//br/pkg/storage", - "//br/pkg/utiltest", "//pkg/ddl", "//pkg/errno", "//pkg/keyspace", diff --git a/pkg/lightning/backend/local/local_check_test.go b/pkg/lightning/backend/local/local_check_test.go index 87dba07ef56b5..933a366f697a2 100644 --- a/pkg/lightning/backend/local/local_check_test.go +++ b/pkg/lightning/backend/local/local_check_test.go @@ -23,7 +23,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/br/pkg/utiltest" + "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/pkg/lightning/backend" "github.com/pingcap/tidb/pkg/lightning/backend/local" "github.com/pingcap/tidb/pkg/lightning/common" @@ -100,7 +100,7 @@ func TestGetRegionSplitSizeKeys(t *testing.T) { } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cli := utiltest.NewFakePDClient(allStores, false, nil) + cli := split.NewFakePDClient(allStores, false, nil) defer func() { local.SetGetSplitConfFromStoreFunc(local.GetSplitConfFromStore) }() From 84886df3757b2d12465753ebf3b3c7e1c346efad Mon Sep 17 00:00:00 2001 From: Jianjun Liao <36503113+Leavrth@users.noreply.github.com> Date: Tue, 12 Nov 2024 01:00:21 +0800 Subject: [PATCH 03/17] br: implement reading migration (#57237) close pingcap/tidb#57210 --- br/pkg/restore/log_client/BUILD.bazel | 4 +- br/pkg/restore/log_client/export_test.go | 34 +- br/pkg/restore/log_client/log_file_manager.go | 105 ++++-- .../log_client/log_file_manager_test.go | 7 +- br/pkg/restore/log_client/log_file_map.go | 90 +++++ br/pkg/restore/log_client/migration.go | 212 +++++++++++ br/pkg/restore/log_client/migration_test.go | 352 ++++++++++++++++++ br/pkg/utils/iter/combinator_types.go | 33 +- br/pkg/utils/iter/combinators.go | 7 + 9 files changed, 795 insertions(+), 49 deletions(-) create mode 100644 br/pkg/restore/log_client/migration.go create mode 100644 br/pkg/restore/log_client/migration_test.go diff --git a/br/pkg/restore/log_client/BUILD.bazel b/br/pkg/restore/log_client/BUILD.bazel index d55c8066514f3..85da59628f8b7 100644 --- a/br/pkg/restore/log_client/BUILD.bazel +++ b/br/pkg/restore/log_client/BUILD.bazel @@ -8,6 +8,7 @@ go_library( "import_retry.go", "log_file_manager.go", "log_file_map.go", + "migration.go", ], importpath = "github.com/pingcap/tidb/br/pkg/restore/log_client", visibility = ["//visibility:public"], @@ -81,10 +82,11 @@ go_test( "log_file_manager_test.go", "log_file_map_test.go", "main_test.go", + "migration_test.go", ], embed = [":log_client"], flaky = True, - shard_count = 41, + shard_count = 42, deps = [ "//br/pkg/errors", "//br/pkg/glue", diff --git a/br/pkg/restore/log_client/export_test.go b/br/pkg/restore/log_client/export_test.go index 9a35b35e8eb57..70a15e1ad2393 100644 --- a/br/pkg/restore/log_client/export_test.go +++ b/br/pkg/restore/log_client/export_test.go @@ -29,6 +29,38 @@ import ( var FilterFilesByRegion = filterFilesByRegion +func (metaname *MetaName) Meta() Meta { + return metaname.meta +} + +func NewMetaName(meta Meta, name string) *MetaName { + return &MetaName{meta: meta, 
name: name} +} + +func NewMigrationBuilder(shiftStartTS, startTS, restoredTS uint64) *WithMigrationsBuilder { + return &WithMigrationsBuilder{ + shiftStartTS: shiftStartTS, + startTS: startTS, + restoredTS: restoredTS, + } +} + +func (m *MetaWithMigrations) StoreId() int64 { + return m.meta.StoreId +} + +func (m *MetaWithMigrations) Meta() *backuppb.Metadata { + return m.meta +} + +func (m *PhysicalWithMigrations) PhysicalLength() uint64 { + return m.physical.Item.Length +} + +func (m *PhysicalWithMigrations) Physical() *backuppb.DataFileGroup { + return m.physical.Item +} + func (rc *LogClient) TEST_saveIDMap( ctx context.Context, sr *stream.SchemasReplace, @@ -44,7 +76,7 @@ func (rc *LogClient) TEST_initSchemasMap( } // readStreamMetaByTS is used for streaming task. collect all meta file by TS, it is for test usage. -func (rc *LogFileManager) ReadStreamMeta(ctx context.Context) ([]Meta, error) { +func (rc *LogFileManager) ReadStreamMeta(ctx context.Context) ([]*MetaName, error) { metas, err := rc.streamingMeta(ctx) if err != nil { return nil, err diff --git a/br/pkg/restore/log_client/log_file_manager.go b/br/pkg/restore/log_client/log_file_manager.go index 81af10cf542b0..cbaa6a594dff4 100644 --- a/br/pkg/restore/log_client/log_file_manager.go +++ b/br/pkg/restore/log_client/log_file_manager.go @@ -28,6 +28,14 @@ import ( // MetaIter is the type of iterator of metadata files' content. type MetaIter = iter.TryNextor[*backuppb.Metadata] +type MetaName struct { + meta Meta + name string +} + +// MetaNameIter is the type of iterator of metadata files' content with name. +type MetaNameIter = iter.TryNextor[*MetaName] + type LogDataFileInfo struct { *backuppb.DataFileInfo MetaDataGroupName string @@ -35,6 +43,18 @@ type LogDataFileInfo struct { OffsetInMergedGroup int } +// GroupIndex is the type of physical data file with index from metadata. +type GroupIndex = iter.Indexed[*backuppb.DataFileGroup] + +// GroupIndexIter is the type of iterator of physical data file with index from metadata. +type GroupIndexIter = iter.TryNextor[GroupIndex] + +// FileIndex is the type of logical data file with index from physical data file. +type FileIndex = iter.Indexed[*backuppb.DataFileInfo] + +// FileIndexIter is the type of iterator of logical data file with index from physical data file. +type FileIndexIter = iter.TryNextor[FileIndex] + // LogIter is the type of iterator of each log files' meta information. type LogIter = iter.TryNextor[*LogDataFileInfo] @@ -78,6 +98,8 @@ type LogFileManager struct { storage storage.ExternalStorage helper streamMetadataHelper + withmigrations WithMigrations + metadataDownloadBatchSize uint } @@ -87,6 +109,7 @@ type LogFileManagerInit struct { RestoreTS uint64 Storage storage.ExternalStorage + Migrations WithMigrations MetadataDownloadBatchSize uint EncryptionManager *encryption.Manager } @@ -100,10 +123,11 @@ type DDLMetaGroup struct { // Generally the config cannot be changed during its lifetime. 
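// A minimal construction sketch; the concrete values and surrounding variables
// (extStorage, withMigrations, encryptionManager) are illustrative assumptions,
// with Migrations typically produced by WithMigrationsBuilder.Build:
//
//	fm, err := CreateLogFileManager(ctx, LogFileManagerInit{
//		StartTS:                   startTS,
//		RestoreTS:                 restoreTS,
//		Storage:                   extStorage,
//		Migrations:                withMigrations,
//		MetadataDownloadBatchSize: 32,
//		EncryptionManager:         encryptionManager,
//	})
//	if err != nil {
//		return errors.Trace(err)
//	}
//	// Files yielded by fm.LoadDMLFiles are already filtered by the migrations.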
func CreateLogFileManager(ctx context.Context, init LogFileManagerInit) (*LogFileManager, error) { fm := &LogFileManager{ - startTS: init.StartTS, - restoreTS: init.RestoreTS, - storage: init.Storage, - helper: stream.NewMetadataHelper(stream.WithEncryptionManager(init.EncryptionManager)), + startTS: init.StartTS, + restoreTS: init.RestoreTS, + storage: init.Storage, + helper: stream.NewMetadataHelper(stream.WithEncryptionManager(init.EncryptionManager)), + withmigrations: init.Migrations, metadataDownloadBatchSize: init.MetadataDownloadBatchSize, } @@ -153,22 +177,22 @@ func (rc *LogFileManager) loadShiftTS(ctx context.Context) error { return nil } -func (rc *LogFileManager) streamingMeta(ctx context.Context) (MetaIter, error) { +func (rc *LogFileManager) streamingMeta(ctx context.Context) (MetaNameIter, error) { return rc.streamingMetaByTS(ctx, rc.restoreTS) } -func (rc *LogFileManager) streamingMetaByTS(ctx context.Context, restoreTS uint64) (MetaIter, error) { +func (rc *LogFileManager) streamingMetaByTS(ctx context.Context, restoreTS uint64) (MetaNameIter, error) { it, err := rc.createMetaIterOver(ctx, rc.storage) if err != nil { return nil, err } - filtered := iter.FilterOut(it, func(metadata *backuppb.Metadata) bool { - return restoreTS < metadata.MinTs || metadata.MaxTs < rc.shiftStartTS + filtered := iter.FilterOut(it, func(metaname *MetaName) bool { + return restoreTS < metaname.meta.MinTs || metaname.meta.MaxTs < rc.shiftStartTS }) return filtered, nil } -func (rc *LogFileManager) createMetaIterOver(ctx context.Context, s storage.ExternalStorage) (MetaIter, error) { +func (rc *LogFileManager) createMetaIterOver(ctx context.Context, s storage.ExternalStorage) (MetaNameIter, error) { opt := &storage.WalkOption{SubDir: stream.GetStreamBackupMetaPrefix()} names := []string{} err := s.WalkDir(ctx, opt, func(path string, size int64) error { @@ -182,7 +206,7 @@ func (rc *LogFileManager) createMetaIterOver(ctx context.Context, s storage.Exte return nil, err } namesIter := iter.FromSlice(names) - readMeta := func(ctx context.Context, name string) (*backuppb.Metadata, error) { + readMeta := func(ctx context.Context, name string) (*MetaName, error) { f, err := s.ReadFile(ctx, name) if err != nil { return nil, errors.Annotatef(err, "failed during reading file %s", name) @@ -191,7 +215,7 @@ func (rc *LogFileManager) createMetaIterOver(ctx context.Context, s storage.Exte if err != nil { return nil, errors.Annotatef(err, "failed to parse metadata of file %s", name) } - return meta, nil + return &MetaName{meta: meta, name: name}, nil } // TODO: maybe we need to be able to adjust the concurrency to download files, // which currently is the same as the chunk size @@ -200,29 +224,32 @@ func (rc *LogFileManager) createMetaIterOver(ctx context.Context, s storage.Exte return reader, nil } -func (rc *LogFileManager) FilterDataFiles(ms MetaIter) LogIter { - return iter.FlatMap(ms, func(m *backuppb.Metadata) LogIter { - return iter.FlatMap(iter.Enumerate(iter.FromSlice(m.FileGroups)), func(gi iter.Indexed[*backuppb.DataFileGroup]) LogIter { - return iter.Map( - iter.FilterOut(iter.Enumerate(iter.FromSlice(gi.Item.DataFilesInfo)), func(di iter.Indexed[*backuppb.DataFileInfo]) bool { +func (rc *LogFileManager) FilterDataFiles(m MetaNameIter) LogIter { + ms := rc.withmigrations.Metas(m) + return iter.FlatMap(ms, func(m *MetaWithMigrations) LogIter { + gs := m.Physicals(iter.Enumerate(iter.FromSlice(m.meta.FileGroups))) + return iter.FlatMap(gs, func(gim *PhysicalWithMigrations) LogIter { + fs := 
iter.FilterOut( + gim.Logicals(iter.Enumerate(iter.FromSlice(gim.physical.Item.DataFilesInfo))), + func(di FileIndex) bool { // Modify the data internally, a little hacky. - if m.MetaVersion > backuppb.MetaVersion_V1 { - di.Item.Path = gi.Item.Path + if m.meta.MetaVersion > backuppb.MetaVersion_V1 { + di.Item.Path = gim.physical.Item.Path } return di.Item.IsMeta || rc.ShouldFilterOut(di.Item) - }), - func(di iter.Indexed[*backuppb.DataFileInfo]) *LogDataFileInfo { - return &LogDataFileInfo{ - DataFileInfo: di.Item, - - // Since there is a `datafileinfo`, the length of `m.FileGroups` - // must be larger than 0. So we use the first group's name as - // metadata's unique key. - MetaDataGroupName: m.FileGroups[0].Path, - OffsetInMetaGroup: gi.Index, - OffsetInMergedGroup: di.Index, - } - }, + }) + return iter.Map(fs, func(di FileIndex) *LogDataFileInfo { + return &LogDataFileInfo{ + DataFileInfo: di.Item, + + // Since there is a `datafileinfo`, the length of `m.FileGroups` + // must be larger than 0. So we use the first group's name as + // metadata's unique key. + MetaDataGroupName: m.meta.FileGroups[0].Path, + OffsetInMetaGroup: gim.physical.Index, + OffsetInMergedGroup: di.Index, + } + }, ) }) }) @@ -262,8 +289,8 @@ func (rc *LogFileManager) LoadDDLFilesAndCountDMLFiles(ctx context.Context, coun return nil, err } if counter != nil { - m = iter.Tap(m, func(m Meta) { - for _, fg := range m.FileGroups { + m = iter.Tap(m, func(m *MetaName) { + for _, fg := range m.meta.FileGroups { for _, f := range fg.DataFilesInfo { if !f.IsMeta && !rc.ShouldFilterOut(f) { *counter += 1 @@ -285,16 +312,16 @@ func (rc *LogFileManager) LoadDMLFiles(ctx context.Context) (LogIter, error) { return nil, err } - mg := rc.FilterDataFiles(m) - return mg, nil + l := rc.FilterDataFiles(m) + return l, nil } -func (rc *LogFileManager) FilterMetaFiles(ms MetaIter) MetaGroupIter { - return iter.FlatMap(ms, func(m Meta) MetaGroupIter { - return iter.Map(iter.FromSlice(m.FileGroups), func(g *backuppb.DataFileGroup) DDLMetaGroup { +func (rc *LogFileManager) FilterMetaFiles(ms MetaNameIter) MetaGroupIter { + return iter.FlatMap(ms, func(m *MetaName) MetaGroupIter { + return iter.Map(iter.FromSlice(m.meta.FileGroups), func(g *backuppb.DataFileGroup) DDLMetaGroup { metas := iter.FilterOut(iter.FromSlice(g.DataFilesInfo), func(d Log) bool { // Modify the data internally, a little hacky. 
- if m.MetaVersion > backuppb.MetaVersion_V1 { + if m.meta.MetaVersion > backuppb.MetaVersion_V1 { d.Path = g.Path } return !d.IsMeta || rc.ShouldFilterOut(d) diff --git a/br/pkg/restore/log_client/log_file_manager_test.go b/br/pkg/restore/log_client/log_file_manager_test.go index 82fcf628d0139..0ac289b65b8b0 100644 --- a/br/pkg/restore/log_client/log_file_manager_test.go +++ b/br/pkg/restore/log_client/log_file_manager_test.go @@ -244,7 +244,7 @@ func testReadMetaBetweenTSWithVersion(t *testing.T, m metaMaker) { req.NoError(err) actualStoreIDs := make([]int64, 0, len(metas)) for _, meta := range metas { - actualStoreIDs = append(actualStoreIDs, meta.StoreId) + actualStoreIDs = append(actualStoreIDs, meta.Meta().StoreId) } expectedStoreIDs := make([]int64, 0, len(c.expected)) for _, meta := range c.expected { @@ -528,6 +528,7 @@ func TestFilterDataFiles(t *testing.T) { RestoreTS: 10, Storage: loc, + Migrations: emptyMigrations(), MetadataDownloadBatchSize: 32, }) req.NoError(err) @@ -536,7 +537,9 @@ func TestFilterDataFiles(t *testing.T) { m2(wr(1, 1, 1), wr(2, 2, 2), wr(3, 3, 3), wr(4, 4, 4), wr(5, 5, 5)), m2(wr(1, 1, 1), wr(2, 2, 2)), } - metaIter := iter.FromSlice(metas) + metaIter := iter.Map(iter.FromSlice(metas), func(meta logclient.Meta) *logclient.MetaName { + return logclient.NewMetaName(meta, "") + }) files := iter.CollectAll(ctx, fm.FilterDataFiles(metaIter)).Item check := func(file *logclient.LogDataFileInfo, metaKey string, goff, foff int) { req.Equal(file.MetaDataGroupName, metaKey) diff --git a/br/pkg/restore/log_client/log_file_map.go b/br/pkg/restore/log_client/log_file_map.go index db50c8391418b..c7c05e1be4e7e 100644 --- a/br/pkg/restore/log_client/log_file_map.go +++ b/br/pkg/restore/log_client/log_file_map.go @@ -35,6 +35,18 @@ func (m bitMap) Hit(off int) bool { return (m[blockIndex] & bitOffset) > 0 } +type bitMapExt struct { + bitMap + skip bool +} + +func newBitMapExt(skip bool) bitMapExt { + return bitMapExt{ + bitMap: newBitMap(), + skip: skip, + } +} + type fileMap struct { // group index -> bitmap of kv files pos map[int]bitMap @@ -46,6 +58,19 @@ func newFileMap() fileMap { } } +type fileMapExt struct { + // group index -> bitmap of kv files + pos map[int]bitMapExt + skip bool +} + +func newFileMapExt(skip bool) fileMapExt { + return fileMapExt{ + pos: make(map[int]bitMapExt), + skip: skip, + } +} + type LogFilesSkipMap struct { // metadata group key -> group map skipMap map[string]fileMap @@ -82,3 +107,68 @@ func (m *LogFilesSkipMap) NeedSkip(metaKey string, groupOff, fileOff int) bool { } return gp.Hit(fileOff) } + +type LogFilesSkipMapExt struct { + // metadata group key -> group map + skipMap map[string]fileMapExt +} + +func NewLogFilesSkipMapExt() *LogFilesSkipMapExt { + return &LogFilesSkipMapExt{ + skipMap: make(map[string]fileMapExt), + } +} + +func (m *LogFilesSkipMapExt) Insert(metaKey string, groupOff, fileOff int) { + mp, exists := m.skipMap[metaKey] + if !exists { + mp = newFileMapExt(false) + m.skipMap[metaKey] = mp + } + if mp.skip { + return + } + gp, exists := mp.pos[groupOff] + if !exists { + gp = newBitMapExt(false) + mp.pos[groupOff] = gp + } + if gp.skip { + return + } + gp.Set(fileOff) +} + +func (m *LogFilesSkipMapExt) SkipMeta(metaKey string) { + m.skipMap[metaKey] = newFileMapExt(true) +} + +func (m *LogFilesSkipMapExt) SkipGroup(metaKey string, groupOff int) { + mp, exists := m.skipMap[metaKey] + if !exists { + mp = newFileMapExt(false) + m.skipMap[metaKey] = mp + } + if mp.skip { + return + } + mp.pos[groupOff] = newBitMapExt(true) +} 
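// Illustrative behaviour of the three skip levels, using the NeedSkip method
// defined below; the meta keys and offsets here are made-up examples:
//
//	m := NewLogFilesSkipMapExt()
//	m.Insert("meta-a", 0, 3)    // skip only file #3 of group #0 in meta-a
//	m.SkipGroup("meta-a", 1)    // skip every file of group #1 in meta-a
//	m.SkipMeta("meta-b")        // skip the whole metadata file meta-b
//
//	m.NeedSkip("meta-a", 0, 3)  // true:  the single file is marked
//	m.NeedSkip("meta-a", 0, 4)  // false: same group, unmarked file
//	m.NeedSkip("meta-a", 1, 9)  // true:  the whole group is marked
//	m.NeedSkip("meta-b", 7, 7)  // true:  the whole meta is marked
//	m.NeedSkip("meta-c", 0, 0)  // false: never recorded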
+ +func (m *LogFilesSkipMapExt) NeedSkip(metaKey string, groupOff, fileOff int) bool { + mp, exists := m.skipMap[metaKey] + if !exists { + return false + } + if mp.skip { + return true + } + gp, exists := mp.pos[groupOff] + if !exists { + return false + } + if gp.skip { + return true + } + return gp.Hit(fileOff) +} diff --git a/br/pkg/restore/log_client/migration.go b/br/pkg/restore/log_client/migration.go new file mode 100644 index 0000000000000..19d9d3daeb3cb --- /dev/null +++ b/br/pkg/restore/log_client/migration.go @@ -0,0 +1,212 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logclient + +import ( + backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/tidb/br/pkg/utils/iter" +) + +type logicalSkipMap map[uint64]struct{} +type logicalFileSkipMap struct { + skipmap logicalSkipMap + skip bool +} +type physicalSkipMap map[string]*logicalFileSkipMap +type physicalFileSkipMap struct { + skipmap physicalSkipMap + skip bool +} +type metaSkipMap map[string]*physicalFileSkipMap + +func (skipmap metaSkipMap) skipMeta(metaPath string) { + skipmap[metaPath] = &physicalFileSkipMap{ + skip: true, + } +} + +func (skipmap metaSkipMap) skipPhysical(metaPath, physicalPath string) { + metaMap, exists := skipmap[metaPath] + if !exists { + metaMap = &physicalFileSkipMap{ + skipmap: make(map[string]*logicalFileSkipMap), + } + skipmap[metaPath] = metaMap + } else if metaMap.skip { + return + } + metaMap.skipmap[physicalPath] = &logicalFileSkipMap{ + skip: true, + } +} + +func (skipmap metaSkipMap) skipLogical(metaPath, physicalPath string, offset uint64) { + metaMap, exists := skipmap[metaPath] + if !exists { + metaMap = &physicalFileSkipMap{ + skipmap: make(map[string]*logicalFileSkipMap), + } + skipmap[metaPath] = metaMap + } else if metaMap.skip { + return + } + fileMap, exists := metaMap.skipmap[physicalPath] + if !exists { + fileMap = &logicalFileSkipMap{ + skipmap: make(map[uint64]struct{}), + } + metaMap.skipmap[physicalPath] = fileMap + } else if fileMap.skip { + return + } + fileMap.skipmap[offset] = struct{}{} +} + +func (skipmap metaSkipMap) NeedSkip(metaPath, physicalPath string, offset uint64) bool { + metaMap, exists := skipmap[metaPath] + if exists { + return false + } + if metaMap.skip { + return true + } + fileMap, exists := metaMap.skipmap[physicalPath] + if exists { + return false + } + if fileMap.skip { + return true + } + _, exists = fileMap.skipmap[offset] + return exists +} + +type WithMigrationsBuilder struct { + shiftStartTS uint64 + startTS uint64 + restoredTS uint64 +} + +func (builder *WithMigrationsBuilder) updateSkipMap(skipmap metaSkipMap, metas []*backuppb.MetaEdit) { + for _, meta := range metas { + if meta.DestructSelf { + skipmap.skipMeta(meta.Path) + continue + } + for _, path := range meta.DeletePhysicalFiles { + skipmap.skipPhysical(meta.Path, path) + } + for _, filesInPhysical := range meta.DeleteLogicalFiles { + for _, span := range filesInPhysical.Spans { + skipmap.skipLogical(meta.Path, 
filesInPhysical.Path, span.Offset) + } + } + } +} + +func (builder *WithMigrationsBuilder) coarseGrainedFilter(mig *backuppb.Migration) bool { + // Maybe the sst creation by compaction contains the kvs whose ts is larger than shift start ts. + // But currently log restore still restores the kvs. + // Besides, it indicates that the truncate task and the log restore task cannot be performed simultaneously. + // + // compaction until ts --+ +-- shift start ts + // v v + // log file [ .. .. .. .. ] + // + for _, compaction := range mig.Compactions { + if compaction.CompactionUntilTs < builder.shiftStartTS || compaction.CompactionFromTs > builder.restoredTS { + return true + } + } + return false +} + +// Create the wrapper by migrations. +func (builder *WithMigrationsBuilder) Build(migs []*backuppb.Migration) WithMigrations { + skipmap := make(metaSkipMap) + for _, mig := range migs { + // TODO: deal with TruncatedTo and DestructPrefix + if builder.coarseGrainedFilter(mig) { + continue + } + builder.updateSkipMap(skipmap, mig.EditMeta) + } + return WithMigrations(skipmap) +} + +type PhysicalMigrationsIter = iter.TryNextor[*PhysicalWithMigrations] + +type PhysicalWithMigrations struct { + skipmap logicalSkipMap + physical GroupIndex +} + +func (pwm *PhysicalWithMigrations) Logicals(fileIndexIter FileIndexIter) FileIndexIter { + return iter.FilterOut(fileIndexIter, func(fileIndex FileIndex) bool { + if pwm.skipmap != nil { + if _, ok := pwm.skipmap[fileIndex.Item.RangeOffset]; ok { + return true + } + } + return false + }) +} + +type MetaMigrationsIter = iter.TryNextor[*MetaWithMigrations] + +type MetaWithMigrations struct { + skipmap physicalSkipMap + meta Meta +} + +func (mwm *MetaWithMigrations) Physicals(groupIndexIter GroupIndexIter) PhysicalMigrationsIter { + return iter.MapFilter(groupIndexIter, func(groupIndex GroupIndex) (*PhysicalWithMigrations, bool) { + var logiSkipmap logicalSkipMap = nil + if mwm.skipmap != nil { + skipmap := mwm.skipmap[groupIndex.Item.Path] + if skipmap != nil { + if skipmap.skip { + return nil, true + } + logiSkipmap = skipmap.skipmap + } + } + return &PhysicalWithMigrations{ + skipmap: logiSkipmap, + physical: groupIndex, + }, false + }) +} + +type WithMigrations metaSkipMap + +func (wm WithMigrations) Metas(metaNameIter MetaNameIter) MetaMigrationsIter { + return iter.MapFilter(metaNameIter, func(mname *MetaName) (*MetaWithMigrations, bool) { + var phySkipmap physicalSkipMap = nil + if wm != nil { + skipmap := wm[mname.name] + if skipmap != nil { + if skipmap.skip { + return nil, true + } + phySkipmap = skipmap.skipmap + } + } + return &MetaWithMigrations{ + skipmap: phySkipmap, + meta: mname.meta, + }, false + }) +} diff --git a/br/pkg/restore/log_client/migration_test.go b/br/pkg/restore/log_client/migration_test.go new file mode 100644 index 0000000000000..0dd7b06197c7c --- /dev/null +++ b/br/pkg/restore/log_client/migration_test.go @@ -0,0 +1,352 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logclient_test + +import ( + "context" + "fmt" + "testing" + + backuppb "github.com/pingcap/kvproto/pkg/brpb" + logclient "github.com/pingcap/tidb/br/pkg/restore/log_client" + "github.com/pingcap/tidb/br/pkg/utils/iter" + "github.com/stretchr/testify/require" +) + +func emptyMigrations() logclient.WithMigrations { + return logclient.WithMigrations{} +} + +func nameFromID(prefix string, id uint64) string { + return fmt.Sprintf("%s_%d", prefix, id) +} + +func phyNameFromID(metaid, phyLen uint64) string { + return fmt.Sprintf("meta_%d_phy_%d", metaid, phyLen) +} + +func generateSpans(metaid, physicalLength, spanLength uint64) []*backuppb.Span { + spans := make([]*backuppb.Span, 0, spanLength) + for i := uint64(0); i < spanLength; i += 1 { + spans = append(spans, &backuppb.Span{ + Offset: lfl(metaid, physicalLength, i), + Length: 1, + }) + } + return spans +} + +func generateDeleteLogicalFiles(metaid, physicalLength, logicalLength uint64) []*backuppb.DeleteSpansOfFile { + spans := make([]*backuppb.DeleteSpansOfFile, 0, logicalLength) + spans = append(spans, &backuppb.DeleteSpansOfFile{ + Path: phyNameFromID(metaid, physicalLength), + Spans: generateSpans(metaid, physicalLength, logicalLength), + }) + return spans +} + +func generateDeletePhysicalFiles(metaid, physicalLength uint64) []string { + names := make([]string, 0, physicalLength) + for i := uint64(0); i < physicalLength; i += 1 { + names = append(names, phyNameFromID(metaid, i)) + } + return names +} + +func generateMigrationMeta(metaid uint64) *backuppb.MetaEdit { + return &backuppb.MetaEdit{ + Path: nameFromID("meta", metaid), + DestructSelf: true, + } +} + +func generateMigrationFile(metaid, physicalLength, physicalOffset, logicalLength uint64) *backuppb.MetaEdit { + return &backuppb.MetaEdit{ + Path: nameFromID("meta", metaid), + DeletePhysicalFiles: generateDeletePhysicalFiles(metaid, physicalLength), + DeleteLogicalFiles: generateDeleteLogicalFiles(metaid, physicalOffset, logicalLength), + DestructSelf: false, + } +} + +// mark the store id of metadata as test id identity +func generateMetaNameIter() logclient.MetaNameIter { + return iter.FromSlice([]*logclient.MetaName{ + logclient.NewMetaName(&backuppb.Metadata{StoreId: 0, FileGroups: generateGroupFiles(0, 3)}, nameFromID("meta", 0)), + logclient.NewMetaName(&backuppb.Metadata{StoreId: 1, FileGroups: generateGroupFiles(1, 3)}, nameFromID("meta", 1)), + logclient.NewMetaName(&backuppb.Metadata{StoreId: 2, FileGroups: generateGroupFiles(2, 3)}, nameFromID("meta", 2)), + }) +} + +// group file length +func gfl(storeId, length uint64) uint64 { + return storeId*100000 + length*100 +} + +func gfls(m [][]uint64) [][]uint64 { + glenss := make([][]uint64, 0, len(m)) + for storeId, gs := range m { + if len(gs) == 0 { + continue + } + glens := make([]uint64, 0, len(gs)) + for _, glen := range gs { + glens = append(glens, gfl(uint64(storeId), glen)) + } + glenss = append(glenss, glens) + } + return glenss +} + +// mark the length of group file as test id identity +func generateGroupFiles(metaId, length uint64) []*backuppb.DataFileGroup { + groupFiles := make([]*backuppb.DataFileGroup, 0, length) + for i := uint64(0); i < length; i += 1 { + groupFiles = append(groupFiles, &backuppb.DataFileGroup{ + Path: phyNameFromID(metaId, i), + Length: gfl(metaId, i), + DataFilesInfo: generateDataFiles(metaId, i, 3), + }) + } + return groupFiles +} + +// logical file length +func lfl(storeId, glen, plen uint64) uint64 { + return storeId*100000 + glen*100 + plen +} + +func lfls(m [][][]uint64) 
[][][]uint64 { + flensss := make([][][]uint64, 0, len(m)) + for storeId, glens := range m { + if len(glens) == 0 { + continue + } + flenss := make([][]uint64, 0, len(glens)) + for glen, fs := range glens { + if len(fs) == 0 { + continue + } + flens := make([]uint64, 0, len(fs)) + for _, flen := range fs { + flens = append(flens, lfl(uint64(storeId), uint64(glen), flen)) + } + flenss = append(flenss, flens) + } + flensss = append(flensss, flenss) + } + return flensss +} + +func generateDataFiles(metaId, glen, plen uint64) []*backuppb.DataFileInfo { + files := make([]*backuppb.DataFileInfo, 0, plen) + for i := uint64(0); i < plen; i += 1 { + files = append(files, &backuppb.DataFileInfo{ + Path: phyNameFromID(metaId, glen), + RangeOffset: lfl(metaId, glen, i), + Length: lfl(metaId, glen, i), + }) + } + return files +} + +func checkMetaNameIter(t *testing.T, expectStoreIds []int64, actualIter logclient.MetaMigrationsIter) { + res := iter.CollectAll(context.TODO(), iter.Map(actualIter, func(m *logclient.MetaWithMigrations) int64 { + return m.StoreId() + })) + require.NoError(t, res.Err) + require.Equal(t, expectStoreIds, res.Item) +} + +func checkPhysicalIter(t *testing.T, expectLengths []uint64, actualIter logclient.PhysicalMigrationsIter) { + res := iter.CollectAll(context.TODO(), iter.Map(actualIter, func(p *logclient.PhysicalWithMigrations) uint64 { + return p.PhysicalLength() + })) + require.NoError(t, res.Err) + require.Equal(t, expectLengths, res.Item) +} + +func checkLogicalIter(t *testing.T, expectLengths []uint64, actualIter logclient.FileIndexIter) { + res := iter.CollectAll(context.TODO(), iter.Map(actualIter, func(l logclient.FileIndex) uint64 { + return l.Item.Length + })) + require.NoError(t, res.Err) + require.Equal(t, expectLengths, res.Item) +} + +func generatePhysicalIter(meta *logclient.MetaWithMigrations) logclient.PhysicalMigrationsIter { + groupIter := iter.FromSlice(meta.Meta().FileGroups) + groupIndexIter := iter.Enumerate(groupIter) + return meta.Physicals(groupIndexIter) +} + +func generateLogicalIter(phy *logclient.PhysicalWithMigrations) logclient.FileIndexIter { + fileIter := iter.FromSlice(phy.Physical().DataFilesInfo) + fileIndexIter := iter.Enumerate(fileIter) + return phy.Logicals(fileIndexIter) +} + +func TestMigrations(t *testing.T) { + cases := []struct { + migrations []*backuppb.Migration + // test meta name iter + expectStoreIds []int64 + expectPhyLengths [][]uint64 + expectLogLengths [][][]uint64 + }{ + { + migrations: []*backuppb.Migration{ + { + EditMeta: []*backuppb.MetaEdit{ + generateMigrationMeta(0), + generateMigrationFile(2, 1, 2, 2), + }, + Compactions: []*backuppb.LogFileCompaction{ + { + CompactionFromTs: 0, + CompactionUntilTs: 9, + }, + }, + }, + }, + expectStoreIds: []int64{0, 1, 2}, + expectPhyLengths: gfls([][]uint64{ + {0, 1, 2}, {0, 1, 2}, {0, 1, 2}, + }), + expectLogLengths: lfls([][][]uint64{ + {{0, 1, 2}, {0, 1, 2}, {0, 1, 2}}, + {{0, 1, 2}, {0, 1, 2}, {0, 1, 2}}, + {{0, 1, 2}, {0, 1, 2}, {0, 1, 2}}, + }), + }, + { + migrations: []*backuppb.Migration{ + { + EditMeta: []*backuppb.MetaEdit{ + generateMigrationMeta(0), + generateMigrationFile(2, 1, 2, 2), + }, + Compactions: []*backuppb.LogFileCompaction{ + { + CompactionFromTs: 50, + CompactionUntilTs: 52, + }, + }, + }, + }, + expectStoreIds: []int64{1, 2}, + expectPhyLengths: gfls([][]uint64{ + { /*0, 1, 2*/ }, {0, 1, 2}, { /*0 */ 1, 2}, + }), + expectLogLengths: lfls([][][]uint64{ + { /*{0, 1, 2}, {0, 1, 2}, {0, 1, 2}*/ }, + {{0, 1, 2}, {0, 1, 2}, {0, 1, 2}}, + {{ /*0, 1, 2*/ }, {0, 
1, 2}, { /*0, 1 */ 2}}, + }), + }, + { + migrations: []*backuppb.Migration{ + { + EditMeta: []*backuppb.MetaEdit{ + generateMigrationMeta(0), + }, + Compactions: []*backuppb.LogFileCompaction{ + { + CompactionFromTs: 50, + CompactionUntilTs: 52, + }, + }, + }, + { + EditMeta: []*backuppb.MetaEdit{ + generateMigrationFile(2, 1, 2, 2), + }, + Compactions: []*backuppb.LogFileCompaction{ + { + CompactionFromTs: 120, + CompactionUntilTs: 140, + }, + }, + }, + }, + expectStoreIds: []int64{1, 2}, + expectPhyLengths: gfls([][]uint64{ + { /*0, 1, 2*/ }, {0, 1, 2}, { /*0 */ 1, 2}, + }), + expectLogLengths: lfls([][][]uint64{ + { /*{0, 1, 2}, {0, 1, 2}, {0, 1, 2}*/ }, + {{0, 1, 2}, {0, 1, 2}, {0, 1, 2}}, + {{ /*0, 1, 2*/ }, {0, 1, 2}, { /*0, 1 */ 2}}, + }), + }, + { + migrations: []*backuppb.Migration{ + { + EditMeta: []*backuppb.MetaEdit{ + generateMigrationMeta(0), + }, + Compactions: []*backuppb.LogFileCompaction{ + { + CompactionFromTs: 50, + CompactionUntilTs: 52, + }, + }, + }, + { + EditMeta: []*backuppb.MetaEdit{ + generateMigrationFile(2, 1, 2, 2), + }, + Compactions: []*backuppb.LogFileCompaction{ + { + CompactionFromTs: 1200, + CompactionUntilTs: 1400, + }, + }, + }, + }, + expectStoreIds: []int64{1, 2}, + expectPhyLengths: gfls([][]uint64{ + { /*0, 1, 2*/ }, {0, 1, 2}, {0, 1, 2}, + }), + expectLogLengths: lfls([][][]uint64{ + { /*{0, 1, 2}, {0, 1, 2}, {0, 1, 2}*/ }, + {{0, 1, 2}, {0, 1, 2}, {0, 1, 2}}, + {{0, 1, 2}, {0, 1, 2}, {0, 1, 2}}, + }), + }, + } + + ctx := context.Background() + for _, cs := range cases { + builder := logclient.NewMigrationBuilder(10, 100, 200) + withMigrations := builder.Build(cs.migrations) + it := withMigrations.Metas(generateMetaNameIter()) + checkMetaNameIter(t, cs.expectStoreIds, it) + it = withMigrations.Metas(generateMetaNameIter()) + collect := iter.CollectAll(ctx, it) + require.NoError(t, collect.Err) + for j, meta := range collect.Item { + physicalIter := generatePhysicalIter(meta) + checkPhysicalIter(t, cs.expectPhyLengths[j], physicalIter) + physicalIter = generatePhysicalIter(meta) + collect := iter.CollectAll(ctx, physicalIter) + require.NoError(t, collect.Err) + for k, phy := range collect.Item { + logicalIter := generateLogicalIter(phy) + checkLogicalIter(t, cs.expectLogLengths[j][k], logicalIter) + } + } + } +} diff --git a/br/pkg/utils/iter/combinator_types.go b/br/pkg/utils/iter/combinator_types.go index dca8a9fe37bd2..34288d104236c 100644 --- a/br/pkg/utils/iter/combinator_types.go +++ b/br/pkg/utils/iter/combinator_types.go @@ -88,12 +88,6 @@ func (t *take[T]) TryNext(ctx context.Context) IterResult[T] { return t.inner.TryNext(ctx) } -type join[T any] struct { - inner TryNextor[TryNextor[T]] - - current TryNextor[T] -} - type pureMap[T, R any] struct { inner TryNextor[T] @@ -109,6 +103,33 @@ func (p pureMap[T, R]) TryNext(ctx context.Context) IterResult[R] { return Emit(p.mapper(r.Item)) } +type filterMap[T, R any] struct { + inner TryNextor[T] + + mapper func(T) (R, bool) +} + +func (f filterMap[T, R]) TryNext(ctx context.Context) IterResult[R] { + for { + r := f.inner.TryNext(ctx) + + if r.FinishedOrError() { + return DoneBy[R](r) + } + + res, skip := f.mapper(r.Item) + if !skip { + return Emit(res) + } + } +} + +type join[T any] struct { + inner TryNextor[TryNextor[T]] + + current TryNextor[T] +} + func (j *join[T]) TryNext(ctx context.Context) IterResult[T] { r := j.current.TryNext(ctx) if r.Err != nil { diff --git a/br/pkg/utils/iter/combinators.go b/br/pkg/utils/iter/combinators.go index 5fc8985eadcc3..6247237161912 100644 --- 
a/br/pkg/utils/iter/combinators.go +++ b/br/pkg/utils/iter/combinators.go @@ -84,6 +84,13 @@ func Map[T, R any](it TryNextor[T], mapper func(T) R) TryNextor[R] { } } +func MapFilter[T, R any](it TryNextor[T], mapper func(T) (R, bool)) TryNextor[R] { + return filterMap[T, R]{ + inner: it, + mapper: mapper, + } +} + // ConcatAll concatenates all elements yields by the iterators. // In another word, it 'chains' all the input iterators. func ConcatAll[T any](items ...TryNextor[T]) TryNextor[T] { From ef6759fe43fc516adf5de5a49f0275f0e1fc68f7 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Wed, 20 Nov 2024 10:56:56 +0800 Subject: [PATCH 04/17] Restore: implement restorer for compacted SST/Snapshot/log files (#57208) close pingcap/tidb#57209 --- br/pkg/restore/BUILD.bazel | 20 +- br/pkg/restore/import_mode_switcher.go | 15 +- br/pkg/restore/log_client/BUILD.bazel | 10 +- br/pkg/restore/log_client/client.go | 466 +++++++++++------ br/pkg/restore/log_client/client_test.go | 489 +++++++++++++++++- .../log_client/compacted_file_strategy.go | 112 ++++ br/pkg/restore/log_client/export_test.go | 2 +- .../restore/log_client/import_retry_test.go | 187 +------ br/pkg/restore/log_client/log_file_manager.go | 75 ++- .../log_client/log_file_manager_test.go | 14 +- .../restore/log_client/log_split_strategy.go | 107 ++++ br/pkg/restore/log_client/migration.go | 38 +- br/pkg/restore/log_client/migration_test.go | 4 +- br/pkg/restore/restorer.go | 367 +++++++++++++ br/pkg/restore/restorer_test.go | 280 ++++++++++ br/pkg/restore/snap_client/BUILD.bazel | 1 - br/pkg/restore/snap_client/client.go | 213 ++++---- br/pkg/restore/snap_client/client_test.go | 4 +- br/pkg/restore/snap_client/export_test.go | 22 +- br/pkg/restore/snap_client/import.go | 146 ++++-- br/pkg/restore/snap_client/import_test.go | 17 +- br/pkg/restore/snap_client/pipeline_items.go | 47 -- br/pkg/restore/snap_client/tikv_sender.go | 118 +---- .../restore/snap_client/tikv_sender_test.go | 50 +- br/pkg/restore/split/BUILD.bazel | 6 +- br/pkg/restore/split/mock_pd_client.go | 2 + br/pkg/restore/split/split.go | 352 +------------ br/pkg/restore/split/split_test.go | 93 +--- br/pkg/restore/split/splitter.go | 400 ++++++++++++++ br/pkg/restore/utils/rewrite_rule.go | 31 +- br/pkg/stream/stream_metas.go | 11 + br/pkg/task/restore.go | 40 +- br/pkg/task/restore_raw.go | 20 +- br/pkg/task/restore_txn.go | 14 +- br/pkg/task/stream.go | 111 ++-- br/pkg/utils/iter/combinator_types.go | 20 + br/pkg/utils/iter/combinators.go | 7 + 37 files changed, 2682 insertions(+), 1229 deletions(-) create mode 100644 br/pkg/restore/log_client/compacted_file_strategy.go create mode 100644 br/pkg/restore/log_client/log_split_strategy.go create mode 100644 br/pkg/restore/restorer.go create mode 100644 br/pkg/restore/restorer_test.go create mode 100644 br/pkg/restore/split/splitter.go diff --git a/br/pkg/restore/BUILD.bazel b/br/pkg/restore/BUILD.bazel index 5351b6ee4178d..584f73869b956 100644 --- a/br/pkg/restore/BUILD.bazel +++ b/br/pkg/restore/BUILD.bazel @@ -5,16 +5,22 @@ go_library( srcs = [ "import_mode_switcher.go", "misc.go", + "restorer.go", ], importpath = "github.com/pingcap/tidb/br/pkg/restore", visibility = ["//visibility:public"], deps = [ + "//br/pkg/checkpoint", "//br/pkg/conn", "//br/pkg/conn/util", "//br/pkg/errors", "//br/pkg/logutil", "//br/pkg/pdutil", + "//br/pkg/restore/split", + "//br/pkg/restore/utils", + "//br/pkg/summary", "//br/pkg/utils", + "//br/pkg/utils/iter", "//pkg/domain", "//pkg/kv", "//pkg/meta", @@ -22,8 +28,10 @@ go_library( 
"//pkg/parser/model", "//pkg/util", "@com_github_go_sql_driver_mysql//:mysql", + "@com_github_opentracing_opentracing_go//:opentracing-go", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_pingcap_log//:log", "@com_github_tikv_client_go_v2//oracle", @@ -34,6 +42,7 @@ go_library( "@org_golang_google_grpc//credentials/insecure", "@org_golang_x_sync//errgroup", "@org_uber_go_zap//:zap", + "@org_uber_go_zap//zapcore", ], ) @@ -43,21 +52,28 @@ go_test( srcs = [ "import_mode_switcher_test.go", "misc_test.go", + "restorer_test.go", ], flaky = True, - race = "off", - shard_count = 6, + shard_count = 13, deps = [ ":restore", "//br/pkg/conn", "//br/pkg/mock", "//br/pkg/pdutil", "//br/pkg/restore/split", + "//br/pkg/restore/utils", + "//br/pkg/utils/iter", "//pkg/kv", "//pkg/parser/model", "//pkg/session", + "//pkg/tablecodec", + "//pkg/util", + "//pkg/util/codec", "@com_github_coreos_go_semver//semver", + "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_pingcap_kvproto//pkg/metapb", "@com_github_stretchr_testify//require", diff --git a/br/pkg/restore/import_mode_switcher.go b/br/pkg/restore/import_mode_switcher.go index 0ae69f4a6a0af..be01389c19e5f 100644 --- a/br/pkg/restore/import_mode_switcher.go +++ b/br/pkg/restore/import_mode_switcher.go @@ -5,6 +5,7 @@ package restore import ( "context" "crypto/tls" + "sync" "time" _ "github.com/go-sql-driver/mysql" // mysql driver @@ -46,9 +47,11 @@ func NewImportModeSwitcher( } } +var closeOnce sync.Once + // switchToNormalMode switch tikv cluster to normal mode. -func (switcher *ImportModeSwitcher) switchToNormalMode(ctx context.Context) error { - close(switcher.switchCh) +func (switcher *ImportModeSwitcher) SwitchToNormalMode(ctx context.Context) error { + closeOnce.Do(func() { close(switcher.switchCh) }) return switcher.switchTiKVMode(ctx, import_sstpb.SwitchMode_Normal) } @@ -113,8 +116,8 @@ func (switcher *ImportModeSwitcher) switchTiKVMode( return nil } -// switchToImportMode switch tikv cluster to import mode. -func (switcher *ImportModeSwitcher) switchToImportMode( +// SwitchToImportMode switch tikv cluster to import mode. +func (switcher *ImportModeSwitcher) SwitchToImportMode( ctx context.Context, ) { // tikv automatically switch to normal mode in every 10 minutes @@ -163,7 +166,7 @@ func RestorePreWork( if switchToImport { // Switch TiKV cluster to import mode (adjust rocksdb configuration). 
- switcher.switchToImportMode(ctx) + switcher.SwitchToImportMode(ctx) } return mgr.RemoveSchedulersWithConfig(ctx) @@ -186,7 +189,7 @@ func RestorePostWork( ctx = context.Background() } - if err := switcher.switchToNormalMode(ctx); err != nil { + if err := switcher.SwitchToNormalMode(ctx); err != nil { log.Warn("fail to switch to normal mode", zap.Error(err)) } if err := restoreSchedulers(ctx); err != nil { diff --git a/br/pkg/restore/log_client/BUILD.bazel b/br/pkg/restore/log_client/BUILD.bazel index 85da59628f8b7..7fb781e7ad0ef 100644 --- a/br/pkg/restore/log_client/BUILD.bazel +++ b/br/pkg/restore/log_client/BUILD.bazel @@ -4,10 +4,12 @@ go_library( name = "log_client", srcs = [ "client.go", + "compacted_file_strategy.go", "import.go", "import_retry.go", "log_file_manager.go", "log_file_map.go", + "log_split_strategy.go", "migration.go", ], importpath = "github.com/pingcap/tidb/br/pkg/restore/log_client", @@ -26,6 +28,7 @@ go_library( "//br/pkg/restore/ingestrec", "//br/pkg/restore/internal/import_client", "//br/pkg/restore/internal/rawkv", + "//br/pkg/restore/snap_client", "//br/pkg/restore/split", "//br/pkg/restore/tiflashrec", "//br/pkg/restore/utils", @@ -43,6 +46,7 @@ go_library( "//pkg/util", "//pkg/util/codec", "//pkg/util/redact", + "//pkg/util/sqlexec", "//pkg/util/table-filter", "@com_github_fatih_color//:color", "@com_github_gogo_protobuf//proto", @@ -86,12 +90,13 @@ go_test( ], embed = [":log_client"], flaky = True, - shard_count = 42, + shard_count = 45, deps = [ "//br/pkg/errors", "//br/pkg/glue", "//br/pkg/gluetidb", "//br/pkg/mock", + "//br/pkg/restore", "//br/pkg/restore/internal/import_client", "//br/pkg/restore/split", "//br/pkg/restore/utils", @@ -113,6 +118,7 @@ go_test( "//pkg/util/codec", "//pkg/util/sqlexec", "//pkg/util/table-filter", + "@com_github_docker_go_units//:go-units", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/brpb", @@ -120,10 +126,8 @@ go_test( "@com_github_pingcap_kvproto//pkg/errorpb", "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_pingcap_kvproto//pkg/metapb", - "@com_github_pingcap_kvproto//pkg/pdpb", "@com_github_pingcap_log//:log", "@com_github_stretchr_testify//require", - "@com_github_tikv_pd_client//:client", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", diff --git a/br/pkg/restore/log_client/client.go b/br/pkg/restore/log_client/client.go index 4160aa86a6048..4faf59a316657 100644 --- a/br/pkg/restore/log_client/client.go +++ b/br/pkg/restore/log_client/client.go @@ -47,6 +47,7 @@ import ( "github.com/pingcap/tidb/br/pkg/restore/ingestrec" importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" "github.com/pingcap/tidb/br/pkg/restore/internal/rawkv" + snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/restore/tiflashrec" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" @@ -62,6 +63,7 @@ import ( "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/model" tidbutil "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/sqlexec" filter "github.com/pingcap/tidb/pkg/util/table-filter" "github.com/tikv/client-go/v2/config" kvutil "github.com/tikv/client-go/v2/util" @@ -79,19 +81,122 @@ const maxSplitKeysOnce = 10240 // rawKVBatchCount specifies the count of entries that the rawkv client puts into TiKV. 
const rawKVBatchCount = 64 +// LogRestoreManager is a comprehensive wrapper that encapsulates all logic related to log restoration, +// including concurrency management, checkpoint handling, and file importing for efficient log processing. +type LogRestoreManager struct { + fileImporter *LogFileImporter + workerPool *tidbutil.WorkerPool + checkpointRunner *checkpoint.CheckpointRunner[checkpoint.LogRestoreKeyType, checkpoint.LogRestoreValueType] +} + +func NewLogRestoreManager( + ctx context.Context, + fileImporter *LogFileImporter, + poolSize uint, + createCheckpointSessionFn func() (glue.Session, error), +) (*LogRestoreManager, error) { + // for compacted reason, user only set --concurrency for log file restore speed. + log.Info("log restore worker pool", zap.Uint("size", poolSize)) + l := &LogRestoreManager{ + fileImporter: fileImporter, + workerPool: tidbutil.NewWorkerPool(poolSize, "log manager worker pool"), + } + se, err := createCheckpointSessionFn() + if err != nil { + return nil, errors.Trace(err) + } + + if se != nil { + l.checkpointRunner, err = checkpoint.StartCheckpointRunnerForLogRestore(ctx, se) + if err != nil { + return nil, errors.Trace(err) + } + } + return l, nil +} + +func (l *LogRestoreManager) Close(ctx context.Context) { + if l.fileImporter != nil { + if err := l.fileImporter.Close(); err != nil { + log.Warn("failed to close file importer") + } + } + if l.checkpointRunner != nil { + l.checkpointRunner.WaitForFinish(ctx, true) + } +} + +// SstRestoreManager is a comprehensive wrapper that encapsulates all logic related to sst restoration, +// including concurrency management, checkpoint handling, and file importing(splitting) for efficient log processing. +type SstRestoreManager struct { + restorer restore.SstRestorer + workerPool *tidbutil.WorkerPool + checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType] +} + +func (s *SstRestoreManager) Close(ctx context.Context) { + if s.restorer != nil { + if err := s.restorer.Close(); err != nil { + log.Warn("failed to close file restorer") + } + } + if s.checkpointRunner != nil { + s.checkpointRunner.WaitForFinish(ctx, true) + } +} + +func NewSstRestoreManager( + ctx context.Context, + snapFileImporter *snapclient.SnapFileImporter, + concurrencyPerStore uint, + storeCount uint, + createCheckpointSessionFn func() (glue.Session, error), +) (*SstRestoreManager, error) { + // This poolSize is similar to full restore, as both workflows are comparable. + // The poolSize should be greater than concurrencyPerStore multiplied by the number of stores. 
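To make the sizing rule in the comment above concrete, the assignment that follows computes poolSize = concurrencyPerStore * 32 * storeCount. A tiny sketch of that arithmetic with illustrative numbers (the concurrency and store count below are made up, not taken from this patch):

package main

import "fmt"

func main() {
	const (
		concurrencyPerStore = uint(128) // hypothetical per-store download concurrency
		storeCount          = uint(3)   // hypothetical number of TiKV stores
	)
	poolSize := concurrencyPerStore * 32 * storeCount
	fmt.Println(poolSize)                                  // 12288
	fmt.Println(poolSize > concurrencyPerStore*storeCount) // true, as the comment requires
}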
+ poolSize := concurrencyPerStore * 32 * storeCount + log.Info("sst restore worker pool", zap.Uint("size", poolSize)) + sstWorkerPool := tidbutil.NewWorkerPool(poolSize, "sst file") + + s := &SstRestoreManager{ + workerPool: tidbutil.NewWorkerPool(poolSize, "log manager worker pool"), + } + se, err := createCheckpointSessionFn() + if err != nil { + return nil, errors.Trace(err) + } + if se != nil { + checkpointRunner, err := checkpoint.StartCheckpointRunnerForRestore(ctx, se) + if err != nil { + return nil, errors.Trace(err) + } + s.checkpointRunner = checkpointRunner + } + // TODO implement checkpoint + s.restorer = restore.NewSimpleSstRestorer(ctx, snapFileImporter, sstWorkerPool, nil) + return s, nil +} + type LogClient struct { - cipher *backuppb.CipherInfo - pdClient pd.Client - pdHTTPClient pdhttp.Client - clusterID uint64 - dom *domain.Domain - tlsConf *tls.Config - keepaliveConf keepalive.ClientParameters + *LogFileManager + logRestoreManager *LogRestoreManager + sstRestoreManager *SstRestoreManager + + cipher *backuppb.CipherInfo + pdClient pd.Client + pdHTTPClient pdhttp.Client + clusterID uint64 + dom *domain.Domain + tlsConf *tls.Config + keepaliveConf keepalive.ClientParameters + concurrencyPerStore uint rawKVClient *rawkv.RawKVBatchClient storage storage.ExternalStorage - se glue.Session + // unsafeSession is not thread-safe. + // Currently, it is only utilized in some initialization and post-handle functions. + unsafeSession glue.Session // currentTS is used for rewrite meta kv when restore stream. // Can not use `restoreTS` directly, because schema created in `full backup` maybe is new than `restoreTS`. @@ -99,11 +204,6 @@ type LogClient struct { upstreamClusterID uint64 - *LogFileManager - - workerPool *tidbutil.WorkerPool - fileImporter *LogFileImporter - // the query to insert rows into table `gc_delete_range`, lack of ts. deleteRangeQuery []*stream.PreDelRangeQuery deleteRangeQueryCh chan *stream.PreDelRangeQuery @@ -131,21 +231,80 @@ func NewRestoreClient( } // Close a client. -func (rc *LogClient) Close() { +func (rc *LogClient) Close(ctx context.Context) { + defer func() { + if rc.logRestoreManager != nil { + rc.logRestoreManager.Close(ctx) + } + if rc.sstRestoreManager != nil { + rc.sstRestoreManager.Close(ctx) + } + }() + // close the connection, and it must be succeed when in SQL mode. - if rc.se != nil { - rc.se.Close() + if rc.unsafeSession != nil { + rc.unsafeSession.Close() } if rc.rawKVClient != nil { rc.rawKVClient.Close() } + log.Info("Restore client closed") +} - if err := rc.fileImporter.Close(); err != nil { - log.Warn("failed to close file improter") +func (rc *LogClient) RestoreCompactedSstFiles( + ctx context.Context, + compactionsIter iter.TryNextor[*backuppb.LogFileSubcompaction], + rules map[int64]*restoreutils.RewriteRules, + importModeSwitcher *restore.ImportModeSwitcher, + onProgress func(int64), +) error { + backupFileSets := make([]restore.BackupFileSet, 0, 8) + // Collect all items from the iterator in advance to avoid blocking during restoration. + // This approach ensures that we have all necessary data ready for processing, + // preventing any potential delays caused by waiting for the iterator to yield more items. 
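The loop that follows is the usual hand-rolled way to drain a TryNextor; the iter package also provides CollectAll, which this patch uses later for DDL files. A small sketch of both forms, assuming CollectAll keeps the result shape seen later in this diff (an Err field plus the collected slice in Item):

package main

import (
	"context"
	"fmt"

	"github.com/pingcap/tidb/br/pkg/utils/iter"
)

func main() {
	ctx := context.Background()

	// Hand-rolled drain: pull until Finished, stop on the first error.
	src := iter.FromSlice([]int{1, 2, 3})
	collected := make([]int, 0, 3)
	for r := src.TryNext(ctx); !r.Finished; r = src.TryNext(ctx) {
		if r.Err != nil {
			fmt.Println("error:", r.Err)
			return
		}
		collected = append(collected, r.Item)
	}
	fmt.Println(collected) // [1 2 3]

	// The equivalent one-liner; any error surfaces on the result's Err field.
	all := iter.CollectAll(ctx, iter.FromSlice([]int{4, 5, 6}))
	fmt.Println(all.Err, all.Item) // <nil> [4 5 6]
}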
+ for r := compactionsIter.TryNext(ctx); !r.Finished; r = compactionsIter.TryNext(ctx) { + if r.Err != nil { + return r.Err + } + i := r.Item + rewriteRules, ok := rules[i.Meta.TableId] + if !ok { + log.Warn("[Compacted SST Restore] Skipping excluded table during restore.", zap.Int64("table_id", i.Meta.TableId)) + continue + } + set := restore.BackupFileSet{ + TableID: i.Meta.TableId, + SSTFiles: i.SstOutputs, + RewriteRules: rewriteRules, + } + backupFileSets = append(backupFileSets, set) + } + if len(backupFileSets) == 0 { + log.Info("[Compacted SST Restore] No SST files found for restoration.") + return nil } + importModeSwitcher.SwitchToImportMode(ctx) + defer func() { + switchErr := importModeSwitcher.SwitchToNormalMode(ctx) + if switchErr != nil { + log.Warn("[Compacted SST Restore] Failed to switch back to normal mode after restoration.", zap.Error(switchErr)) + } + }() - log.Info("Restore client closed") + // To optimize performance and minimize cross-region downloads, + // we are currently opting for a single restore approach instead of batch restoration. + // This decision is similar to the handling of raw and txn restores, + // where batch processing may lead to increased complexity and potential inefficiencies. + // TODO: Future enhancements may explore the feasibility of reintroducing batch restoration + // while maintaining optimal performance and resource utilization. + for _, i := range backupFileSets { + err := rc.sstRestoreManager.restorer.GoRestore(onProgress, []restore.BackupFileSet{i}) + if err != nil { + return errors.Trace(err) + } + } + return rc.sstRestoreManager.restorer.WaitUntilFinish() } func (rc *LogClient) SetRawKVBatchClient( @@ -166,11 +325,6 @@ func (rc *LogClient) SetCrypter(crypter *backuppb.CipherInfo) { rc.cipher = crypter } -func (rc *LogClient) SetConcurrency(c uint) { - log.Info("download worker pool", zap.Uint("size", c)) - rc.workerPool = tidbutil.NewWorkerPool(c, "file") -} - func (rc *LogClient) SetUpstreamClusterID(upstreamClusterID uint64) { log.Info("upstream cluster id", zap.Uint64("cluster id", upstreamClusterID)) rc.upstreamClusterID = upstreamClusterID @@ -210,28 +364,19 @@ func (rc *LogClient) CleanUpKVFiles( ) error { // Current we only have v1 prefix. // In the future, we can add more operation for this interface. - return rc.fileImporter.ClearFiles(ctx, rc.pdClient, "v1") -} - -func (rc *LogClient) StartCheckpointRunnerForLogRestore(ctx context.Context, g glue.Glue, store kv.Storage) (*checkpoint.CheckpointRunner[checkpoint.LogRestoreKeyType, checkpoint.LogRestoreValueType], error) { - se, err := g.CreateSession(store) - if err != nil { - return nil, errors.Trace(err) - } - runner, err := checkpoint.StartCheckpointRunnerForLogRestore(ctx, se) - return runner, errors.Trace(err) + return rc.logRestoreManager.fileImporter.ClearFiles(ctx, rc.pdClient, "v1") } // Init create db connection and domain for storage. 
-func (rc *LogClient) Init(g glue.Glue, store kv.Storage) error { +func (rc *LogClient) Init(ctx context.Context, g glue.Glue, store kv.Storage) error { var err error - rc.se, err = g.CreateSession(store) + rc.unsafeSession, err = g.CreateSession(store) if err != nil { return errors.Trace(err) } // Set SQL mode to None for avoiding SQL compatibility problem - err = rc.se.Execute(context.Background(), "set @@sql_mode=''") + err = rc.unsafeSession.Execute(ctx, "set @@sql_mode=''") if err != nil { return errors.Trace(err) } @@ -244,7 +389,13 @@ func (rc *LogClient) Init(g glue.Glue, store kv.Storage) error { return nil } -func (rc *LogClient) InitClients(ctx context.Context, backend *backuppb.StorageBackend) { +func (rc *LogClient) InitClients( + ctx context.Context, + backend *backuppb.StorageBackend, + createSessionFn func() (glue.Session, error), + concurrency uint, + concurrencyPerStore uint, +) error { stores, err := conn.GetAllTiKVStoresWithRetry(ctx, rc.pdClient, util.SkipTiFlash) if err != nil { log.Fatal("failed to get stores", zap.Error(err)) @@ -252,7 +403,48 @@ func (rc *LogClient) InitClients(ctx context.Context, backend *backuppb.StorageB metaClient := split.NewClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, maxSplitKeysOnce, len(stores)+1) importCli := importclient.NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf) - rc.fileImporter = NewLogFileImporter(metaClient, importCli, backend) + + rc.logRestoreManager, err = NewLogRestoreManager( + ctx, + NewLogFileImporter(metaClient, importCli, backend), + concurrency, + createSessionFn, + ) + if err != nil { + return errors.Trace(err) + } + var createCallBacks []func(*snapclient.SnapFileImporter) error + var closeCallBacks []func(*snapclient.SnapFileImporter) error + createCallBacks = append(createCallBacks, func(importer *snapclient.SnapFileImporter) error { + return importer.CheckMultiIngestSupport(ctx, stores) + }) + + opt := snapclient.NewSnapFileImporterOptions( + rc.cipher, metaClient, importCli, backend, + snapclient.RewriteModeKeyspace, stores, rc.concurrencyPerStore, createCallBacks, closeCallBacks, + ) + snapFileImporter, err := snapclient.NewSnapFileImporter( + ctx, rc.dom.Store().GetCodec().GetAPIVersion(), snapclient.TiDBCompcated, opt) + if err != nil { + return errors.Trace(err) + } + rc.sstRestoreManager, err = NewSstRestoreManager( + ctx, + snapFileImporter, + concurrencyPerStore, + uint(len(stores)), + createSessionFn, + ) + return errors.Trace(err) +} + +func (rc *LogClient) InitCheckpointMetadataForCompactedSstRestore( + ctx context.Context, +) (map[string]struct{}, error) { + // get sst checkpoint to skip repeated files + sstCheckpointSets := make(map[string]struct{}) + // TODO initial checkpoint + return sstCheckpointSets, nil } func (rc *LogClient) InitCheckpointMetadataForLogRestore( @@ -267,7 +459,7 @@ func (rc *LogClient) InitCheckpointMetadataForLogRestore( // for the first time. 
if checkpoint.ExistsLogRestoreCheckpointMetadata(ctx, rc.dom) { // load the checkpoint since this is not the first time to restore - meta, err := checkpoint.LoadCheckpointMetadataForLogRestore(ctx, rc.se.GetSessionCtx().GetRestrictedSQLExecutor()) + meta, err := checkpoint.LoadCheckpointMetadataForLogRestore(ctx, rc.unsafeSession.GetSessionCtx().GetRestrictedSQLExecutor()) if err != nil { return "", errors.Trace(err) } @@ -284,7 +476,7 @@ func (rc *LogClient) InitCheckpointMetadataForLogRestore( log.Info("save gc ratio into checkpoint metadata", zap.Uint64("start-ts", startTS), zap.Uint64("restored-ts", restoredTS), zap.Uint64("rewrite-ts", rc.currentTS), zap.String("gc-ratio", gcRatio), zap.Int("tiflash-item-count", len(items))) - if err := checkpoint.SaveCheckpointMetadataForLogRestore(ctx, rc.se, &checkpoint.CheckpointMetadataForLogRestore{ + if err := checkpoint.SaveCheckpointMetadataForLogRestore(ctx, rc.unsafeSession, &checkpoint.CheckpointMetadataForLogRestore{ UpstreamClusterID: rc.upstreamClusterID, RestoredTS: restoredTS, StartTS: startTS, @@ -298,6 +490,15 @@ func (rc *LogClient) InitCheckpointMetadataForLogRestore( return gcRatio, nil } +func (rc *LogClient) GetMigrations(ctx context.Context) ([]*backuppb.Migration, error) { + ext := stream.MigerationExtension(rc.storage) + migs, err := ext.Load(ctx) + if err != nil { + return nil, errors.Trace(err) + } + return migs.ListAll(), nil +} + func (rc *LogClient) InstallLogFileManager(ctx context.Context, startTS, restoreTS uint64, metadataDownloadBatchSize uint, encryptionManager *encryption.Manager) error { init := LogFileManagerInit{ @@ -305,6 +506,10 @@ func (rc *LogClient) InstallLogFileManager(ctx context.Context, startTS, restore RestoreTS: restoreTS, Storage: rc.storage, + MigrationsBuilder: &WithMigrationsBuilder{ + startTS: startTS, + restoredTS: restoreTS, + }, MetadataDownloadBatchSize: metadataDownloadBatchSize, EncryptionManager: encryptionManager, } @@ -477,9 +682,7 @@ func ApplyKVFilesWithSingleMethod( func (rc *LogClient) RestoreKVFiles( ctx context.Context, rules map[int64]*restoreutils.RewriteRules, - idrules map[int64]int64, logIter LogIter, - runner *checkpoint.CheckpointRunner[checkpoint.LogRestoreKeyType, checkpoint.LogRestoreValueType], pitrBatchCount uint32, pitrBatchSize uint32, updateStats func(kvCount uint64, size uint64), @@ -522,31 +725,26 @@ func (rc *LogClient) RestoreKVFiles( // For this version we do not handle new created table after full backup. // in next version we will perform rewrite and restore meta key to restore new created tables. // so we can simply skip the file that doesn't have the rule here. 
- onProgress(int64(len(files))) + onProgress(kvCount) summary.CollectInt("FileSkip", len(files)) log.Debug("skip file due to table id not matched", zap.Int64("table-id", files[0].TableId)) skipFile += len(files) } else { applyWg.Add(1) - downstreamId := idrules[files[0].TableId] - rc.workerPool.ApplyOnErrorGroup(eg, func() (err error) { + rc.logRestoreManager.workerPool.ApplyOnErrorGroup(eg, func() (err error) { fileStart := time.Now() defer applyWg.Done() defer func() { - onProgress(int64(len(files))) + onProgress(kvCount) updateStats(uint64(kvCount), size) summary.CollectInt("File", len(files)) if err == nil { filenames := make([]string, 0, len(files)) - if runner == nil { - for _, f := range files { - filenames = append(filenames, f.Path+", ") - } - } else { - for _, f := range files { - filenames = append(filenames, f.Path+", ") - if e := checkpoint.AppendRangeForLogRestore(ectx, runner, f.MetaDataGroupName, downstreamId, f.OffsetInMetaGroup, f.OffsetInMergedGroup); e != nil { + for _, f := range files { + filenames = append(filenames, f.Path+", ") + if rc.logRestoreManager.checkpointRunner != nil { + if e := checkpoint.AppendRangeForLogRestore(ectx, rc.logRestoreManager.checkpointRunner, f.MetaDataGroupName, rule.NewTableID, f.OffsetInMetaGroup, f.OffsetInMergedGroup); e != nil { err = errors.Annotate(e, "failed to append checkpoint data") break } @@ -557,13 +755,13 @@ func (rc *LogClient) RestoreKVFiles( } }() - return rc.fileImporter.ImportKVFiles(ectx, files, rule, rc.shiftStartTS, rc.startTS, rc.restoreTS, + return rc.logRestoreManager.fileImporter.ImportKVFiles(ectx, files, rule, rc.shiftStartTS, rc.startTS, rc.restoreTS, supportBatch, cipherInfo, masterKeys) }) } } - rc.workerPool.ApplyOnErrorGroup(eg, func() error { + rc.logRestoreManager.workerPool.ApplyOnErrorGroup(eg, func() error { if supportBatch { err = ApplyKVFilesWithBatchMethod(ectx, logIter, int(pitrBatchCount), uint64(pitrBatchSize), applyFunc, &applyWg) } else { @@ -590,7 +788,7 @@ func (rc *LogClient) initSchemasMap( restoreTS uint64, ) ([]*backuppb.PitrDBMap, error) { getPitrIDMapSQL := "SELECT segment_id, id_map FROM mysql.tidb_pitr_id_map WHERE restored_ts = %? and upstream_cluster_id = %? ORDER BY segment_id;" - execCtx := rc.se.GetSessionCtx().GetRestrictedSQLExecutor() + execCtx := rc.unsafeSession.GetSessionCtx().GetRestrictedSQLExecutor() rows, _, errSQL := execCtx.ExecRestrictedSQL( kv.WithInternalSourceType(ctx, kv.InternalTxnBR), nil, @@ -1246,36 +1444,45 @@ func (rc *LogClient) UpdateSchemaVersion(ctx context.Context) error { return nil } -func (rc *LogClient) WrapLogFilesIterWithSplitHelper(logIter LogIter, rules map[int64]*restoreutils.RewriteRules, g glue.Glue, store kv.Storage) (LogIter, error) { - se, err := g.CreateSession(store) - if err != nil { - return nil, errors.Trace(err) - } - execCtx := se.GetSessionCtx().GetRestrictedSQLExecutor() - splitSize, splitKeys := utils.GetRegionSplitInfo(execCtx) - log.Info("get split threshold from tikv config", zap.Uint64("split-size", splitSize), zap.Int64("split-keys", splitKeys)) +// WrapCompactedFilesIteratorWithSplit applies a splitting strategy to the compacted files iterator. +// It uses a region splitter to handle the splitting logic based on the provided rules and checkpoint sets. 
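The exported pieces added in this file (GetMigrations, InitCheckpointMetadataForCompactedSstRestore, WrapCompactedFilesIterWithSplitHelper, RestoreCompactedSstFiles) are wired together by the PITR task code outside this hunk. The sketch below shows one plausible call order only; it is an assumption rather than the actual task implementation, and every value it takes as a parameter (the compacted-file iterator, rewrite rules, mode switcher, progress callback, split thresholds) is prepared elsewhere:

package logclient // sketch only; assumes it sits next to client.go

import (
	"context"

	backuppb "github.com/pingcap/kvproto/pkg/brpb"
	"github.com/pingcap/tidb/br/pkg/restore"
	restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils"
	"github.com/pingcap/tidb/br/pkg/utils/iter"
)

func restoreCompactedSSTsSketch(
	ctx context.Context,
	rc *LogClient,
	compactedIter iter.TryNextor[*backuppb.LogFileSubcompaction],
	rules map[int64]*restoreutils.RewriteRules,
	switcher *restore.ImportModeSwitcher,
	onProgress func(int64),
	splitSize uint64,
	splitKeys int64,
) error {
	// Skip SSTs that an earlier run already restored (currently a stub set).
	checkpointSets, err := rc.InitCheckpointMetadataForCompactedSstRestore(ctx)
	if err != nil {
		return err
	}
	// Pre-split regions so the later downloads stay local to their region.
	splitIter, err := rc.WrapCompactedFilesIterWithSplitHelper(
		ctx, compactedIter, rules, checkpointSets,
		func(kvCount, size uint64) { /* fold skipped files into progress/statistics */ },
		splitSize, splitKeys,
	)
	if err != nil {
		return err
	}
	// Ingest the compacted SSTs; import mode is toggled inside this call.
	return rc.RestoreCompactedSstFiles(ctx, splitIter, rules, switcher, onProgress)
}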
+func (rc *LogClient) WrapCompactedFilesIterWithSplitHelper( + ctx context.Context, + compactedIter iter.TryNextor[*backuppb.LogFileSubcompaction], + rules map[int64]*restoreutils.RewriteRules, + checkpointSets map[string]struct{}, + updateStatsFn func(uint64, uint64), + splitSize uint64, + splitKeys int64, +) (iter.TryNextor[*backuppb.LogFileSubcompaction], error) { client := split.NewClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, maxSplitKeysOnce, 3) - return NewLogFilesIterWithSplitHelper(logIter, rules, client, splitSize, splitKeys), nil + wrapper := restore.PipelineRestorerWrapper[*backuppb.LogFileSubcompaction]{ + PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, splitSize, splitKeys), + } + strategy := NewCompactedFileSplitStrategy(rules, checkpointSets, updateStatsFn) + return wrapper.WithSplit(ctx, compactedIter, strategy), nil } -func (rc *LogClient) generateKvFilesSkipMap(ctx context.Context, downstreamIdset map[int64]struct{}) (*LogFilesSkipMap, error) { - skipMap := NewLogFilesSkipMap() - t, err := checkpoint.LoadCheckpointDataForLogRestore( - ctx, rc.se.GetSessionCtx().GetRestrictedSQLExecutor(), func(groupKey checkpoint.LogRestoreKeyType, off checkpoint.LogRestoreValueMarshaled) { - for tableID, foffs := range off.Foffs { - // filter out the checkpoint data of dropped table - if _, exists := downstreamIdset[tableID]; exists { - for _, foff := range foffs { - skipMap.Insert(groupKey, off.Goff, foff) - } - } - } - }) +// WrapLogFilesIteratorWithSplit applies a splitting strategy to the log files iterator. +// It uses a region splitter to handle the splitting logic based on the provided rules. +func (rc *LogClient) WrapLogFilesIterWithSplitHelper( + ctx context.Context, + logIter LogIter, + execCtx sqlexec.RestrictedSQLExecutor, + rules map[int64]*restoreutils.RewriteRules, + updateStatsFn func(uint64, uint64), + splitSize uint64, + splitKeys int64, +) (LogIter, error) { + client := split.NewClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, maxSplitKeysOnce, 3) + wrapper := restore.PipelineRestorerWrapper[*LogDataFileInfo]{ + PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, splitSize, splitKeys), + } + strategy, err := NewLogSplitStrategy(ctx, rc.useCheckpoint, execCtx, rules, updateStatsFn) if err != nil { return nil, errors.Trace(err) } - summary.AdjustStartTimeToEarlierTime(t) - return skipMap, nil + return wrapper.WithSplit(ctx, logIter, strategy), nil } func WrapLogFilesIterWithCheckpointFailpoint( @@ -1304,27 +1511,6 @@ func WrapLogFilesIterWithCheckpointFailpoint( return logIter, nil } -func (rc *LogClient) WrapLogFilesIterWithCheckpoint( - ctx context.Context, - logIter LogIter, - downstreamIdset map[int64]struct{}, - updateStats func(kvCount, size uint64), - onProgress func(), -) (LogIter, error) { - skipMap, err := rc.generateKvFilesSkipMap(ctx, downstreamIdset) - if err != nil { - return nil, errors.Trace(err) - } - return iter.FilterOut(logIter, func(d *LogDataFileInfo) bool { - if skipMap.NeedSkip(d.MetaDataGroupName, d.OffsetInMetaGroup, d.OffsetInMergedGroup) { - onProgress() - updateStats(uint64(d.NumberOfEntries), d.Length) - return true - } - return false - }), nil -} - const ( alterTableDropIndexSQL = "ALTER TABLE %n.%n DROP INDEX %n" alterTableAddIndexFormat = "ALTER TABLE %%n.%%n ADD INDEX %%n(%s)" @@ -1339,7 +1525,7 @@ func (rc *LogClient) generateRepairIngestIndexSQLs( var sqls []checkpoint.CheckpointIngestIndexRepairSQL if rc.useCheckpoint { if checkpoint.ExistsCheckpointIngestIndexRepairSQLs(ctx, rc.dom) { - 
checkpointSQLs, err := checkpoint.LoadCheckpointIngestIndexRepairSQLs(ctx, rc.se.GetSessionCtx().GetRestrictedSQLExecutor()) + checkpointSQLs, err := checkpoint.LoadCheckpointIngestIndexRepairSQLs(ctx, rc.unsafeSession.GetSessionCtx().GetRestrictedSQLExecutor()) if err != nil { return sqls, false, errors.Trace(err) } @@ -1404,7 +1590,7 @@ func (rc *LogClient) generateRepairIngestIndexSQLs( } if rc.useCheckpoint && len(sqls) > 0 { - if err := checkpoint.SaveCheckpointIngestIndexRepairSQLs(ctx, rc.se, &checkpoint.CheckpointIngestIndexRepairSQLs{ + if err := checkpoint.SaveCheckpointIngestIndexRepairSQLs(ctx, rc.unsafeSession, &checkpoint.CheckpointIngestIndexRepairSQLs{ SQLs: sqls, }); err != nil { return sqls, false, errors.Trace(err) @@ -1466,7 +1652,7 @@ NEXTSQL: // only when first execution or old index id is not dropped if !fromCheckpoint || oldIndexIDFound { - if err := rc.se.ExecuteInternal(ctx, alterTableDropIndexSQL, sql.SchemaName.O, sql.TableName.O, sql.IndexName); err != nil { + if err := rc.unsafeSession.ExecuteInternal(ctx, alterTableDropIndexSQL, sql.SchemaName.O, sql.TableName.O, sql.IndexName); err != nil { return errors.Trace(err) } } @@ -1476,7 +1662,7 @@ NEXTSQL: } }) // create the repaired index when first execution or not found it - if err := rc.se.ExecuteInternal(ctx, sql.AddSQL, sql.AddArgs...); err != nil { + if err := rc.unsafeSession.ExecuteInternal(ctx, sql.AddSQL, sql.AddArgs...); err != nil { return errors.Trace(err) } w.Inc() @@ -1549,7 +1735,7 @@ func (rc *LogClient) InsertGCRows(ctx context.Context) error { // trim the ',' behind the query.Sql if exists // that's when the rewrite rule of the last table id is not exist sql := strings.TrimSuffix(query.Sql, ",") - if err := rc.se.ExecuteInternal(ctx, sql, paramsList...); err != nil { + if err := rc.unsafeSession.ExecuteInternal(ctx, sql, paramsList...); err != nil { return errors.Trace(err) } } @@ -1577,7 +1763,7 @@ func (rc *LogClient) saveIDMap( return errors.Trace(err) } // clean the dirty id map at first - err = rc.se.ExecuteInternal(ctx, "DELETE FROM mysql.tidb_pitr_id_map WHERE restored_ts = %? and upstream_cluster_id = %?;", rc.restoreTS, rc.upstreamClusterID) + err = rc.unsafeSession.ExecuteInternal(ctx, "DELETE FROM mysql.tidb_pitr_id_map WHERE restored_ts = %? 
and upstream_cluster_id = %?;", rc.restoreTS, rc.upstreamClusterID) if err != nil { return errors.Trace(err) } @@ -1587,7 +1773,7 @@ func (rc *LogClient) saveIDMap( if endIdx > len(data) { endIdx = len(data) } - err := rc.se.ExecuteInternal(ctx, replacePitrIDMapSQL, rc.restoreTS, rc.upstreamClusterID, segmentId, data[startIdx:endIdx]) + err := rc.unsafeSession.ExecuteInternal(ctx, replacePitrIDMapSQL, rc.restoreTS, rc.upstreamClusterID, segmentId, data[startIdx:endIdx]) if err != nil { return errors.Trace(err) } @@ -1596,7 +1782,7 @@ func (rc *LogClient) saveIDMap( if rc.useCheckpoint { log.Info("save checkpoint task info with InLogRestoreAndIdMapPersist status") - if err := checkpoint.SaveCheckpointProgress(ctx, rc.se, &checkpoint.CheckpointProgress{ + if err := checkpoint.SaveCheckpointProgress(ctx, rc.unsafeSession, &checkpoint.CheckpointProgress{ Progress: checkpoint.InLogRestoreAndIdMapPersist, }); err != nil { return errors.Trace(err) @@ -1613,7 +1799,6 @@ func (rc *LogClient) FailpointDoChecksumForLogRestore( ctx context.Context, kvClient kv.Client, pdClient pd.Client, - idrules map[int64]int64, rewriteRules map[int64]*restoreutils.RewriteRules, ) (finalErr error) { startTS, err := restore.GetTSWithRetry(ctx, rc.pdClient) @@ -1650,11 +1835,11 @@ func (rc *LogClient) FailpointDoChecksumForLogRestore( infoSchema := rc.GetDomain().InfoSchema() // downstream id -> upstream id reidRules := make(map[int64]int64) - for upstreamID, downstreamID := range idrules { - reidRules[downstreamID] = upstreamID + for upstreamID, r := range rewriteRules { + reidRules[r.NewTableID] = upstreamID } - for upstreamID, downstreamID := range idrules { - newTable, ok := infoSchema.TableByID(ctx, downstreamID) + for upstreamID, r := range rewriteRules { + newTable, ok := infoSchema.TableByID(ctx, r.NewTableID) if !ok { // a dropped table continue @@ -1715,52 +1900,3 @@ func (rc *LogClient) FailpointDoChecksumForLogRestore( return eg.Wait() } - -type LogFilesIterWithSplitHelper struct { - iter LogIter - helper *split.LogSplitHelper - buffer []*LogDataFileInfo - next int -} - -const SplitFilesBufferSize = 4096 - -func NewLogFilesIterWithSplitHelper(iter LogIter, rules map[int64]*restoreutils.RewriteRules, client split.SplitClient, splitSize uint64, splitKeys int64) LogIter { - return &LogFilesIterWithSplitHelper{ - iter: iter, - helper: split.NewLogSplitHelper(rules, client, splitSize, splitKeys), - buffer: nil, - next: 0, - } -} - -func (splitIter *LogFilesIterWithSplitHelper) TryNext(ctx context.Context) iter.IterResult[*LogDataFileInfo] { - if splitIter.next >= len(splitIter.buffer) { - splitIter.buffer = make([]*LogDataFileInfo, 0, SplitFilesBufferSize) - for r := splitIter.iter.TryNext(ctx); !r.Finished; r = splitIter.iter.TryNext(ctx) { - if r.Err != nil { - return r - } - f := r.Item - splitIter.helper.Merge(f.DataFileInfo) - splitIter.buffer = append(splitIter.buffer, f) - if len(splitIter.buffer) >= SplitFilesBufferSize { - break - } - } - splitIter.next = 0 - if len(splitIter.buffer) == 0 { - return iter.Done[*LogDataFileInfo]() - } - log.Info("start to split the regions") - startTime := time.Now() - if err := splitIter.helper.Split(ctx); err != nil { - return iter.Throw[*LogDataFileInfo](errors.Trace(err)) - } - log.Info("end to split the regions", zap.Duration("takes", time.Since(startTime))) - } - - res := iter.Emit(splitIter.buffer[splitIter.next]) - splitIter.next += 1 - return res -} diff --git a/br/pkg/restore/log_client/client_test.go b/br/pkg/restore/log_client/client_test.go index 
6b16ec34c28ba..504d7bb798d72 100644 --- a/br/pkg/restore/log_client/client_test.go +++ b/br/pkg/restore/log_client/client_test.go @@ -22,12 +22,15 @@ import ( "testing" "time" + "github.com/docker/go-units" "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/gluetidb" "github.com/pingcap/tidb/br/pkg/mock" + "github.com/pingcap/tidb/br/pkg/restore" logclient "github.com/pingcap/tidb/br/pkg/restore/log_client" "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/restore/utils" @@ -35,12 +38,14 @@ import ( "github.com/pingcap/tidb/br/pkg/utils/iter" "github.com/pingcap/tidb/br/pkg/utiltest" "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/planner/core/resolve" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util/chunk" + "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/sqlexec" filter "github.com/pingcap/tidb/pkg/util/table-filter" "github.com/stretchr/testify/require" @@ -92,7 +97,7 @@ func TestDeleteRangeQueryExec(t *testing.T) { g := gluetidb.New() client := logclient.NewRestoreClient( split.NewFakePDClient(nil, false, nil), nil, nil, keepalive.ClientParameters{}) - err := client.Init(g, m.Storage) + err := client.Init(ctx, g, m.Storage) require.NoError(t, err) client.RunGCRowsLoader(ctx) @@ -111,7 +116,7 @@ func TestDeleteRangeQuery(t *testing.T) { g := gluetidb.New() client := logclient.NewRestoreClient( split.NewFakePDClient(nil, false, nil), nil, nil, keepalive.ClientParameters{}) - err := client.Init(g, m.Storage) + err := client.Init(ctx, g, m.Storage) require.NoError(t, err) client.RunGCRowsLoader(ctx) @@ -1339,7 +1344,12 @@ func TestLogFilesIterWithSplitHelper(t *testing.T) { } mockIter := &mockLogIter{} ctx := context.Background() - logIter := logclient.NewLogFilesIterWithSplitHelper(mockIter, rewriteRulesMap, split.NewFakeSplitClient(), 144*1024*1024, 1440000) + w := restore.PipelineRestorerWrapper[*logclient.LogDataFileInfo]{ + PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(split.NewFakeSplitClient(), 144*1024*1024, 1440000), + } + s, err := logclient.NewLogSplitStrategy(ctx, false, nil, rewriteRulesMap, func(uint64, uint64) {}) + require.NoError(t, err) + logIter := w.WithSplit(context.Background(), mockIter, s) next := 0 for r := logIter.TryNext(ctx); !r.Finished; r = logIter.TryNext(ctx) { require.NoError(t, r.Err) @@ -1506,3 +1516,476 @@ func TestPITRIDMap(t *testing.T) { } } } + +type mockLogStrategy struct { + *logclient.LogSplitStrategy + expectSplitCount int +} + +func (m *mockLogStrategy) ShouldSplit() bool { + return m.AccumulateCount == m.expectSplitCount +} + +func TestLogSplitStrategy(t *testing.T) { + ctx := context.Background() + + // Define rewrite rules for table ID transformations. + rules := map[int64]*utils.RewriteRules{ + 1: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), + NewKeyPrefix: tablecodec.GenTableRecordPrefix(100), + }, + }, + }, + 2: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), + NewKeyPrefix: tablecodec.GenTableRecordPrefix(200), + }, + }, + }, + } + + // Define initial regions for the mock PD client. 
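The region boundaries and fake row keys used by these tests are ordinary memcomparable TiDB keys. A small sketch, built with the same tablecodec/codec helpers this test file imports, of one table boundary and one row key of the kind produced by the fakeRowKey helper, and why the row key sorts after the boundary (so splitting at it lands inside the table's region):

package main

import (
	"bytes"
	"fmt"

	"github.com/pingcap/tidb/pkg/kv"
	"github.com/pingcap/tidb/pkg/tablecodec"
	"github.com/pingcap/tidb/pkg/util/codec"
)

func main() {
	// A region boundary at the start of downstream table 100.
	tableBoundary := codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100))

	// A row key inside table 100: record prefix + integer handle, then
	// the memcomparable EncodeBytes wrapper (same shape as fakeRowKey).
	rowKey := codec.EncodeBytes(nil,
		tablecodec.EncodeRecordKey(tablecodec.GenTableRecordPrefix(100), kv.IntHandle(400)))

	fmt.Printf("table boundary: %X\n", tableBoundary)
	fmt.Printf("row key:        %X\n", rowKey)
	// The boundary sorts before every row of the table, so a split at the
	// row key always falls strictly inside the table-100 region.
	fmt.Println("boundary < rowKey:", bytes.Compare(tableBoundary, rowKey) < 0) // true
}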
+ oriRegions := [][]byte{ + {}, + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + } + + // Set up a mock PD client with predefined regions. + storesMap := make(map[uint64]*metapb.Store) + storesMap[1] = &metapb.Store{Id: 1} + mockPDCli := split.NewMockPDClientForSplit() + mockPDCli.SetStores(storesMap) + mockPDCli.SetRegions(oriRegions) + + // Create a split client with the mock PD client. + client := split.NewClient(mockPDCli, nil, nil, 100, 4) + + // Define a mock iterator with sample data files. + mockIter := iter.FromSlice([]*backuppb.DataFileInfo{ + fakeFile(1, 100, 100, 100), + fakeFile(1, 200, 2*units.MiB, 200), + fakeFile(2, 100, 3*units.MiB, 300), + fakeFile(3, 100, 10*units.MiB, 100000), + fakeFile(1, 300, 3*units.MiB, 10), + fakeFile(1, 400, 4*units.MiB, 10), + }) + logIter := toLogDataFileInfoIter(mockIter) + + // Initialize a wrapper for the file restorer with a region splitter. + wrapper := restore.PipelineRestorerWrapper[*logclient.LogDataFileInfo]{ + PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, 4*units.MB, 400), + } + + // Create a log split strategy with the given rewrite rules. + strategy, err := logclient.NewLogSplitStrategy(ctx, false, nil, rules, func(u1, u2 uint64) {}) + require.NoError(t, err) + + // Set up a mock strategy to control split behavior. + expectSplitCount := 2 + mockStrategy := &mockLogStrategy{ + LogSplitStrategy: strategy, + // fakeFile(3, 100, 10*units.MiB, 100000) will skipped due to no rewrite rule found. + expectSplitCount: expectSplitCount, + } + + // Use the wrapper to apply the split strategy to the log iterator. + helper := wrapper.WithSplit(ctx, logIter, mockStrategy) + + // Iterate over the log items and verify the split behavior. + count := 0 + for i := helper.TryNext(ctx); !i.Finished; i = helper.TryNext(ctx) { + require.NoError(t, i.Err) + if count == expectSplitCount { + // Verify that no split occurs initially due to insufficient data. + regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) + require.NoError(t, err) + require.Len(t, regions, 3) + require.Equal(t, []byte{}, regions[0].Meta.StartKey) + require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), regions[1].Meta.StartKey) + require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), regions[2].Meta.StartKey) + require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), regions[2].Meta.EndKey) + } + // iter.Filterout execute first + count += 1 + } + + // Verify that a split occurs on the second region due to excess data. 
+ regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) + require.NoError(t, err) + require.Len(t, regions, 4) + require.Equal(t, fakeRowKey(100, 400), kv.Key(regions[1].Meta.EndKey)) +} + +type mockCompactedStrategy struct { + *logclient.CompactedFileSplitStrategy + expectSplitCount int +} + +func (m *mockCompactedStrategy) ShouldSplit() bool { + return m.AccumulateCount%m.expectSplitCount == 0 +} + +func TestCompactedSplitStrategy(t *testing.T) { + ctx := context.Background() + + rules := map[int64]*utils.RewriteRules{ + 1: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), + NewKeyPrefix: tablecodec.GenTableRecordPrefix(100), + }, + }, + }, + 2: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), + NewKeyPrefix: tablecodec.GenTableRecordPrefix(200), + }, + }, + }, + } + + oriRegions := [][]byte{ + {}, + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + } + + cases := []struct { + MockSubcompationIter iter.TryNextor[*backuppb.LogFileSubcompaction] + ExpectRegionEndKeys [][]byte + }{ + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(2, 100, 48*units.MiB, 300), + fakeSubCompactionWithOneSst(3, 100, 100*units.MiB, 100000), + }), + // no split + // table 1 has not accumlate enough 400 keys or 4MB + [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(1, 100, 32*units.MiB, 10), + fakeSubCompactionWithOneSst(2, 100, 48*units.MiB, 300), + }), + // split on table 1 + // table 1 has accumlate enough keys + [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + []byte(fakeRowKey(100, 200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), + fakeSubCompactionWithOneSst(3, 100, 10*units.MiB, 100000), + fakeSubCompactionWithOneSst(1, 300, 48*units.MiB, 13), + fakeSubCompactionWithOneSst(1, 400, 64*units.MiB, 14), + fakeSubCompactionWithOneSst(1, 100, 1*units.MiB, 15), + }), + [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + []byte(fakeRowKey(100, 400)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + } + for _, ca := range cases { + storesMap := make(map[uint64]*metapb.Store) + storesMap[1] = &metapb.Store{Id: 1} + mockPDCli := split.NewMockPDClientForSplit() + mockPDCli.SetStores(storesMap) + mockPDCli.SetRegions(oriRegions) + + client := split.NewClient(mockPDCli, nil, nil, 100, 4) + wrapper := restore.PipelineRestorerWrapper[*backuppb.LogFileSubcompaction]{ + PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, 4*units.MB, 400), + } 
+ + strategy := logclient.NewCompactedFileSplitStrategy(rules, nil, nil) + mockStrategy := &mockCompactedStrategy{ + CompactedFileSplitStrategy: strategy, + expectSplitCount: 3, + } + + helper := wrapper.WithSplit(ctx, ca.MockSubcompationIter, mockStrategy) + + for i := helper.TryNext(ctx); !i.Finished; i = helper.TryNext(ctx) { + require.NoError(t, i.Err) + } + + regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) + require.NoError(t, err) + require.Len(t, regions, len(ca.ExpectRegionEndKeys)) + for i, endKey := range ca.ExpectRegionEndKeys { + require.Equal(t, endKey, regions[i].Meta.EndKey) + } + } +} + +func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { + ctx := context.Background() + + rules := map[int64]*utils.RewriteRules{ + 1: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), + NewKeyPrefix: tablecodec.GenTableRecordPrefix(100), + }, + }, + }, + 2: { + Data: []*import_sstpb.RewriteRule{ + { + OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), + NewKeyPrefix: tablecodec.GenTableRecordPrefix(200), + }, + }, + }, + } + + oriRegions := [][]byte{ + {}, + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + } + + cases := []struct { + MockSubcompationIter iter.TryNextor[*backuppb.LogFileSubcompaction] + CheckpointSet map[string]struct{} + ProcessedKVCount int + ProcessedSize int + ExpectRegionEndKeys [][]byte + }{ + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(2, 100, 48*units.MiB, 300), + fakeSubCompactionWithOneSst(3, 100, 100*units.MiB, 100000), + }), + map[string]struct{}{ + "1:100": {}, + "1:200": {}, + }, + 300, + 48 * units.MiB, + // no split, checkpoint files came in order + [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(1, 100, 32*units.MiB, 10), + fakeSubCompactionWithOneSst(2, 100, 48*units.MiB, 300), + }), + map[string]struct{}{ + "1:100": {}, + }, + 110, + 48 * units.MiB, + // no split, checkpoint files came in different order + [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), + fakeSubCompactionWithOneSst(3, 100, 10*units.MiB, 100000), + fakeSubCompactionWithOneSst(1, 300, 48*units.MiB, 13), + fakeSubCompactionWithOneSst(1, 400, 64*units.MiB, 14), + fakeSubCompactionWithOneSst(1, 100, 1*units.MiB, 15), + }), + map[string]struct{}{ + "1:300": {}, + "1:400": {}, + }, + 27, + 112 * units.MiB, + // no split, the main file has skipped due to checkpoint. 
+ [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), + fakeSubCompactionWithOneSst(3, 100, 10*units.MiB, 100000), + fakeSubCompactionWithOneSst(1, 300, 48*units.MiB, 13), + fakeSubCompactionWithOneSst(1, 400, 64*units.MiB, 14), + fakeSubCompactionWithOneSst(1, 100, 1*units.MiB, 15), + }), + map[string]struct{}{ + "1:100": {}, + "1:200": {}, + }, + 315, + 49 * units.MiB, + [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + []byte(fakeRowKey(100, 400)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + { + iter.FromSlice([]*backuppb.LogFileSubcompaction{ + fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), + fakeSubCompactionWithMultiSsts(1, 200, 32*units.MiB, 200), + fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), + fakeSubCompactionWithOneSst(3, 100, 10*units.MiB, 100000), + fakeSubCompactionWithOneSst(1, 300, 48*units.MiB, 13), + fakeSubCompactionWithOneSst(1, 400, 64*units.MiB, 14), + fakeSubCompactionWithOneSst(1, 100, 1*units.MiB, 15), + }), + map[string]struct{}{ + "1:100": {}, + "1:200": {}, + }, + 315, + 49 * units.MiB, + [][]byte{ + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), + []byte(fakeRowKey(100, 300)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), + codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), + }, + }, + } + for _, ca := range cases { + storesMap := make(map[uint64]*metapb.Store) + storesMap[1] = &metapb.Store{Id: 1} + mockPDCli := split.NewMockPDClientForSplit() + mockPDCli.SetStores(storesMap) + mockPDCli.SetRegions(oriRegions) + + client := split.NewClient(mockPDCli, nil, nil, 100, 4) + wrapper := restore.PipelineRestorerWrapper[*backuppb.LogFileSubcompaction]{ + PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, 4*units.MB, 400), + } + totalSize := 0 + totalKvCount := 0 + + strategy := logclient.NewCompactedFileSplitStrategy(rules, ca.CheckpointSet, func(u1, u2 uint64) { + totalKvCount += int(u1) + totalSize += int(u2) + }) + mockStrategy := &mockCompactedStrategy{ + CompactedFileSplitStrategy: strategy, + expectSplitCount: 3, + } + + helper := wrapper.WithSplit(ctx, ca.MockSubcompationIter, mockStrategy) + + for i := helper.TryNext(ctx); !i.Finished; i = helper.TryNext(ctx) { + require.NoError(t, i.Err) + } + + regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) + require.NoError(t, err) + require.Len(t, regions, len(ca.ExpectRegionEndKeys)) + for i, endKey := range ca.ExpectRegionEndKeys { + require.Equal(t, endKey, regions[i].Meta.EndKey) + } + require.Equal(t, totalKvCount, ca.ProcessedKVCount) + require.Equal(t, totalSize, ca.ProcessedSize) + } +} + +func fakeSubCompactionWithMultiSsts(tableID, rowID int64, length uint64, num uint64) *backuppb.LogFileSubcompaction { + return &backuppb.LogFileSubcompaction{ + Meta: &backuppb.LogFileSubcompactionMeta{ + TableId: tableID, + }, + SstOutputs: []*backuppb.File{ + { + Name: fmt.Sprintf("%d:%d", tableID, rowID), + StartKey: fakeRowRawKey(tableID, rowID), + EndKey: fakeRowRawKey(tableID, rowID+1), + Size_: length, + TotalKvs: num, + }, + { + Name: 
fmt.Sprintf("%d:%d", tableID, rowID+1), + StartKey: fakeRowRawKey(tableID, rowID+1), + EndKey: fakeRowRawKey(tableID, rowID+2), + Size_: length, + TotalKvs: num, + }, + }, + } +} +func fakeSubCompactionWithOneSst(tableID, rowID int64, length uint64, num uint64) *backuppb.LogFileSubcompaction { + return &backuppb.LogFileSubcompaction{ + Meta: &backuppb.LogFileSubcompactionMeta{ + TableId: tableID, + }, + SstOutputs: []*backuppb.File{ + { + Name: fmt.Sprintf("%d:%d", tableID, rowID), + StartKey: fakeRowRawKey(tableID, rowID), + EndKey: fakeRowRawKey(tableID, rowID+1), + Size_: length, + TotalKvs: num, + }, + }, + } +} + +func fakeFile(tableID, rowID int64, length uint64, num int64) *backuppb.DataFileInfo { + return &backuppb.DataFileInfo{ + StartKey: fakeRowKey(tableID, rowID), + EndKey: fakeRowKey(tableID, rowID+1), + TableId: tableID, + Length: length, + NumberOfEntries: num, + } +} + +func fakeRowKey(tableID, rowID int64) kv.Key { + return codec.EncodeBytes(nil, fakeRowRawKey(tableID, rowID)) +} + +func fakeRowRawKey(tableID, rowID int64) kv.Key { + return tablecodec.EncodeRecordKey(tablecodec.GenTableRecordPrefix(tableID), kv.IntHandle(rowID)) +} diff --git a/br/pkg/restore/log_client/compacted_file_strategy.go b/br/pkg/restore/log_client/compacted_file_strategy.go new file mode 100644 index 0000000000000..9637cf2e529b6 --- /dev/null +++ b/br/pkg/restore/log_client/compacted_file_strategy.go @@ -0,0 +1,112 @@ +// Copyright 2024 PingCAP, Inc. Licensed under Apache-2.0. + +package logclient + +import ( + "fmt" + + backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/br/pkg/restore/split" + restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" + "github.com/pingcap/tidb/pkg/util/codec" + "go.uber.org/zap" +) + +// The impact factor is used to reduce the size and number of MVCC entries +// in SST files, helping to optimize performance and resource usage. +const impactFactor = 16 + +type CompactedFileSplitStrategy struct { + *split.BaseSplitStrategy + checkpointSets map[string]struct{} + checkpointFileProgressFn func(uint64, uint64) +} + +var _ split.SplitStrategy[*backuppb.LogFileSubcompaction] = &CompactedFileSplitStrategy{} + +func NewCompactedFileSplitStrategy( + rules map[int64]*restoreutils.RewriteRules, + checkpointsSet map[string]struct{}, + updateStatsFn func(uint64, uint64), +) *CompactedFileSplitStrategy { + return &CompactedFileSplitStrategy{ + BaseSplitStrategy: split.NewBaseSplitStrategy(rules), + checkpointSets: checkpointsSet, + checkpointFileProgressFn: updateStatsFn, + } +} + +func (cs *CompactedFileSplitStrategy) Accumulate(subCompaction *backuppb.LogFileSubcompaction) { + splitHelper, exist := cs.TableSplitter[subCompaction.Meta.TableId] + if !exist { + splitHelper = split.NewSplitHelper() + cs.TableSplitter[subCompaction.Meta.TableId] = splitHelper + } + + for _, f := range subCompaction.SstOutputs { + startKey := codec.EncodeBytes(nil, f.StartKey) + endKey := codec.EncodeBytes(nil, f.EndKey) + cs.AccumulateCount += 1 + if f.TotalKvs == 0 || f.Size_ == 0 { + log.Error("No key-value pairs in subcompaction", zap.String("name", f.Name)) + continue + } + // The number of MVCC entries in the compacted SST files can be excessive. + // This calculation takes the MVCC impact into account to optimize performance. 
+ calculateCount := int64(f.TotalKvs) / impactFactor + if calculateCount == 0 { + // at least consider as 1 key impact + log.Warn(fmt.Sprintf("less than %d key-value pairs in subcompaction", impactFactor), zap.String("name", f.Name)) + calculateCount = 1 + } + calculateSize := f.Size_ / impactFactor + if calculateSize == 0 { + log.Warn(fmt.Sprintf("less than %d key-value size in subcompaction", impactFactor), zap.String("name", f.Name)) + calculateSize = 1 + } + splitHelper.Merge(split.Valued{ + Key: split.Span{ + StartKey: startKey, + EndKey: endKey, + }, + Value: split.Value{ + Size: calculateSize, + Number: calculateCount, + }, + }) + } +} + +func (cs *CompactedFileSplitStrategy) ShouldSplit() bool { + return cs.AccumulateCount > (4096 / impactFactor) +} + +func (cs *CompactedFileSplitStrategy) ShouldSkip(subCompaction *backuppb.LogFileSubcompaction) bool { + _, exist := cs.Rules[subCompaction.Meta.TableId] + if !exist { + log.Info("skip for no rule files", zap.Int64("tableID", subCompaction.Meta.TableId)) + return true + } + sstOutputs := make([]*backuppb.File, 0, len(subCompaction.SstOutputs)) + for _, sst := range subCompaction.SstOutputs { + if _, ok := cs.checkpointSets[sst.Name]; !ok { + sstOutputs = append(sstOutputs, sst) + } else { + // This file is recorded in the checkpoint, indicating that it has + // already been restored to the cluster. Therefore, we will skip + // processing this file and only update the statistics. + cs.checkpointFileProgressFn(sst.TotalKvs, sst.Size_) + } + } + if len(sstOutputs) == 0 { + log.Info("all files in sub compaction skipped") + return true + } + if len(sstOutputs) != len(subCompaction.SstOutputs) { + log.Info("partial files in sub compaction skipped due to checkpoint") + subCompaction.SstOutputs = sstOutputs + return false + } + return false +} diff --git a/br/pkg/restore/log_client/export_test.go b/br/pkg/restore/log_client/export_test.go index 70a15e1ad2393..9c95409c9d754 100644 --- a/br/pkg/restore/log_client/export_test.go +++ b/br/pkg/restore/log_client/export_test.go @@ -91,7 +91,7 @@ func (rc *LogFileManager) ReadStreamMeta(ctx context.Context) ([]*MetaName, erro func TEST_NewLogClient(clusterID, startTS, restoreTS, upstreamClusterID uint64, dom *domain.Domain, se glue.Session) *LogClient { return &LogClient{ dom: dom, - se: se, + unsafeSession: se, upstreamClusterID: upstreamClusterID, LogFileManager: &LogFileManager{ startTS: startTS, diff --git a/br/pkg/restore/log_client/import_retry_test.go b/br/pkg/restore/log_client/import_retry_test.go index bcde03c69c1ed..04f2a56dc342b 100644 --- a/br/pkg/restore/log_client/import_retry_test.go +++ b/br/pkg/restore/log_client/import_retry_test.go @@ -3,13 +3,11 @@ package logclient_test import ( - "bytes" "context" "encoding/hex" "fmt" "os" "strconv" - "sync" "testing" "time" @@ -18,14 +16,12 @@ import ( "github.com/pingcap/kvproto/pkg/errorpb" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/kvproto/pkg/pdpb" logclient "github.com/pingcap/tidb/br/pkg/restore/log_client" "github.com/pingcap/tidb/br/pkg/restore/split" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/store/pdtypes" "github.com/pingcap/tidb/pkg/util/codec" "github.com/stretchr/testify/require" - pd "github.com/tikv/pd/client" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -52,157 +48,20 @@ func assertRegions(t *testing.T, regions []*split.RegionInfo, keys ...string) { } } -type TestClient struct { - split.SplitClient - pd.Client - - 
mu sync.RWMutex - stores map[uint64]*metapb.Store - regions map[uint64]*split.RegionInfo - regionsInfo *pdtypes.RegionTree // For now it's only used in ScanRegions - nextRegionID uint64 - - scattered map[uint64]bool - InjectErr bool - InjectTimes int32 -} - -func NewTestClient( - stores map[uint64]*metapb.Store, - regions map[uint64]*split.RegionInfo, - nextRegionID uint64, -) *TestClient { - regionsInfo := &pdtypes.RegionTree{} - for _, regionInfo := range regions { - regionsInfo.SetRegion(pdtypes.NewRegionInfo(regionInfo.Region, regionInfo.Leader)) - } - return &TestClient{ - stores: stores, - regions: regions, - regionsInfo: regionsInfo, - nextRegionID: nextRegionID, - scattered: map[uint64]bool{}, - } -} - -func (c *TestClient) GetAllRegions() map[uint64]*split.RegionInfo { - c.mu.RLock() - defer c.mu.RUnlock() - return c.regions -} - -func (c *TestClient) GetPDClient() *split.FakePDClient { - stores := make([]*metapb.Store, 0, len(c.stores)) - for _, store := range c.stores { - stores = append(stores, store) - } - return split.NewFakePDClient(stores, false, nil) -} - -func (c *TestClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { - c.mu.RLock() - defer c.mu.RUnlock() - store, ok := c.stores[storeID] - if !ok { - return nil, errors.Errorf("store not found") - } - return store, nil -} - -func (c *TestClient) GetRegion(ctx context.Context, key []byte) (*split.RegionInfo, error) { - c.mu.RLock() - defer c.mu.RUnlock() - for _, region := range c.regions { - if bytes.Compare(key, region.Region.StartKey) >= 0 && - (len(region.Region.EndKey) == 0 || bytes.Compare(key, region.Region.EndKey) < 0) { - return region, nil - } - } - return nil, errors.Errorf("region not found: key=%s", string(key)) -} - -func (c *TestClient) GetRegionByID(ctx context.Context, regionID uint64) (*split.RegionInfo, error) { - c.mu.RLock() - defer c.mu.RUnlock() - region, ok := c.regions[regionID] - if !ok { - return nil, errors.Errorf("region not found: id=%d", regionID) - } - return region, nil -} - -func (c *TestClient) SplitWaitAndScatter(_ context.Context, _ *split.RegionInfo, keys [][]byte) ([]*split.RegionInfo, error) { - c.mu.Lock() - defer c.mu.Unlock() - newRegions := make([]*split.RegionInfo, 0) - for _, key := range keys { - var target *split.RegionInfo - splitKey := codec.EncodeBytes([]byte{}, key) - for _, region := range c.regions { - if region.ContainsInterior(splitKey) { - target = region - } - } - if target == nil { - continue - } - newRegion := &split.RegionInfo{ - Region: &metapb.Region{ - Peers: target.Region.Peers, - Id: c.nextRegionID, - StartKey: target.Region.StartKey, - EndKey: splitKey, - }, - } - c.regions[c.nextRegionID] = newRegion - c.nextRegionID++ - target.Region.StartKey = splitKey - c.regions[target.Region.Id] = target - newRegions = append(newRegions, newRegion) - } - return newRegions, nil -} - -func (c *TestClient) GetOperator(context.Context, uint64) (*pdpb.GetOperatorResponse, error) { - return &pdpb.GetOperatorResponse{ - Header: new(pdpb.ResponseHeader), - }, nil -} - -func (c *TestClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*split.RegionInfo, error) { - if c.InjectErr && c.InjectTimes > 0 { - c.InjectTimes -= 1 - return nil, status.Error(codes.Unavailable, "not leader") - } - if len(key) != 0 && bytes.Equal(key, endKey) { - return nil, status.Error(codes.Internal, "key and endKey are the same") - } - - infos := c.regionsInfo.ScanRange(key, endKey, limit) - regions := make([]*split.RegionInfo, 0, len(infos)) - for _, 
info := range infos { - regions = append(regions, &split.RegionInfo{ - Region: info.Meta, - Leader: info.Leader, - }) - } - return regions, nil -} - -func (c *TestClient) WaitRegionsScattered(context.Context, []*split.RegionInfo) (int, error) { - return 0, nil -} - // region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) -func initTestClient(isRawKv bool) *TestClient { +func initTestClient(isRawKv bool) *split.TestClient { + keys := []string{"", "aay", "bba", "bbh", "cca", ""} + stores := make(map[uint64]*metapb.Store) + stores[1] = &metapb.Store{ + Id: 1, + } peers := make([]*metapb.Peer, 1) peers[0] = &metapb.Peer{ Id: 1, StoreId: 1, } - keys := [6]string{"", "aay", "bba", "bbh", "cca", ""} regions := make(map[uint64]*split.RegionInfo) - for i := uint64(1); i < 6; i++ { + for i := 1; i < len(keys); i++ { startKey := []byte(keys[i-1]) if len(startKey) != 0 { startKey = codec.EncodeBytesExt([]byte{}, startKey, isRawKv) @@ -211,24 +70,20 @@ func initTestClient(isRawKv bool) *TestClient { if len(endKey) != 0 { endKey = codec.EncodeBytesExt([]byte{}, endKey, isRawKv) } - regions[i] = &split.RegionInfo{ + regions[uint64(i)] = &split.RegionInfo{ Leader: &metapb.Peer{ - Id: i, + Id: uint64(i), StoreId: 1, }, Region: &metapb.Region{ - Id: i, + Id: uint64(i), Peers: peers, StartKey: startKey, EndKey: endKey, }, } } - stores := make(map[uint64]*metapb.Store) - stores[1] = &metapb.Store{ - Id: 1, - } - return NewTestClient(stores, regions, 6) + return split.NewTestClient(stores, regions, 6) } func TestScanSuccess(t *testing.T) { @@ -411,7 +266,7 @@ func TestEpochNotMatch(t *testing.T) { ctl := logclient.OverRegionsInRange([]byte(""), []byte(""), cli, &rs) ctx := context.Background() - printPDRegion("cli", cli.regionsInfo.Regions) + printPDRegion("cli", cli.RegionsInfo.Regions) regions, err := split.PaginateScanRegion(ctx, cli, []byte("aaz"), []byte("bbb"), 2) require.NoError(t, err) require.Len(t, regions, 2) @@ -429,8 +284,8 @@ func TestEpochNotMatch(t *testing.T) { } newRegion := pdtypes.NewRegionInfo(info.Region, info.Leader) mergeRegion := func() { - cli.regionsInfo.SetRegion(newRegion) - cli.regions[42] = &info + cli.RegionsInfo.SetRegion(newRegion) + cli.Regions[42] = &info } epochNotMatch := &import_sstpb.Error{ Message: "Epoch not match", @@ -457,7 +312,7 @@ func TestEpochNotMatch(t *testing.T) { }) printRegion("first", firstRunRegions) printRegion("second", secondRunRegions) - printPDRegion("cli", cli.regionsInfo.Regions) + printPDRegion("cli", cli.RegionsInfo.Regions) assertRegions(t, firstRunRegions, "", "aay") assertRegions(t, secondRunRegions, "", "aay", "bbh", "cca", "") require.NoError(t, err) @@ -470,7 +325,7 @@ func TestRegionSplit(t *testing.T) { ctl := logclient.OverRegionsInRange([]byte(""), []byte(""), cli, &rs) ctx := context.Background() - printPDRegion("cli", cli.regionsInfo.Regions) + printPDRegion("cli", cli.RegionsInfo.Regions) regions, err := split.PaginateScanRegion(ctx, cli, []byte("aaz"), []byte("aazz"), 1) require.NoError(t, err) require.Len(t, regions, 1) @@ -503,8 +358,8 @@ func TestRegionSplit(t *testing.T) { splitRegion := func() { for _, r := range newRegions { newRegion := pdtypes.NewRegionInfo(r.Region, r.Leader) - cli.regionsInfo.SetRegion(newRegion) - cli.regions[r.Region.Id] = r + cli.RegionsInfo.SetRegion(newRegion) + cli.Regions[r.Region.Id] = r } } epochNotMatch := &import_sstpb.Error{ @@ -535,7 +390,7 @@ func TestRegionSplit(t *testing.T) { }) printRegion("first", firstRunRegions) printRegion("second", secondRunRegions) - printPDRegion("cli", 
cli.regionsInfo.Regions) + printPDRegion("cli", cli.RegionsInfo.Regions) assertRegions(t, firstRunRegions, "", "aay") assertRegions(t, secondRunRegions, "", "aay", "aayy", "bba", "bbh", "cca", "") require.NoError(t, err) @@ -548,7 +403,7 @@ func TestRetryBackoff(t *testing.T) { ctl := logclient.OverRegionsInRange([]byte(""), []byte(""), cli, &rs) ctx := context.Background() - printPDRegion("cli", cli.regionsInfo.Regions) + printPDRegion("cli", cli.RegionsInfo.Regions) regions, err := split.PaginateScanRegion(ctx, cli, []byte("aaz"), []byte("bbb"), 2) require.NoError(t, err) require.Len(t, regions, 2) @@ -569,7 +424,7 @@ func TestRetryBackoff(t *testing.T) { } return logclient.RPCResultOK() }) - printPDRegion("cli", cli.regionsInfo.Regions) + printPDRegion("cli", cli.RegionsInfo.Regions) require.Equal(t, 1, rs.Attempt()) // we retried leader not found error. so the next backoff should be 2 * initical backoff. require.Equal(t, 2*time.Millisecond, rs.ExponentialBackoff()) diff --git a/br/pkg/restore/log_client/log_file_manager.go b/br/pkg/restore/log_client/log_file_manager.go index cbaa6a594dff4..4c2992467a2ab 100644 --- a/br/pkg/restore/log_client/log_file_manager.go +++ b/br/pkg/restore/log_client/log_file_manager.go @@ -9,6 +9,7 @@ import ( "fmt" "strings" "sync" + "time" "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" @@ -25,9 +26,13 @@ import ( "go.uber.org/zap" ) +var TotalEntryCount int64 + // MetaIter is the type of iterator of metadata files' content. type MetaIter = iter.TryNextor[*backuppb.Metadata] +type SubCompactionIter iter.TryNextor[*backuppb.LogFileSubcompaction] + type MetaName struct { meta Meta name string @@ -98,7 +103,8 @@ type LogFileManager struct { storage storage.ExternalStorage helper streamMetadataHelper - withmigrations WithMigrations + withMigraionBuilder *WithMigrationsBuilder + withMigrations *WithMigrations metadataDownloadBatchSize uint } @@ -109,7 +115,8 @@ type LogFileManagerInit struct { RestoreTS uint64 Storage storage.ExternalStorage - Migrations WithMigrations + MigrationsBuilder *WithMigrationsBuilder + Migrations *WithMigrations MetadataDownloadBatchSize uint EncryptionManager *encryption.Manager } @@ -123,11 +130,12 @@ type DDLMetaGroup struct { // Generally the config cannot be changed during its lifetime. 
func CreateLogFileManager(ctx context.Context, init LogFileManagerInit) (*LogFileManager, error) { fm := &LogFileManager{ - startTS: init.StartTS, - restoreTS: init.RestoreTS, - storage: init.Storage, - helper: stream.NewMetadataHelper(stream.WithEncryptionManager(init.EncryptionManager)), - withmigrations: init.Migrations, + startTS: init.StartTS, + restoreTS: init.RestoreTS, + storage: init.Storage, + helper: stream.NewMetadataHelper(stream.WithEncryptionManager(init.EncryptionManager)), + withMigraionBuilder: init.MigrationsBuilder, + withMigrations: init.Migrations, metadataDownloadBatchSize: init.MetadataDownloadBatchSize, } @@ -138,6 +146,11 @@ func CreateLogFileManager(ctx context.Context, init LogFileManagerInit) (*LogFil return fm, nil } +func (rc *LogFileManager) BuildMigrations(migs []*backuppb.Migration) { + w := rc.withMigraionBuilder.Build(migs) + rc.withMigrations = &w +} + func (rc *LogFileManager) ShiftTS() uint64 { return rc.shiftStartTS } @@ -171,9 +184,11 @@ func (rc *LogFileManager) loadShiftTS(ctx context.Context) error { } if !shiftTS.exists { rc.shiftStartTS = rc.startTS + rc.withMigraionBuilder.SetShiftStartTS(rc.shiftStartTS) return nil } rc.shiftStartTS = shiftTS.value + rc.withMigraionBuilder.SetShiftStartTS(rc.shiftStartTS) return nil } @@ -225,7 +240,7 @@ func (rc *LogFileManager) createMetaIterOver(ctx context.Context, s storage.Exte } func (rc *LogFileManager) FilterDataFiles(m MetaNameIter) LogIter { - ms := rc.withmigrations.Metas(m) + ms := rc.withMigrations.Metas(m) return iter.FlatMap(ms, func(m *MetaWithMigrations) LogIter { gs := m.Physicals(iter.Enumerate(iter.FromSlice(m.meta.FileGroups))) return iter.FlatMap(gs, func(gim *PhysicalWithMigrations) LogIter { @@ -266,10 +281,13 @@ func (rc *LogFileManager) collectDDLFilesAndPrepareCache( ctx context.Context, files MetaGroupIter, ) ([]Log, error) { + start := time.Now() + log.Info("start to collect all ddl files") fs := iter.CollectAll(ctx, files) if fs.Err != nil { return nil, errors.Annotatef(fs.Err, "failed to collect from files") } + log.Info("finish to collect all ddl files", zap.Duration("take", time.Since(start))) dataFileInfos := make([]*backuppb.DataFileInfo, 0) for _, g := range fs.Item { @@ -281,24 +299,12 @@ func (rc *LogFileManager) collectDDLFilesAndPrepareCache( } // LoadDDLFilesAndCountDMLFiles loads all DDL files needs to be restored in the restoration. -// At the same time, if the `counter` isn't nil, counting the DML file needs to be restored into `counter`. // This function returns all DDL files needing directly because we need sort all of them. 
-func (rc *LogFileManager) LoadDDLFilesAndCountDMLFiles(ctx context.Context, counter *int) ([]Log, error) { +func (rc *LogFileManager) LoadDDLFilesAndCountDMLFiles(ctx context.Context) ([]Log, error) { m, err := rc.streamingMeta(ctx) if err != nil { return nil, err } - if counter != nil { - m = iter.Tap(m, func(m *MetaName) { - for _, fg := range m.meta.FileGroups { - for _, f := range fg.DataFilesInfo { - if !f.IsMeta && !rc.ShouldFilterOut(f) { - *counter += 1 - } - } - } - }) - } mg := rc.FilterMetaFiles(m) return rc.collectDDLFilesAndPrepareCache(ctx, mg) @@ -324,7 +330,12 @@ func (rc *LogFileManager) FilterMetaFiles(ms MetaNameIter) MetaGroupIter { if m.meta.MetaVersion > backuppb.MetaVersion_V1 { d.Path = g.Path } - return !d.IsMeta || rc.ShouldFilterOut(d) + if rc.ShouldFilterOut(d) { + return true + } + // count the progress + TotalEntryCount += d.NumberOfEntries + return !d.IsMeta }) return DDLMetaGroup{ Path: g.Path, @@ -335,6 +346,11 @@ func (rc *LogFileManager) FilterMetaFiles(ms MetaNameIter) MetaGroupIter { }) } +// Fetch compactions that may contain file less than the TS. +func (rc *LogFileManager) GetCompactionIter(ctx context.Context) iter.TryNextor[*backuppb.LogFileSubcompaction] { + return rc.withMigrations.Compactions(ctx, rc.storage) +} + // the kv entry with ts, the ts is decoded from entry. type KvEntryWithTS struct { E kv.Entry @@ -418,3 +434,18 @@ func (rc *LogFileManager) ReadAllEntries( return kvEntries, nextKvEntries, nil } + +func Subcompactions(ctx context.Context, prefix string, s storage.ExternalStorage) SubCompactionIter { + return iter.FlatMap(storage.UnmarshalDir( + ctx, + &storage.WalkOption{SubDir: prefix}, + s, + func(t *backuppb.LogFileSubcompactions, name string, b []byte) error { return t.Unmarshal(b) }, + ), func(subcs *backuppb.LogFileSubcompactions) iter.TryNextor[*backuppb.LogFileSubcompaction] { + return iter.FromSlice(subcs.Subcompactions) + }) +} + +func LoadMigrations(ctx context.Context, s storage.ExternalStorage) iter.TryNextor[*backuppb.Migration] { + return storage.UnmarshalDir(ctx, &storage.WalkOption{SubDir: "v1/migrations/"}, s, func(t *backuppb.Migration, name string, b []byte) error { return t.Unmarshal(b) }) +} diff --git a/br/pkg/restore/log_client/log_file_manager_test.go b/br/pkg/restore/log_client/log_file_manager_test.go index 0ac289b65b8b0..79813d6ef78f2 100644 --- a/br/pkg/restore/log_client/log_file_manager_test.go +++ b/br/pkg/restore/log_client/log_file_manager_test.go @@ -235,6 +235,8 @@ func testReadMetaBetweenTSWithVersion(t *testing.T, m metaMaker) { RestoreTS: c.endTS, Storage: loc, + MigrationsBuilder: logclient.NewMigrationBuilder(0, c.startTS, c.endTS), + Migrations: emptyMigrations(), MetadataDownloadBatchSize: 32, } cli, err := logclient.CreateLogFileManager(ctx, init) @@ -469,6 +471,8 @@ func testFileManagerWithMeta(t *testing.T, m metaMaker) { RestoreTS: end, Storage: loc, + MigrationsBuilder: logclient.NewMigrationBuilder(0, start, end), + Migrations: emptyMigrations(), MetadataDownloadBatchSize: 32, }) req.NoError(err) @@ -487,15 +491,8 @@ func testFileManagerWithMeta(t *testing.T, m metaMaker) { ), ).Item } else { - var counter *int - if c.DMLFileCount != nil { - counter = new(int) - } - data, err := fm.LoadDDLFilesAndCountDMLFiles(ctx, counter) + data, err := fm.LoadDDLFilesAndCountDMLFiles(ctx) req.NoError(err) - if counter != nil { - req.Equal(*c.DMLFileCount, *counter) - } r = data } dataFileInfoMatches(t, r, c.Requires...) 
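For context on how the pieces above fit together, here is a minimal sketch (not part of the patch) of wiring the migration-aware LogFileManager: the builder supplies the skip map, BuildMigrations ingests the persisted migrations, and DDL loading no longer takes a DML counter. Constructors and package paths follow this series; the function name, TS handling, and batch size are illustrative assumptions.

    package restoreexample

    import (
        "context"

        logclient "github.com/pingcap/tidb/br/pkg/restore/log_client"
        "github.com/pingcap/tidb/br/pkg/storage"
        "github.com/pingcap/tidb/br/pkg/utils/iter"
    )

    func loadLogFiles(ctx context.Context, s storage.ExternalStorage, startTS, restoreTS uint64) error {
        // The builder presumably receives the shift start TS later via loadShiftTS.
        builder := logclient.NewMigrationBuilder(0, startTS, restoreTS)
        fm, err := logclient.CreateLogFileManager(ctx, logclient.LogFileManagerInit{
            StartTS:                   startTS,
            RestoreTS:                 restoreTS,
            Storage:                   s,
            MigrationsBuilder:         builder,
            MetadataDownloadBatchSize: 32,
        })
        if err != nil {
            return err
        }
        // Read persisted migrations and let the manager derive its skip map
        // and compaction directories from them.
        migs := iter.CollectAll(ctx, logclient.LoadMigrations(ctx, s))
        if migs.Err != nil {
            return migs.Err
        }
        fm.BuildMigrations(migs.Item)

        // DDL files no longer take a DML counter; entry counting now happens
        // in FilterMetaFiles via TotalEntryCount.
        if _, err := fm.LoadDDLFilesAndCountDMLFiles(ctx); err != nil {
            return err
        }
        // Compacted SSTs referenced by the collected migrations.
        _ = fm.GetCompactionIter(ctx)
        return nil
    }

With this shape, compacted SSTs are discovered from the migrations themselves via GetCompactionIter rather than by a separate storage listing pass.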
@@ -528,6 +525,7 @@ func TestFilterDataFiles(t *testing.T) { RestoreTS: 10, Storage: loc, + MigrationsBuilder: logclient.NewMigrationBuilder(0, 0, 10), Migrations: emptyMigrations(), MetadataDownloadBatchSize: 32, }) diff --git a/br/pkg/restore/log_client/log_split_strategy.go b/br/pkg/restore/log_client/log_split_strategy.go new file mode 100644 index 0000000000000..3327991db02fa --- /dev/null +++ b/br/pkg/restore/log_client/log_split_strategy.go @@ -0,0 +1,107 @@ +// Copyright 2024 PingCAP, Inc. Licensed under Apache-2.0. + +package logclient + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/pingcap/tidb/br/pkg/checkpoint" + "github.com/pingcap/tidb/br/pkg/restore/split" + restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" + "github.com/pingcap/tidb/br/pkg/summary" + "github.com/pingcap/tidb/pkg/util/sqlexec" + "go.uber.org/zap" +) + +type LogSplitStrategy struct { + *split.BaseSplitStrategy + checkpointSkipMap *LogFilesSkipMap + checkpointFileProgressFn func(uint64, uint64) +} + +var _ split.SplitStrategy[*LogDataFileInfo] = &LogSplitStrategy{} + +func NewLogSplitStrategy( + ctx context.Context, + useCheckpoint bool, + execCtx sqlexec.RestrictedSQLExecutor, + rules map[int64]*restoreutils.RewriteRules, + updateStatsFn func(uint64, uint64), +) (*LogSplitStrategy, error) { + downstreamIdset := make(map[int64]struct{}) + for _, rule := range rules { + downstreamIdset[rule.NewTableID] = struct{}{} + } + skipMap := NewLogFilesSkipMap() + if useCheckpoint { + t, err := checkpoint.LoadCheckpointDataForLogRestore( + ctx, execCtx, func(groupKey checkpoint.LogRestoreKeyType, off checkpoint.LogRestoreValueMarshaled) { + for tableID, foffs := range off.Foffs { + // filter out the checkpoint data of dropped table + if _, exists := downstreamIdset[tableID]; exists { + for _, foff := range foffs { + skipMap.Insert(groupKey, off.Goff, foff) + } + } + } + }) + + if err != nil { + return nil, errors.Trace(err) + } + summary.AdjustStartTimeToEarlierTime(t) + } + return &LogSplitStrategy{ + BaseSplitStrategy: split.NewBaseSplitStrategy(rules), + checkpointSkipMap: skipMap, + checkpointFileProgressFn: updateStatsFn, + }, nil +} + +const splitFileThreshold = 1024 * 1024 // 1 MB + +func (ls *LogSplitStrategy) Accumulate(file *LogDataFileInfo) { + if file.Length > splitFileThreshold { + ls.AccumulateCount += 1 + } + splitHelper, exist := ls.TableSplitter[file.TableId] + if !exist { + splitHelper = split.NewSplitHelper() + ls.TableSplitter[file.TableId] = splitHelper + } + + splitHelper.Merge(split.Valued{ + Key: split.Span{ + StartKey: file.StartKey, + EndKey: file.EndKey, + }, + Value: split.Value{ + Size: file.Length, + Number: file.NumberOfEntries, + }, + }) +} + +func (ls *LogSplitStrategy) ShouldSplit() bool { + return ls.AccumulateCount > 4096 +} + +func (ls *LogSplitStrategy) ShouldSkip(file *LogDataFileInfo) bool { + if file.IsMeta { + return true + } + _, exist := ls.Rules[file.TableId] + if !exist { + log.Info("skip for no rule files", zap.Int64("tableID", file.TableId)) + return true + } + + if ls.checkpointSkipMap.NeedSkip(file.MetaDataGroupName, file.OffsetInMetaGroup, file.OffsetInMergedGroup) { + //onPcheckpointSkipMaprogress() + ls.checkpointFileProgressFn(uint64(file.NumberOfEntries), file.Length) + return true + } + return false +} diff --git a/br/pkg/restore/log_client/migration.go b/br/pkg/restore/log_client/migration.go index 19d9d3daeb3cb..a7b4307e0f568 100644 --- a/br/pkg/restore/log_client/migration.go +++ 
b/br/pkg/restore/log_client/migration.go @@ -15,7 +15,10 @@ package logclient import ( + "context" + backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/utils/iter" ) @@ -99,6 +102,10 @@ type WithMigrationsBuilder struct { restoredTS uint64 } +func (builder *WithMigrationsBuilder) SetShiftStartTS(ts uint64) { + builder.shiftStartTS = ts +} + func (builder *WithMigrationsBuilder) updateSkipMap(skipmap metaSkipMap, metas []*backuppb.MetaEdit) { for _, meta := range metas { if meta.DestructSelf { @@ -136,14 +143,24 @@ func (builder *WithMigrationsBuilder) coarseGrainedFilter(mig *backuppb.Migratio // Create the wrapper by migrations. func (builder *WithMigrationsBuilder) Build(migs []*backuppb.Migration) WithMigrations { skipmap := make(metaSkipMap) + compactionDirs := make([]string, 0, 8) + for _, mig := range migs { // TODO: deal with TruncatedTo and DestructPrefix if builder.coarseGrainedFilter(mig) { continue } builder.updateSkipMap(skipmap, mig.EditMeta) + + for _, c := range mig.Compactions { + compactionDirs = append(compactionDirs, c.Artifacts) + } } - return WithMigrations(skipmap) + withMigrations := WithMigrations{ + skipmap: skipmap, + compactionDirs: compactionDirs, + } + return withMigrations } type PhysicalMigrationsIter = iter.TryNextor[*PhysicalWithMigrations] @@ -190,13 +207,16 @@ func (mwm *MetaWithMigrations) Physicals(groupIndexIter GroupIndexIter) Physical }) } -type WithMigrations metaSkipMap +type WithMigrations struct { + skipmap metaSkipMap + compactionDirs []string +} -func (wm WithMigrations) Metas(metaNameIter MetaNameIter) MetaMigrationsIter { +func (wm *WithMigrations) Metas(metaNameIter MetaNameIter) MetaMigrationsIter { return iter.MapFilter(metaNameIter, func(mname *MetaName) (*MetaWithMigrations, bool) { var phySkipmap physicalSkipMap = nil - if wm != nil { - skipmap := wm[mname.name] + if wm.skipmap != nil { + skipmap := wm.skipmap[mname.name] if skipmap != nil { if skipmap.skip { return nil, true @@ -210,3 +230,11 @@ func (wm WithMigrations) Metas(metaNameIter MetaNameIter) MetaMigrationsIter { }, false }) } + +func (wm *WithMigrations) Compactions(ctx context.Context, s storage.ExternalStorage) iter.TryNextor[*backuppb.LogFileSubcompaction] { + compactionDirIter := iter.FromSlice(wm.compactionDirs) + return iter.FlatMap(compactionDirIter, func(name string) iter.TryNextor[*backuppb.LogFileSubcompaction] { + // name is the absolute path in external storage. + return Subcompactions(ctx, name, s) + }) +} diff --git a/br/pkg/restore/log_client/migration_test.go b/br/pkg/restore/log_client/migration_test.go index 0dd7b06197c7c..5368d7416dadf 100644 --- a/br/pkg/restore/log_client/migration_test.go +++ b/br/pkg/restore/log_client/migration_test.go @@ -25,8 +25,8 @@ import ( "github.com/stretchr/testify/require" ) -func emptyMigrations() logclient.WithMigrations { - return logclient.WithMigrations{} +func emptyMigrations() *logclient.WithMigrations { + return &logclient.WithMigrations{} } func nameFromID(prefix string, id uint64) string { diff --git a/br/pkg/restore/restorer.go b/br/pkg/restore/restorer.go new file mode 100644 index 0000000000000..75a21b583eb1f --- /dev/null +++ b/br/pkg/restore/restorer.go @@ -0,0 +1,367 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package restore
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/pingcap/errors"
+ backuppb "github.com/pingcap/kvproto/pkg/brpb"
+ "github.com/pingcap/log"
+ "github.com/pingcap/tidb/br/pkg/checkpoint"
+ "github.com/pingcap/tidb/br/pkg/logutil"
+ "github.com/pingcap/tidb/br/pkg/restore/split"
+ "github.com/pingcap/tidb/br/pkg/restore/utils"
+ "github.com/pingcap/tidb/br/pkg/summary"
+ "github.com/pingcap/tidb/br/pkg/utils/iter"
+ "github.com/pingcap/tidb/pkg/util"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "golang.org/x/sync/errgroup"
+)
+
+// BackupFileSet represents the batch files to be restored for a table. Currently, we have these types of files:
+// 1. Raw KV(sst files)
+// 2. Txn KV(sst files)
+// 3. Database KV backup(sst files)
+// 4. Compacted Log backups(sst files)
+type BackupFileSet struct {
+ // TableID only valid in 3.4.5.
+ // For Raw/Txn KV, table id is always 0
+ TableID int64
+
+ // For log backup changes, this field is nil.
+ SSTFiles []*backuppb.File
+
+ // RewriteRules is the rewrite rules for the specified table.
+ // Because these rules belong to the *one table*,
+ // we can hold them here.
+ RewriteRules *utils.RewriteRules
+}
+
+type BatchBackupFileSet []BackupFileSet
+
+type zapBatchBackupFileSetMarshaler BatchBackupFileSet
+
+// MarshalLogObjectForFiles is an internal util function to zap something having `Files` field.
+func MarshalLogObjectForFiles(batchFileSet BatchBackupFileSet, encoder zapcore.ObjectEncoder) error {
+ return zapBatchBackupFileSetMarshaler(batchFileSet).MarshalLogObject(encoder)
+}
+
+func (fgs zapBatchBackupFileSetMarshaler) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+ elements := make([]string, 0)
+ total := 0
+ totalKVs := uint64(0)
+ totalBytes := uint64(0)
+ totalSize := uint64(0)
+ for _, fg := range fgs {
+ for _, f := range fg.SSTFiles {
+ total += 1
+ elements = append(elements, f.GetName())
+ totalKVs += f.GetTotalKvs()
+ totalBytes += f.GetTotalBytes()
+ totalSize += f.GetSize_()
+ }
+ }
+ encoder.AddInt("total", total)
+ _ = encoder.AddArray("files", logutil.AbbreviatedArrayMarshaler(elements))
+ encoder.AddUint64("totalKVs", totalKVs)
+ encoder.AddUint64("totalBytes", totalBytes)
+ encoder.AddUint64("totalSize", totalSize)
+ return nil
+}
+
+func ZapBatchBackupFileSet(batchFileSet BatchBackupFileSet) zap.Field {
+ return zap.Object("fileset", zapBatchBackupFileSetMarshaler(batchFileSet))
+}
+
+// CreateUniqueFileSets is used for Raw/Txn non-tableID files.
+// It converts a slice of files into a slice of unique BackupFileSets,
+// where each BackupFileSet contains a single file.
+func CreateUniqueFileSets(files []*backuppb.File) []BackupFileSet { + newSet := make([]BackupFileSet, len(files)) + for i, f := range files { + newSet[i].SSTFiles = []*backuppb.File{f} + } + return newSet +} + +func NewFileSet(files []*backuppb.File, rules *utils.RewriteRules) BackupFileSet { + return BackupFileSet{ + SSTFiles: files, + RewriteRules: rules, + } +} + +// SstRestorer defines the essential methods required for restoring SST files in various backup formats: +// 1. Raw backup SST files +// 2. Transactional (Txn) backup SST files +// 3. TiDB backup SST files +// 4. Log-compacted SST files +// +// It serves as a high-level interface for restoration, supporting implementations such as simpleRestorer +// and MultiTablesRestorer. SstRestorer includes FileImporter for handling raw, transactional, and compacted SSTs, +// and MultiTablesRestorer for TiDB-specific backups. +type SstRestorer interface { + // GoRestore imports the specified backup file sets into TiKV asynchronously. + // The onProgress function is called with progress updates as files are processed. + GoRestore(onProgress func(int64), batchFileSets ...BatchBackupFileSet) error + + // WaitUntilFinish blocks until all pending restore files have completed processing. + WaitUntilFinish() error + + // Close releases any resources associated with the restoration process. + Close() error +} + +// FileImporter is a low-level interface for handling the import of backup files into storage (e.g., TiKV). +// It is primarily used by the importer client to manage raw and transactional SST file imports. +type FileImporter interface { + // Import uploads and imports the provided backup file sets into storage. + // The ctx parameter provides context for managing request scope. + Import(ctx context.Context, fileSets ...BackupFileSet) error + + // Close releases any resources used by the importer client. + Close() error +} + +// BalancedFileImporter is a wrapper around FileImporter that adds concurrency controls. +// It ensures that file imports are balanced across storage nodes, which is particularly useful +// in MultiTablesRestorer scenarios where concurrency management is critical for efficiency. +type BalancedFileImporter interface { + FileImporter + + // PauseForBackpressure manages concurrency by controlling when imports can proceed, + // ensuring load is distributed evenly across storage nodes. 
+ PauseForBackpressure() +} + +type SimpleRestorer struct { + eg *errgroup.Group + ectx context.Context + workerPool *util.WorkerPool + fileImporter FileImporter + checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType] +} + +func NewSimpleSstRestorer( + ctx context.Context, + fileImporter FileImporter, + workerPool *util.WorkerPool, + checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType], +) SstRestorer { + eg, ectx := errgroup.WithContext(ctx) + return &SimpleRestorer{ + eg: eg, + ectx: ectx, + workerPool: workerPool, + fileImporter: fileImporter, + checkpointRunner: checkpointRunner, + } +} + +func (s *SimpleRestorer) Close() error { + return s.fileImporter.Close() +} + +func (s *SimpleRestorer) WaitUntilFinish() error { + return s.eg.Wait() +} + +func (s *SimpleRestorer) GoRestore(onProgress func(int64), batchFileSets ...BatchBackupFileSet) error { + for _, sets := range batchFileSets { + for _, set := range sets { + s.workerPool.ApplyOnErrorGroup(s.eg, + func() (restoreErr error) { + fileStart := time.Now() + defer func() { + if restoreErr == nil { + log.Info("import sst files done", logutil.Files(set.SSTFiles), + zap.Duration("take", time.Since(fileStart))) + for _, f := range set.SSTFiles { + onProgress(int64(f.TotalKvs)) + } + } + }() + err := s.fileImporter.Import(s.ectx, set) + if err != nil { + return errors.Trace(err) + } + // TODO handle checkpoint + return nil + }) + } + } + return nil +} + +type MultiTablesRestorer struct { + eg *errgroup.Group + ectx context.Context + workerPool *util.WorkerPool + fileImporter BalancedFileImporter + checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType] +} + +func NewMultiTablesRestorer( + ctx context.Context, + fileImporter BalancedFileImporter, + workerPool *util.WorkerPool, + checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType], +) SstRestorer { + eg, ectx := errgroup.WithContext(ctx) + return &MultiTablesRestorer{ + eg: eg, + ectx: ectx, + workerPool: workerPool, + fileImporter: fileImporter, + checkpointRunner: checkpointRunner, + } +} + +func (m *MultiTablesRestorer) Close() error { + return m.fileImporter.Close() +} + +func (m *MultiTablesRestorer) WaitUntilFinish() error { + if err := m.eg.Wait(); err != nil { + summary.CollectFailureUnit("file", err) + log.Error("restore files failed", zap.Error(err)) + return errors.Trace(err) + } + return nil +} + +func (m *MultiTablesRestorer) GoRestore(onProgress func(int64), batchFileSets ...BatchBackupFileSet) (err error) { + start := time.Now() + fileCount := 0 + defer func() { + elapsed := time.Since(start) + if err == nil { + log.Info("Restore files", zap.Duration("take", elapsed)) + summary.CollectSuccessUnit("files", fileCount, elapsed) + } + }() + + log.Debug("start to restore files", zap.Int("files", fileCount)) + + if span := opentracing.SpanFromContext(m.ectx); span != nil && span.Tracer() != nil { + span1 := span.Tracer().StartSpan("Client.RestoreSSTFiles", opentracing.ChildOf(span.Context())) + defer span1.Finish() + m.ectx = opentracing.ContextWithSpan(m.ectx, span1) + } + + for _, batchFileSet := range batchFileSets { + if m.ectx.Err() != nil { + log.Warn("Restoring encountered error and already stopped, give up remained files.", + logutil.ShortError(m.ectx.Err())) + // We will fetch the error from the errgroup then (If there were). 
+ // Also note if the parent context has been canceled or something,
+ // breaking here directly is also a reasonable behavior.
+ break
+ }
+ filesReplica := batchFileSet
+ m.fileImporter.PauseForBackpressure()
+ m.workerPool.ApplyOnErrorGroup(m.eg, func() (restoreErr error) {
+ fileStart := time.Now()
+ defer func() {
+ if restoreErr == nil {
+ log.Info("import files done", zap.Duration("take", time.Since(fileStart)))
+ onProgress(int64(len(filesReplica)))
+ }
+ }()
+ if importErr := m.fileImporter.Import(m.ectx, filesReplica...); importErr != nil {
+ return errors.Trace(importErr)
+ }
+
+ // the data of this range has been imported
+ if m.checkpointRunner != nil && len(filesReplica) > 0 {
+ for _, filesGroup := range filesReplica {
+ rangeKeySet := make(map[string]struct{})
+ for _, file := range filesGroup.SSTFiles {
+ rangeKey := GetFileRangeKey(file.Name)
+ // Assert that the files having the same rangeKey are all in the current filesGroup.Files
+ rangeKeySet[rangeKey] = struct{}{}
+ }
+ for rangeKey := range rangeKeySet {
+ // The checkpoint range shows this range of kvs has been restored into
+ // the table corresponding to the table-id.
+ if err := checkpoint.AppendRangesForRestore(m.ectx, m.checkpointRunner, filesGroup.TableID, rangeKey); err != nil {
+ return errors.Trace(err)
+ }
+ }
+ }
+ }
+ return nil
+ })
+ }
+ // Once the parent context is canceled and there is no task running in the errgroup,
+ // we may break the for loop without error in the errgroup. (Will this happen?)
+ // At that time, return the error in the context here.
+ return m.ectx.Err()
+}
+
+func GetFileRangeKey(f string) string {
+ // the backup data file pattern is `{store_id}_{region_id}_{epoch_version}_{key}_{ts}_{cf}.sst`
+ // so we need to compare without the `_{cf}.sst` suffix
+ idx := strings.LastIndex(f, "_")
+ if idx < 0 {
+ panic(fmt.Sprintf("invalid backup data file name: '%s'", f))
+ }
+
+ return f[:idx]
+}
+
+type PipelineRestorerWrapper[T any] struct {
+ split.PipelineRegionsSplitter
+}
+
+// WithSplit processes items using a split strategy within a pipeline.
+// It iterates over items, accumulating them until a split condition is met.
+// When a split is required, it executes the split operation on the accumulated items.
+func (p *PipelineRestorerWrapper[T]) WithSplit(ctx context.Context, i iter.TryNextor[T], strategy split.SplitStrategy[T]) iter.TryNextor[T] {
+ return iter.TryMap(
+ iter.FilterOut(i, func(item T) bool {
+ // Skip items based on the strategy's criteria.
+ // Items the strategy marks as skipped are filtered out.
+ return strategy.ShouldSkip(item)
+ }), func(item T) (T, error) {
+ // Accumulate the item for potential splitting.
+ strategy.Accumulate(item)
+
+ // Check if the accumulated items meet the criteria for splitting.
+ if strategy.ShouldSplit() {
+ log.Info("Trying to start region split with accumulations")
+ startTime := time.Now()
+
+ // Execute the split operation on the accumulated items.
+ accumulations := strategy.GetAccumulations()
+ err := p.ExecuteRegions(ctx, accumulations)
+ if err != nil {
+ log.Error("Failed to split regions in pipeline; exit restore", zap.Error(err), zap.Duration("duration", time.Since(startTime)))
+ return item, errors.Annotate(err, "Execute region split on accumulated files failed")
+ }
+ // Reset accumulations after the split operation.
+ strategy.ResetAccumulations()
+ log.Info("Completed region split in pipeline", zap.Duration("duration", time.Since(startTime)))
+ }
+ // Return the item without filtering it out.
+ return item, nil + }) +} diff --git a/br/pkg/restore/restorer_test.go b/br/pkg/restore/restorer_test.go new file mode 100644 index 0000000000000..8e26245dabf06 --- /dev/null +++ b/br/pkg/restore/restorer_test.go @@ -0,0 +1,280 @@ +// Copyright 2024 PingCAP, Inc. +package restore_test + +import ( + "context" + "testing" + + "github.com/pingcap/errors" + backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/split" + restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" + "github.com/pingcap/tidb/br/pkg/utils/iter" + "github.com/pingcap/tidb/pkg/tablecodec" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/codec" + "github.com/stretchr/testify/require" +) + +// Helper function to create test files +func createTestFiles() []*backuppb.File { + return []*backuppb.File{ + {Name: "file1.sst", TotalKvs: 10}, + {Name: "file2.sst", TotalKvs: 20}, + } +} + +type fakeImporter struct { + restore.FileImporter + hasError bool +} + +func (f *fakeImporter) Import(ctx context.Context, fileSets ...restore.BackupFileSet) error { + if f.hasError { + return errors.New("import error") + } + return nil +} + +func (f *fakeImporter) Close() error { + return nil +} + +func TestSimpleRestorerImportAndProgress(t *testing.T) { + ctx := context.Background() + files := createTestFiles() + progressCount := int64(0) + + workerPool := util.NewWorkerPool(2, "simple-restorer") + restorer := restore.NewSimpleSstRestorer(ctx, &fakeImporter{}, workerPool, nil) + + fileSet := restore.BatchBackupFileSet{ + {SSTFiles: files}, + } + err := restorer.GoRestore(func(progress int64) { + progressCount += progress + }, fileSet) + require.NoError(t, err) + err = restorer.WaitUntilFinish() + require.Equal(t, int64(30), progressCount) + require.NoError(t, err) + + batchFileSet := restore.BatchBackupFileSet{ + {SSTFiles: files}, + {SSTFiles: files}, + } + progressCount = int64(0) + err = restorer.GoRestore(func(progress int64) { + progressCount += progress + }, batchFileSet) + require.NoError(t, err) + err = restorer.WaitUntilFinish() + require.NoError(t, err) + require.Equal(t, int64(60), progressCount) +} + +func TestSimpleRestorerWithErrorInImport(t *testing.T) { + ctx := context.Background() + + workerPool := util.NewWorkerPool(2, "simple-restorer") + restorer := restore.NewSimpleSstRestorer(ctx, &fakeImporter{hasError: true}, workerPool, nil) + + files := []*backuppb.File{ + {Name: "file_with_error.sst", TotalKvs: 15}, + } + fileSet := restore.BatchBackupFileSet{ + {SSTFiles: files}, + } + + // Run restore and expect an error + progressCount := int64(0) + restorer.GoRestore(func(progress int64) {}, fileSet) + err := restorer.WaitUntilFinish() + require.Error(t, err) + require.Contains(t, err.Error(), "import error") + require.Equal(t, int64(0), progressCount) +} + +func createSampleBatchFileSets() restore.BatchBackupFileSet { + return restore.BatchBackupFileSet{ + { + TableID: 1001, + SSTFiles: []*backuppb.File{ + {Name: "file1.sst", TotalKvs: 10}, + {Name: "file2.sst", TotalKvs: 20}, + }, + }, + { + TableID: 1002, + SSTFiles: []*backuppb.File{ + {Name: "file3.sst", TotalKvs: 15}, + }, + }, + } +} + +// FakeBalancedFileImporteris a minimal implementation for testing +type FakeBalancedFileImporter struct { + hasError bool + unblockCount int +} + +func (f *FakeBalancedFileImporter) Import(ctx context.Context, fileSets ...restore.BackupFileSet) error { + if f.hasError { + return errors.New("import error") + } + return nil +} + +func 
(f *FakeBalancedFileImporter) PauseForBackpressure() { + f.unblockCount++ +} + +func (f *FakeBalancedFileImporter) Close() error { + return nil +} + +func TestMultiTablesRestorerRestoreSuccess(t *testing.T) { + ctx := context.Background() + importer := &FakeBalancedFileImporter{} + workerPool := util.NewWorkerPool(2, "multi-tables-restorer") + + restorer := restore.NewMultiTablesRestorer(ctx, importer, workerPool, nil) + + var progress int64 + fileSets := createSampleBatchFileSets() + + restorer.GoRestore(func(p int64) { progress += p }, fileSets) + err := restorer.WaitUntilFinish() + require.NoError(t, err) + + // Ensure progress was tracked correctly + require.Equal(t, int64(2), progress) // Total files group: 2 + require.Equal(t, 1, importer.unblockCount) +} + +func TestMultiTablesRestorerRestoreWithImportError(t *testing.T) { + ctx := context.Background() + importer := &FakeBalancedFileImporter{hasError: true} + workerPool := util.NewWorkerPool(2, "multi-tables-restorer") + + restorer := restore.NewMultiTablesRestorer(ctx, importer, workerPool, nil) + fileSets := createSampleBatchFileSets() + + restorer.GoRestore(func(int64) {}, fileSets) + err := restorer.WaitUntilFinish() + require.Error(t, err) + require.Contains(t, err.Error(), "import error") +} + +func TestMultiTablesRestorerRestoreWithContextCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + importer := &FakeBalancedFileImporter{} + workerPool := util.NewWorkerPool(2, "multi-tables-restorer") + + restorer := restore.NewMultiTablesRestorer(ctx, importer, workerPool, nil) + + fileSets := createSampleBatchFileSets() + + // Cancel context before restore completes + cancel() + err := restorer.GoRestore(func(int64) {}, fileSets) + require.ErrorIs(t, err, context.Canceled) +} + +// FakeSplitStrategy implements split.SplitStrategy for testing purposes +type FakeSplitStrategy[T any] struct { + shouldSplit bool + accumulated []T +} + +// ShouldSkip determines if a given item should be skipped. For testing, this is hardcoded to `false`. +func (f *FakeSplitStrategy[T]) ShouldSkip(item T) bool { + return false +} + +// Accumulate adds a new item to the accumulated list. +func (f *FakeSplitStrategy[T]) Accumulate(item T) { + f.accumulated = append(f.accumulated, item) +} + +// ShouldSplit returns whether the accumulated items meet the condition for splitting. +func (f *FakeSplitStrategy[T]) ShouldSplit() bool { + return f.shouldSplit +} + +// ResetAccumulations clears the accumulated items. +func (f *FakeSplitStrategy[T]) ResetAccumulations() { + f.accumulated = []T{} +} + +// GetAccumulations returns an iterator for the accumulated items. 
+func (f *FakeSplitStrategy[T]) GetAccumulations() *split.SplitHelperIterator { + rewrites, ok := any(f.accumulated).([]*split.RewriteSplitter) + if !ok { + panic("GetAccumulations called with non-*split.RewriteSplitter type") + } + return split.NewSplitHelperIterator(rewrites) +} + +// FakeRegionsSplitter is a mock of the RegionsSplitter that records calls to ExecuteRegions +type FakeRegionsSplitter struct { + split.Splitter + executedSplitsCount int + expectedEndKeys [][]byte +} + +func (f *FakeRegionsSplitter) ExecuteRegions(ctx context.Context, items *split.SplitHelperIterator) error { + items.Traverse(func(v split.Valued, endKey []byte, rule *restoreutils.RewriteRules) bool { + f.expectedEndKeys = append(f.expectedEndKeys, endKey) + return true + }) + f.executedSplitsCount += 1 + return nil +} + +func TestWithSplitWithoutTriggersSplit(t *testing.T) { + ctx := context.Background() + fakeSplitter := &FakeRegionsSplitter{ + executedSplitsCount: 0, + } + strategy := &FakeSplitStrategy[string]{shouldSplit: false} + wrapper := &restore.PipelineRestorerWrapper[string]{PipelineRegionsSplitter: fakeSplitter} + + items := iter.FromSlice([]string{"item1", "item2", "item3"}) + splitIter := wrapper.WithSplit(ctx, items, strategy) + + for i := splitIter.TryNext(ctx); !i.Finished; i = splitIter.TryNext(ctx) { + } + + require.Equal(t, fakeSplitter.executedSplitsCount, 0) +} +func TestWithSplitAccumulateAndReset(t *testing.T) { + ctx := context.Background() + fakeSplitter := &FakeRegionsSplitter{} + strategy := &FakeSplitStrategy[*split.RewriteSplitter]{shouldSplit: true} + wrapper := &restore.PipelineRestorerWrapper[*split.RewriteSplitter]{PipelineRegionsSplitter: fakeSplitter} + + // Create RewriteSplitter items + items := iter.FromSlice([]*split.RewriteSplitter{ + split.NewRewriteSpliter([]byte("t_1"), 1, nil, split.NewSplitHelper()), + split.NewRewriteSpliter([]byte("t_2"), 2, nil, split.NewSplitHelper()), + }) + splitIter := wrapper.WithSplit(ctx, items, strategy) + + // Traverse through the split iterator + for i := splitIter.TryNext(ctx); !i.Finished; i = splitIter.TryNext(ctx) { + } + + endKeys := [][]byte{ + codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(2)), + codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(3)), + } + + // Verify that the split happened and the accumulation was reset + require.ElementsMatch(t, endKeys, fakeSplitter.expectedEndKeys) + require.Equal(t, 2, fakeSplitter.executedSplitsCount) + require.Empty(t, strategy.accumulated) +} diff --git a/br/pkg/restore/snap_client/BUILD.bazel b/br/pkg/restore/snap_client/BUILD.bazel index d77985e92b808..b9abcd2e99f7d 100644 --- a/br/pkg/restore/snap_client/BUILD.bazel +++ b/br/pkg/restore/snap_client/BUILD.bazel @@ -65,7 +65,6 @@ go_library( "@org_golang_x_sync//errgroup", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", - "@org_uber_go_zap//zapcore", ], ) diff --git a/br/pkg/restore/snap_client/client.go b/br/pkg/restore/snap_client/client.go index 957ec300cff94..a7e0ecab3d230 100644 --- a/br/pkg/restore/snap_client/client.go +++ b/br/pkg/restore/snap_client/client.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/checkpoint" "github.com/pingcap/tidb/br/pkg/checksum" @@ -66,15 +67,16 @@ const ( strictPlacementPolicyMode = "STRICT" ignorePlacementPolicyMode = "IGNORE" - defaultDDLConcurrency = 100 - maxSplitKeysOnce = 10240 
+ resetSpeedLimitRetryTimes = 3 + defaultDDLConcurrency = 100 + maxSplitKeysOnce = 10240 ) const minBatchDdlSize = 1 type SnapClient struct { + restorer restore.SstRestorer // Tool clients used by SnapClient - fileImporter *SnapFileImporter pdClient pd.Client pdHTTPClient pdhttp.Client @@ -91,8 +93,7 @@ type SnapClient struct { supportPolicy bool workerPool *tidbutil.WorkerPool - noSchema bool - hasSpeedLimited bool + noSchema bool databases map[string]*metautil.Database ddlJobs []*model.Job @@ -167,6 +168,10 @@ func NewRestoreClient( } } +func (rc *SnapClient) GetRestorer() restore.SstRestorer { + return rc.restorer +} + func (rc *SnapClient) closeConn() { // rc.db can be nil in raw kv mode. if rc.db != nil { @@ -182,8 +187,10 @@ func (rc *SnapClient) Close() { // close the connection, and it must be succeed when in SQL mode. rc.closeConn() - if err := rc.fileImporter.Close(); err != nil { - log.Warn("failed to close file importer") + if rc.restorer != nil { + if err := rc.restorer.Close(); err != nil { + log.Warn("failed to close file restorer") + } } log.Info("Restore client closed") @@ -339,18 +346,18 @@ func (rc *SnapClient) InitCheckpoint( } // t1 is the latest time the checkpoint ranges persisted to the external storage. - t1, err := checkpoint.LoadCheckpointDataForSnapshotRestore(ctx, execCtx, func(tableID int64, rangeKey checkpoint.RestoreValueType) { + t1, err := checkpoint.LoadCheckpointDataForSnapshotRestore(ctx, execCtx, func(tableID int64, v checkpoint.RestoreValueType) { checkpointSet, exists := checkpointSetWithTableID[tableID] if !exists { checkpointSet = make(map[string]struct{}) checkpointSetWithTableID[tableID] = checkpointSet } - checkpointSet[rangeKey.RangeKey] = struct{}{} + checkpointSet[v.RangeKey] = struct{}{} }) if err != nil { return checkpointSetWithTableID, nil, errors.Trace(err) } - // t2 is the latest time the checkpoint checksum persisted to the external storage. 
+ checkpointChecksum, t2, err := checkpoint.LoadCheckpointChecksumForRestore(ctx, execCtx) if err != nil { return checkpointSetWithTableID, nil, errors.Trace(err) @@ -445,7 +452,30 @@ func (rc *SnapClient) Init(g glue.Glue, store kv.Storage) error { return errors.Trace(err) } -func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.StorageBackend, isRawKvMode bool, isTxnKvMode bool) error { +func SetSpeedLimitFn(ctx context.Context, stores []*metapb.Store, pool *tidbutil.WorkerPool) func(*SnapFileImporter, uint64) error { + return func(importer *SnapFileImporter, limit uint64) error { + eg, ectx := errgroup.WithContext(ctx) + for _, store := range stores { + if err := ectx.Err(); err != nil { + return errors.Trace(err) + } + + finalStore := store + pool.ApplyOnErrorGroup(eg, + func() error { + err := importer.SetDownloadSpeedLimit(ectx, finalStore.GetId(), limit) + if err != nil { + return errors.Trace(err) + } + return nil + }) + } + return eg.Wait() + } +} + +func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.StorageBackend, isRawKvMode bool, isTxnKvMode bool, + RawStartKey, RawEndKey []byte) error { stores, err := conn.GetAllTiKVStoresWithRetry(ctx, rc.pdClient, util.SkipTiFlash) if err != nil { return errors.Annotate(err, "failed to get stores") @@ -453,15 +483,73 @@ func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.Storage rc.storeCount = len(stores) rc.updateConcurrency() + var createCallBacks []func(*SnapFileImporter) error + var closeCallBacks []func(*SnapFileImporter) error var splitClientOpts []split.ClientOptionalParameter if isRawKvMode { splitClientOpts = append(splitClientOpts, split.WithRawKV()) + createCallBacks = append(createCallBacks, func(importer *SnapFileImporter) error { + return importer.SetRawRange(RawStartKey, RawEndKey) + }) + } + createCallBacks = append(createCallBacks, func(importer *SnapFileImporter) error { + return importer.CheckMultiIngestSupport(ctx, stores) + }) + if rc.rateLimit != 0 { + setFn := SetSpeedLimitFn(ctx, stores, rc.workerPool) + createCallBacks = append(createCallBacks, func(importer *SnapFileImporter) error { + return setFn(importer, rc.rateLimit) + }) + closeCallBacks = append(closeCallBacks, func(importer *SnapFileImporter) error { + // In future we may need a mechanism to set speed limit in ttl. like what we do in switchmode. TODO + var resetErr error + for retry := 0; retry < resetSpeedLimitRetryTimes; retry++ { + resetErr = setFn(importer, 0) + if resetErr != nil { + log.Warn("failed to reset speed limit, retry it", + zap.Int("retry time", retry), logutil.ShortError(resetErr)) + time.Sleep(time.Duration(retry+3) * time.Second) + continue + } + break + } + if resetErr != nil { + log.Error("failed to reset speed limit, please reset it manually", zap.Error(resetErr)) + } + return resetErr + }) } metaClient := split.NewClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, maxSplitKeysOnce, rc.storeCount+1, splitClientOpts...) 
importCli := importclient.NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf) - rc.fileImporter, err = NewSnapFileImporter(ctx, metaClient, importCli, backend, isRawKvMode, isTxnKvMode, stores, rc.rewriteMode, rc.concurrencyPerStore) - return errors.Trace(err) + + var fileImporter *SnapFileImporter + opt := NewSnapFileImporterOptions( + rc.cipher, metaClient, importCli, backend, + rc.rewriteMode, stores, rc.concurrencyPerStore, createCallBacks, closeCallBacks, + ) + if isRawKvMode || isTxnKvMode { + mode := Raw + if isTxnKvMode { + mode = Txn + } + // for raw/txn mode. use backupMeta.ApiVersion to create fileImporter + fileImporter, err = NewSnapFileImporter(ctx, rc.backupMeta.ApiVersion, mode, opt) + if err != nil { + return errors.Trace(err) + } + // Raw/Txn restore are not support checkpoint for now + rc.restorer = restore.NewSimpleSstRestorer(ctx, fileImporter, rc.workerPool, nil) + } else { + // or create a fileImporter with the cluster API version + fileImporter, err = NewSnapFileImporter( + ctx, rc.dom.Store().GetCodec().GetAPIVersion(), TiDBFull, opt) + if err != nil { + return errors.Trace(err) + } + rc.restorer = restore.NewMultiTablesRestorer(ctx, fileImporter, rc.workerPool, rc.checkpointRunner) + } + return nil } func (rc *SnapClient) needLoadSchemas(backupMeta *backuppb.BackupMeta) bool { @@ -474,7 +562,10 @@ func (rc *SnapClient) LoadSchemaIfNeededAndInitClient( backupMeta *backuppb.BackupMeta, backend *backuppb.StorageBackend, reader *metautil.MetaReader, - loadStats bool) error { + loadStats bool, + RawStartKey []byte, + RawEndKey []byte, +) error { if rc.needLoadSchemas(backupMeta) { databases, err := metautil.LoadBackupTables(c, reader, loadStats) if err != nil { @@ -499,7 +590,7 @@ func (rc *SnapClient) LoadSchemaIfNeededAndInitClient( rc.backupMeta = backupMeta log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) - return rc.initClients(c, backend, backupMeta.IsRawKv, backupMeta.IsTxnKv) + return rc.initClients(c, backend, backupMeta.IsRawKv, backupMeta.IsTxnKv, RawStartKey, RawEndKey) } // IsRawKvMode checks whether the backup data is in raw kv format, in which case transactional recover is forbidden. 
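As a usage sketch (not part of the patch, and assuming the importer options, files, and rewrite rules are already built), the reworked flow hands a SnapFileImporter to an SstRestorer and drives it through GoRestore/WaitUntilFinish; the function name, worker-pool size, and progress callback below are illustrative assumptions.

    package restoreexample

    import (
        "context"

        backuppb "github.com/pingcap/kvproto/pkg/brpb"
        "github.com/pingcap/kvproto/pkg/kvrpcpb"
        "github.com/pingcap/tidb/br/pkg/restore"
        snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client"
        restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils"
        "github.com/pingcap/tidb/pkg/util"
    )

    func restoreFiles(
        ctx context.Context,
        opt *snapclient.SnapFileImporterOptions, // built via NewSnapFileImporterOptions(...)
        files []*backuppb.File,
        rules *restoreutils.RewriteRules,
    ) error {
        importer, err := snapclient.NewSnapFileImporter(ctx, kvrpcpb.APIVersion_V1, snapclient.TiDBFull, opt)
        if err != nil {
            return err
        }
        // The restorer owns the importer; closing it runs the importer's
        // close callbacks (e.g. resetting the download speed limit).
        restorer := restore.NewSimpleSstRestorer(ctx, importer, util.NewWorkerPool(8, "restore"), nil)
        defer func() {
            _ = restorer.Close()
        }()

        batch := restore.BatchBackupFileSet{
            {SSTFiles: files, RewriteRules: rules},
        }
        if err := restorer.GoRestore(func(done int64) { /* update progress */ }, batch); err != nil {
            return err
        }
        return restorer.WaitUntilFinish()
    }

As initClients above shows, Raw/Txn restores take the NewSimpleSstRestorer path (no checkpointing yet), while full TiDB restores wrap the importer in NewMultiTablesRestorer together with the checkpoint runner.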
@@ -949,47 +1040,6 @@ func (rc *SnapClient) ExecDDLs(ctx context.Context, ddlJobs []*model.Job) error return nil } -func (rc *SnapClient) ResetSpeedLimit(ctx context.Context) error { - rc.hasSpeedLimited = false - err := rc.setSpeedLimit(ctx, 0) - if err != nil { - return errors.Trace(err) - } - return nil -} - -func (rc *SnapClient) setSpeedLimit(ctx context.Context, rateLimit uint64) error { - if !rc.hasSpeedLimited { - stores, err := util.GetAllTiKVStores(ctx, rc.pdClient, util.SkipTiFlash) - if err != nil { - return errors.Trace(err) - } - - eg, ectx := errgroup.WithContext(ctx) - for _, store := range stores { - if err := ectx.Err(); err != nil { - return errors.Trace(err) - } - - finalStore := store - rc.workerPool.ApplyOnErrorGroup(eg, - func() error { - err := rc.fileImporter.SetDownloadSpeedLimit(ectx, finalStore.GetId(), rateLimit) - if err != nil { - return errors.Trace(err) - } - return nil - }) - } - - if err := eg.Wait(); err != nil { - return errors.Trace(err) - } - rc.hasSpeedLimited = true - } - return nil -} - func (rc *SnapClient) execAndValidateChecksum( ctx context.Context, tbl *CreatedTable, @@ -1068,54 +1118,3 @@ func (rc *SnapClient) execAndValidateChecksum( logger.Info("success in validating checksum") return nil } - -func (rc *SnapClient) WaitForFilesRestored(ctx context.Context, files []*backuppb.File, updateCh glue.Progress) error { - errCh := make(chan error, len(files)) - eg, ectx := errgroup.WithContext(ctx) - defer close(errCh) - - for _, file := range files { - fileReplica := file - rc.workerPool.ApplyOnErrorGroup(eg, - func() error { - defer func() { - log.Info("import sst files done", logutil.Files(files)) - updateCh.Inc() - }() - return rc.fileImporter.ImportSSTFiles(ectx, []TableIDWithFiles{{Files: []*backuppb.File{fileReplica}, RewriteRules: restoreutils.EmptyRewriteRule()}}, rc.cipher, rc.backupMeta.ApiVersion) - }) - } - if err := eg.Wait(); err != nil { - return errors.Trace(err) - } - return nil -} - -// RestoreRaw tries to restore raw keys in the specified range. 
-func (rc *SnapClient) RestoreRaw( - ctx context.Context, startKey []byte, endKey []byte, files []*backuppb.File, updateCh glue.Progress, -) error { - start := time.Now() - defer func() { - elapsed := time.Since(start) - log.Info("Restore Raw", - logutil.Key("startKey", startKey), - logutil.Key("endKey", endKey), - zap.Duration("take", elapsed)) - }() - err := rc.fileImporter.SetRawRange(startKey, endKey) - if err != nil { - return errors.Trace(err) - } - - err = rc.WaitForFilesRestored(ctx, files, updateCh) - if err != nil { - return errors.Trace(err) - } - log.Info( - "finish to restore raw range", - logutil.Key("startKey", startKey), - logutil.Key("endKey", endKey), - ) - return nil -} diff --git a/br/pkg/restore/snap_client/client_test.go b/br/pkg/restore/snap_client/client_test.go index dd919646ddf61..4b96877949e23 100644 --- a/br/pkg/restore/snap_client/client_test.go +++ b/br/pkg/restore/snap_client/client_test.go @@ -314,7 +314,7 @@ func TestSetSpeedLimit(t *testing.T) { recordStores = NewRecordStores() start := time.Now() - err := snapclient.MockCallSetSpeedLimit(ctx, FakeImporterClient{}, client, 10) + err := snapclient.MockCallSetSpeedLimit(ctx, mockStores, FakeImporterClient{}, client, 10) cost := time.Since(start) require.NoError(t, err) @@ -337,7 +337,7 @@ func TestSetSpeedLimit(t *testing.T) { split.NewFakePDClient(mockStores, false, nil), nil, nil, split.DefaultTestKeepaliveCfg) // Concurrency needs to be less than the number of stores - err = snapclient.MockCallSetSpeedLimit(ctx, FakeImporterClient{}, client, 2) + err = snapclient.MockCallSetSpeedLimit(ctx, mockStores, FakeImporterClient{}, client, 2) require.Error(t, err) t.Log(err) diff --git a/br/pkg/restore/snap_client/export_test.go b/br/pkg/restore/snap_client/export_test.go index 22b8868217933..09a47b843e279 100644 --- a/br/pkg/restore/snap_client/export_test.go +++ b/br/pkg/restore/snap_client/export_test.go @@ -20,7 +20,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/metautil" + "github.com/pingcap/tidb/br/pkg/restore" importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/pkg/domain" @@ -46,15 +49,26 @@ func MockClient(dbs map[string]*metautil.Database) *SnapClient { } // Mock the call of setSpeedLimit function -func MockCallSetSpeedLimit(ctx context.Context, fakeImportClient importclient.ImporterClient, rc *SnapClient, concurrency uint) (err error) { +func MockCallSetSpeedLimit(ctx context.Context, stores []*metapb.Store, fakeImportClient importclient.ImporterClient, rc *SnapClient, concurrency uint) (err error) { rc.SetRateLimit(42) rc.workerPool = tidbutil.NewWorkerPool(128, "set-speed-limit") - rc.hasSpeedLimited = false - rc.fileImporter, err = NewSnapFileImporter(ctx, nil, fakeImportClient, nil, false, false, nil, rc.rewriteMode, 128) + setFn := SetSpeedLimitFn(ctx, stores, rc.workerPool) + var createCallBacks []func(*SnapFileImporter) error + var closeCallBacks []func(*SnapFileImporter) error + + createCallBacks = append(createCallBacks, func(importer *SnapFileImporter) error { + return setFn(importer, rc.rateLimit) + }) + closeCallBacks = append(createCallBacks, func(importer *SnapFileImporter) error { + return setFn(importer, 0) + }) + opt := NewSnapFileImporterOptions(nil, nil, fakeImportClient, nil, rc.rewriteMode, nil, 128, createCallBacks, 
closeCallBacks) + fileImporter, err := NewSnapFileImporter(ctx, kvrpcpb.APIVersion(0), TiDBFull, opt) + rc.restorer = restore.NewSimpleSstRestorer(ctx, fileImporter, rc.workerPool, nil) if err != nil { return errors.Trace(err) } - return rc.setSpeedLimit(ctx, rc.rateLimit) + return nil } // CreateTables creates multiple tables, and returns their rewrite rules. diff --git a/br/pkg/restore/snap_client/import.go b/br/pkg/restore/snap_client/import.go index a335f651977d7..4e71c6fbe0cc4 100644 --- a/br/pkg/restore/snap_client/import.go +++ b/br/pkg/restore/snap_client/import.go @@ -34,6 +34,7 @@ import ( "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/restore" importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" "github.com/pingcap/tidb/br/pkg/restore/split" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" @@ -52,9 +53,10 @@ import ( type KvMode int const ( - TiDB KvMode = iota + TiDBFull KvMode = iota Raw Txn + TiDBCompcated ) const ( @@ -129,6 +131,9 @@ func newStoreTokenChannelMap(stores []*metapb.Store, bufferSize uint) *storeToke } type SnapFileImporter struct { + cipher *backuppb.CipherInfo + apiVersion kvrpcpb.APIVersion + metaClient split.SplitClient importClient importclient.ImporterClient backend *backuppb.StorageBackend @@ -136,6 +141,8 @@ type SnapFileImporter struct { downloadTokensMap *storeTokenChannelMap ingestTokensMap *storeTokenChannelMap + closeCallbacks []func(*SnapFileImporter) error + concurrencyPerStore uint kvMode KvMode @@ -147,43 +154,92 @@ type SnapFileImporter struct { cond *sync.Cond } -func NewSnapFileImporter( - ctx context.Context, +type SnapFileImporterOptions struct { + cipher *backuppb.CipherInfo + metaClient split.SplitClient + importClient importclient.ImporterClient + backend *backuppb.StorageBackend + rewriteMode RewriteMode + tikvStores []*metapb.Store + + concurrencyPerStore uint + createCallBacks []func(*SnapFileImporter) error + closeCallbacks []func(*SnapFileImporter) error +} + +func NewSnapFileImporterOptions( + cipher *backuppb.CipherInfo, metaClient split.SplitClient, importClient importclient.ImporterClient, backend *backuppb.StorageBackend, - isRawKvMode bool, - isTxnKvMode bool, - tikvStores []*metapb.Store, rewriteMode RewriteMode, + tikvStores []*metapb.Store, concurrencyPerStore uint, -) (*SnapFileImporter, error) { - kvMode := TiDB - if isRawKvMode { - kvMode = Raw - } - if isTxnKvMode { - kvMode = Txn - } - - fileImporter := &SnapFileImporter{ + createCallbacks []func(*SnapFileImporter) error, + closeCallbacks []func(*SnapFileImporter) error, +) *SnapFileImporterOptions { + return &SnapFileImporterOptions{ + cipher: cipher, metaClient: metaClient, + importClient: importClient, backend: backend, + rewriteMode: rewriteMode, + tikvStores: tikvStores, + concurrencyPerStore: concurrencyPerStore, + createCallBacks: createCallbacks, + closeCallbacks: closeCallbacks, + } +} + +func NewSnapFileImporterOptionsForTest( + splitClient split.SplitClient, + importClient importclient.ImporterClient, + tikvStores []*metapb.Store, + rewriteMode RewriteMode, + concurrencyPerStore uint, +) *SnapFileImporterOptions { + return &SnapFileImporterOptions{ + metaClient: splitClient, importClient: importClient, - downloadTokensMap: newStoreTokenChannelMap(tikvStores, concurrencyPerStore), - ingestTokensMap: newStoreTokenChannelMap(tikvStores, concurrencyPerStore), - kvMode: kvMode, + tikvStores: tikvStores, rewriteMode: rewriteMode, 
- cacheKey: fmt.Sprintf("BR-%s-%d", time.Now().Format("20060102150405"), rand.Int63()), concurrencyPerStore: concurrencyPerStore, + } +} + +func NewSnapFileImporter( + ctx context.Context, + apiVersion kvrpcpb.APIVersion, + kvMode KvMode, + options *SnapFileImporterOptions, +) (*SnapFileImporter, error) { + fileImporter := &SnapFileImporter{ + apiVersion: apiVersion, + kvMode: kvMode, + + cipher: options.cipher, + metaClient: options.metaClient, + backend: options.backend, + importClient: options.importClient, + downloadTokensMap: newStoreTokenChannelMap(options.tikvStores, options.concurrencyPerStore), + ingestTokensMap: newStoreTokenChannelMap(options.tikvStores, options.concurrencyPerStore), + rewriteMode: options.rewriteMode, + cacheKey: fmt.Sprintf("BR-%s-%d", time.Now().Format("20060102150405"), rand.Int63()), + concurrencyPerStore: options.concurrencyPerStore, cond: sync.NewCond(new(sync.Mutex)), + closeCallbacks: options.closeCallbacks, } - err := fileImporter.checkMultiIngestSupport(ctx, tikvStores) - return fileImporter, errors.Trace(err) + for _, f := range options.createCallBacks { + err := f(fileImporter) + if err != nil { + return nil, errors.Trace(err) + } + } + return fileImporter, nil } -func (importer *SnapFileImporter) WaitUntilUnblock() { +func (importer *SnapFileImporter) PauseForBackpressure() { importer.cond.L.Lock() for importer.ShouldBlock() { // wait for download worker notified @@ -209,6 +265,12 @@ func (importer *SnapFileImporter) releaseToken(tokenCh chan struct{}) { func (importer *SnapFileImporter) Close() error { if importer != nil && importer.importClient != nil { + for _, f := range importer.closeCallbacks { + err := f(importer) + if err != nil { + log.Warn("failed on close snap importer", zap.Error(err)) + } + } return importer.importClient.CloseGrpcClient() } return nil @@ -222,8 +284,8 @@ func (importer *SnapFileImporter) SetDownloadSpeedLimit(ctx context.Context, sto return errors.Trace(err) } -// checkMultiIngestSupport checks whether all stores support multi-ingest -func (importer *SnapFileImporter) checkMultiIngestSupport(ctx context.Context, tikvStores []*metapb.Store) error { +// CheckMultiIngestSupport checks whether all stores support multi-ingest +func (importer *SnapFileImporter) CheckMultiIngestSupport(ctx context.Context, tikvStores []*metapb.Store) error { storeIDs := make([]uint64, 0, len(tikvStores)) for _, s := range tikvStores { if s.State != metapb.StoreState_Up { @@ -274,7 +336,7 @@ func getKeyRangeByMode(mode KvMode) func(f *backuppb.File, rules *restoreutils.R // getKeyRangeForFiles gets the maximum range on files. func (importer *SnapFileImporter) getKeyRangeForFiles( - filesGroup []TableIDWithFiles, + filesGroup []restore.BackupFileSet, ) ([]byte, []byte, error) { var ( startKey, endKey []byte @@ -283,7 +345,7 @@ func (importer *SnapFileImporter) getKeyRangeForFiles( ) getRangeFn := getKeyRangeByMode(importer.kvMode) for _, files := range filesGroup { - for _, f := range files.Files { + for _, f := range files.SSTFiles { start, end, err = getRangeFn(f, files.RewriteRules) if err != nil { return nil, nil, errors.Trace(err) @@ -300,17 +362,15 @@ func (importer *SnapFileImporter) getKeyRangeForFiles( return startKey, endKey, nil } -// ImportSSTFiles tries to import a file. +// Import tries to import a file. // Assert 1: All rewrite rules must contain raw key prefix. // Assert 2: len(filesGroup[any].Files) > 0. 
-func (importer *SnapFileImporter) ImportSSTFiles( +func (importer *SnapFileImporter) Import( ctx context.Context, - filesGroup []TableIDWithFiles, - cipher *backuppb.CipherInfo, - apiVersion kvrpcpb.APIVersion, + backupFileSets ...restore.BackupFileSet, ) error { // Rewrite the start key and end key of file to scan regions - startKey, endKey, err := importer.getKeyRangeForFiles(filesGroup) + startKey, endKey, err := importer.getKeyRangeForFiles(backupFileSets) if err != nil { return errors.Trace(err) } @@ -329,7 +389,7 @@ func (importer *SnapFileImporter) ImportSSTFiles( for _, regionInfo := range regionInfos { info := regionInfo // Try to download file. - downloadMetas, errDownload := importer.download(ctx, info, filesGroup, cipher, apiVersion) + downloadMetas, errDownload := importer.download(ctx, info, backupFileSets, importer.cipher, importer.apiVersion) if errDownload != nil { log.Warn("download file failed, retry later", logutil.Region(info.Region), @@ -355,11 +415,11 @@ func (importer *SnapFileImporter) ImportSSTFiles( return nil }, utils.NewImportSSTBackoffer()) if err != nil { - log.Error("import sst file failed after retry, stop the whole progress", zapFilesGroup(filesGroup), zap.Error(err)) + log.Error("import sst file failed after retry, stop the whole progress", restore.ZapBatchBackupFileSet(backupFileSets), zap.Error(err)) return errors.Trace(err) } - for _, files := range filesGroup { - for _, f := range files.Files { + for _, files := range backupFileSets { + for _, f := range files.SSTFiles { summary.CollectSuccessUnit(summary.TotalKV, 1, f.TotalKvs) summary.CollectSuccessUnit(summary.TotalBytes, 1, f.TotalBytes) } @@ -452,7 +512,7 @@ func getSSTMetaFromFile( func (importer *SnapFileImporter) download( ctx context.Context, regionInfo *split.RegionInfo, - filesGroup []TableIDWithFiles, + filesGroup []restore.BackupFileSet, cipher *backuppb.CipherInfo, apiVersion kvrpcpb.APIVersion, ) ([]*import_sstpb.SSTMeta, error) { @@ -476,7 +536,7 @@ func (importer *SnapFileImporter) download( e = status.Error(codes.Unavailable, "the connection to TiKV has been cut by a neko, meow :3") }) if isDecryptSstErr(e) { - log.Info("fail to decrypt when download sst, try again with no-crypt", zapFilesGroup(filesGroup)) + log.Info("fail to decrypt when download sst, try again with no-crypt") if importer.kvMode == Raw || importer.kvMode == Txn { downloadMetas, e = importer.downloadRawKVSST(ctx, regionInfo, filesGroup, nil, apiVersion) } else { @@ -558,7 +618,7 @@ func (importer *SnapFileImporter) buildDownloadRequest( func (importer *SnapFileImporter) downloadSST( ctx context.Context, regionInfo *split.RegionInfo, - filesGroup []TableIDWithFiles, + filesGroup []restore.BackupFileSet, cipher *backuppb.CipherInfo, apiVersion kvrpcpb.APIVersion, ) ([]*import_sstpb.SSTMeta, error) { @@ -567,7 +627,7 @@ func (importer *SnapFileImporter) downloadSST( resultMetasMap := make(map[string]*import_sstpb.SSTMeta) downloadReqsMap := make(map[string]*import_sstpb.DownloadRequest) for _, files := range filesGroup { - for _, file := range files.Files { + for _, file := range files.SSTFiles { req, sstMeta, err := importer.buildDownloadRequest(file, files.RewriteRules, regionInfo, cipher) if err != nil { return nil, errors.Trace(err) @@ -650,13 +710,13 @@ func (importer *SnapFileImporter) downloadSST( func (importer *SnapFileImporter) downloadRawKVSST( ctx context.Context, regionInfo *split.RegionInfo, - filesGroup []TableIDWithFiles, + filesGroup []restore.BackupFileSet, cipher *backuppb.CipherInfo, apiVersion 
kvrpcpb.APIVersion, ) ([]*import_sstpb.SSTMeta, error) { downloadMetas := make([]*import_sstpb.SSTMeta, 0, len(filesGroup)*2+1) for _, files := range filesGroup { - for _, file := range files.Files { + for _, file := range files.SSTFiles { // Empty rule var rule import_sstpb.RewriteRule sstMeta, err := getSSTMetaFromFile(file, regionInfo.Region, &rule, RewriteModeLegacy) diff --git a/br/pkg/restore/snap_client/import_test.go b/br/pkg/restore/snap_client/import_test.go index 324b2ec9a007a..9d9c79fe1a6f6 100644 --- a/br/pkg/restore/snap_client/import_test.go +++ b/br/pkg/restore/snap_client/import_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/tidb/br/pkg/restore" importclient "github.com/pingcap/tidb/br/pkg/restore/internal/import_client" snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" "github.com/pingcap/tidb/br/pkg/restore/split" @@ -74,7 +75,7 @@ func TestGetKeyRangeByMode(t *testing.T) { require.Equal(t, []byte(""), end) // normal kv: the keys must be encoded. - testFn := snapclient.GetKeyRangeByMode(snapclient.TiDB) + testFn := snapclient.GetKeyRangeByMode(snapclient.TiDBFull) start, end, err = testFn(file, rule) require.NoError(t, err) require.Equal(t, codec.EncodeBytes(nil, []byte("t2a")), start) @@ -161,7 +162,8 @@ func TestSnapImporter(t *testing.T) { splitClient.AppendPdRegion(region) } importClient := newFakeImporterClient() - importer, err := snapclient.NewSnapFileImporter(ctx, splitClient, importClient, nil, false, false, generateStores(), snapclient.RewriteModeKeyspace, 10) + opt := snapclient.NewSnapFileImporterOptionsForTest(splitClient, importClient, generateStores(), snapclient.RewriteModeKeyspace, 10) + importer, err := snapclient.NewSnapFileImporter(ctx, kvrpcpb.APIVersion_V1, snapclient.TiDBFull, opt) require.NoError(t, err) err = importer.SetDownloadSpeedLimit(ctx, 1, 5) require.NoError(t, err) @@ -170,8 +172,8 @@ func TestSnapImporter(t *testing.T) { require.Error(t, err) files, rules := generateFiles() for _, file := range files { - importer.WaitUntilUnblock() - err = importer.ImportSSTFiles(ctx, []snapclient.TableIDWithFiles{{Files: []*backuppb.File{file}, RewriteRules: rules}}, nil, kvrpcpb.APIVersion_V1) + importer.PauseForBackpressure() + err = importer.Import(ctx, restore.BackupFileSet{SSTFiles: []*backuppb.File{file}, RewriteRules: rules}) require.NoError(t, err) } err = importer.Close() @@ -185,14 +187,15 @@ func TestSnapImporterRaw(t *testing.T) { splitClient.AppendPdRegion(region) } importClient := newFakeImporterClient() - importer, err := snapclient.NewSnapFileImporter(ctx, splitClient, importClient, nil, true, false, generateStores(), snapclient.RewriteModeKeyspace, 10) + opt := snapclient.NewSnapFileImporterOptionsForTest(splitClient, importClient, generateStores(), snapclient.RewriteModeKeyspace, 10) + importer, err := snapclient.NewSnapFileImporter(ctx, kvrpcpb.APIVersion_V1, snapclient.Raw, opt) require.NoError(t, err) err = importer.SetRawRange([]byte(""), []byte("")) require.NoError(t, err) files, rules := generateFiles() for _, file := range files { - importer.WaitUntilUnblock() - err = importer.ImportSSTFiles(ctx, []snapclient.TableIDWithFiles{{Files: []*backuppb.File{file}, RewriteRules: rules}}, nil, kvrpcpb.APIVersion_V1) + importer.PauseForBackpressure() + err = importer.Import(ctx, restore.BackupFileSet{SSTFiles: []*backuppb.File{file}, RewriteRules: rules}) require.NoError(t, err) } err = 
importer.Close() diff --git a/br/pkg/restore/snap_client/pipeline_items.go b/br/pkg/restore/snap_client/pipeline_items.go index 3f74434e72f02..8f1b7c129202f 100644 --- a/br/pkg/restore/snap_client/pipeline_items.go +++ b/br/pkg/restore/snap_client/pipeline_items.go @@ -19,10 +19,8 @@ import ( "time" "github.com/pingcap/errors" - backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/glue" - "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/storage" @@ -34,7 +32,6 @@ import ( "github.com/pingcap/tidb/pkg/util/engine" pdhttp "github.com/tikv/pd/client/http" "go.uber.org/zap" - "go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" ) @@ -58,50 +55,6 @@ type PhysicalTable struct { RewriteRules *restoreutils.RewriteRules } -type TableIDWithFiles struct { - TableID int64 - - Files []*backuppb.File - // RewriteRules is the rewrite rules for the specify table. - // because these rules belongs to the *one table*. - // we can hold them here. - RewriteRules *restoreutils.RewriteRules -} - -type zapFilesGroupMarshaler []TableIDWithFiles - -// MarshalLogObjectForFiles is an internal util function to zap something having `Files` field. -func MarshalLogObjectForFiles(files []TableIDWithFiles, encoder zapcore.ObjectEncoder) error { - return zapFilesGroupMarshaler(files).MarshalLogObject(encoder) -} - -func (fgs zapFilesGroupMarshaler) MarshalLogObject(encoder zapcore.ObjectEncoder) error { - elements := make([]string, 0) - total := 0 - totalKVs := uint64(0) - totalBytes := uint64(0) - totalSize := uint64(0) - for _, fg := range fgs { - for _, f := range fg.Files { - total += 1 - elements = append(elements, f.GetName()) - totalKVs += f.GetTotalKvs() - totalBytes += f.GetTotalBytes() - totalSize += f.GetSize_() - } - } - encoder.AddInt("total", total) - _ = encoder.AddArray("files", logutil.AbbreviatedArrayMarshaler(elements)) - encoder.AddUint64("totalKVs", totalKVs) - encoder.AddUint64("totalBytes", totalBytes) - encoder.AddUint64("totalSize", totalSize) - return nil -} - -func zapFilesGroup(filesGroup []TableIDWithFiles) zap.Field { - return zap.Object("files", zapFilesGroupMarshaler(filesGroup)) -} - func defaultOutputTableChan() chan *CreatedTable { return make(chan *CreatedTable, defaultChannelSize) } diff --git a/br/pkg/restore/snap_client/tikv_sender.go b/br/pkg/restore/snap_client/tikv_sender.go index 85aeac0d76f24..57f73835beda7 100644 --- a/br/pkg/restore/snap_client/tikv_sender.go +++ b/br/pkg/restore/snap_client/tikv_sender.go @@ -25,15 +25,13 @@ import ( "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" - "github.com/pingcap/tidb/br/pkg/checkpoint" - "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/restore" "github.com/pingcap/tidb/br/pkg/restore/split" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/pkg/tablecodec" "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) func getSortedPhysicalTables(createdTables []*CreatedTable) []*PhysicalTable { @@ -91,7 +89,7 @@ func mapTableToFiles(files []*backuppb.File) (map[int64][]*backuppb.File, int) { } // filterOutFiles filters out files that exist in the checkpoint set. 
-func filterOutFiles(checkpointSet map[string]struct{}, files []*backuppb.File, updateCh glue.Progress) []*backuppb.File { +func filterOutFiles(checkpointSet map[string]struct{}, files []*backuppb.File, onProgress func(int64)) []*backuppb.File { progress := int(0) totalKVs := uint64(0) totalBytes := uint64(0) @@ -110,7 +108,7 @@ func filterOutFiles(checkpointSet map[string]struct{}, files []*backuppb.File, u } if progress > 0 { // (split/scatter + download/ingest) / (default cf + write cf) - updateCh.IncBy(int64(progress) * 2 / 2) + onProgress(int64(progress) * 2 / 2) summary.CollectSuccessUnit(summary.TotalKV, progress, totalKVs) summary.CollectSuccessUnit(summary.SkippedKVCountByCheckpoint, progress, totalKVs) summary.CollectSuccessUnit(summary.TotalBytes, progress, totalBytes) @@ -130,8 +128,8 @@ func SortAndValidateFileRanges( checkpointSetWithTableID map[int64]map[string]struct{}, splitSizeBytes, splitKeyCount uint64, splitOnTable bool, - updateCh glue.Progress, -) ([][]byte, [][]TableIDWithFiles, error) { + onProgress func(int64), +) ([][]byte, []restore.BatchBackupFileSet, error) { sortedPhysicalTables := getSortedPhysicalTables(createdTables) // mapping table ID to its backup files fileOfTable, hintSplitKeyCount := mapTableToFiles(allFiles) @@ -144,8 +142,8 @@ func SortAndValidateFileRanges( lastKey []byte = nil // group the files by the generated split keys - tableIDWithFilesGroup = make([][]TableIDWithFiles, 0, hintSplitKeyCount) - lastFilesGroup []TableIDWithFiles = nil + tableIDWithFilesGroup = make([]restore.BatchBackupFileSet, 0, hintSplitKeyCount) + lastFilesGroup restore.BatchBackupFileSet = nil // statistic mergedRangeCount = 0 @@ -232,17 +230,17 @@ func SortAndValidateFileRanges( // checkpoint filter out the import done files in the previous restore executions. // Notice that skip ranges after select split keys in order to make the split keys // always the same. - newFiles := filterOutFiles(checkpointSet, rg.Files, updateCh) + newFiles := filterOutFiles(checkpointSet, rg.Files, onProgress) // append the new files into the group if len(newFiles) > 0 { if len(lastFilesGroup) == 0 || lastFilesGroup[len(lastFilesGroup)-1].TableID != table.NewPhysicalID { - lastFilesGroup = append(lastFilesGroup, TableIDWithFiles{ + lastFilesGroup = append(lastFilesGroup, restore.BackupFileSet{ TableID: table.NewPhysicalID, - Files: nil, + SSTFiles: nil, RewriteRules: table.RewriteRules, }) } - lastFilesGroup[len(lastFilesGroup)-1].Files = append(lastFilesGroup[len(lastFilesGroup)-1].Files, newFiles...) + lastFilesGroup[len(lastFilesGroup)-1].SSTFiles = append(lastFilesGroup[len(lastFilesGroup)-1].SSTFiles, newFiles...) 
} } @@ -286,7 +284,7 @@ func (rc *SnapClient) RestoreTables( checkpointSetWithTableID map[int64]map[string]struct{}, splitSizeBytes, splitKeyCount uint64, splitOnTable bool, - updateCh glue.Progress, + onProgress func(int64), ) error { if err := placementRuleManager.SetPlacementRule(ctx, createdTables); err != nil { return errors.Trace(err) @@ -299,20 +297,21 @@ func (rc *SnapClient) RestoreTables( }() start := time.Now() - sortedSplitKeys, tableIDWithFilesGroup, err := SortAndValidateFileRanges(createdTables, allFiles, checkpointSetWithTableID, splitSizeBytes, splitKeyCount, splitOnTable, updateCh) + sortedSplitKeys, tableIDWithFilesGroup, err := SortAndValidateFileRanges(createdTables, allFiles, checkpointSetWithTableID, splitSizeBytes, splitKeyCount, splitOnTable, onProgress) if err != nil { return errors.Trace(err) } log.Info("Restore Stage Duration", zap.String("stage", "merge ranges"), zap.Duration("take", time.Since(start))) + newProgress := func(i int64) { onProgress(i) } start = time.Now() - if err = rc.SplitPoints(ctx, sortedSplitKeys, updateCh, false); err != nil { + if err = rc.SplitPoints(ctx, sortedSplitKeys, newProgress, false); err != nil { return errors.Trace(err) } log.Info("Restore Stage Duration", zap.String("stage", "split regions"), zap.Duration("take", time.Since(start))) start = time.Now() - if err = rc.RestoreSSTFiles(ctx, tableIDWithFilesGroup, updateCh); err != nil { + if err = rc.RestoreSSTFiles(ctx, tableIDWithFilesGroup, newProgress); err != nil { return errors.Trace(err) } elapsed := time.Since(start) @@ -327,15 +326,14 @@ func (rc *SnapClient) RestoreTables( func (rc *SnapClient) SplitPoints( ctx context.Context, sortedSplitKeys [][]byte, - updateCh glue.Progress, + onProgress func(int64), isRawKv bool, ) error { splitClientOpts := make([]split.ClientOptionalParameter, 0, 2) splitClientOpts = append(splitClientOpts, split.WithOnSplit(func(keys [][]byte) { - for range keys { - updateCh.Inc() - } + onProgress(int64(len(keys))) })) + // TODO seems duplicate with metaClient. if isRawKv { splitClientOpts = append(splitClientOpts, split.WithRawKV()) } @@ -366,13 +364,9 @@ func getFileRangeKey(f string) string { // RestoreSSTFiles tries to do something prepare work, such as set speed limit, and restore the files. 
func (rc *SnapClient) RestoreSSTFiles( ctx context.Context, - tableIDWithFilesGroup [][]TableIDWithFiles, - updateCh glue.Progress, + tableIDWithFilesGroup []restore.BatchBackupFileSet, + onProgress func(int64), ) (retErr error) { - if err := rc.setSpeedLimit(ctx, rc.rateLimit); err != nil { - return errors.Trace(err) - } - failpoint.Inject("corrupt-files", func(v failpoint.Value) { if cmd, ok := v.(string); ok { switch cmd { @@ -382,7 +376,7 @@ func (rc *SnapClient) RestoreSSTFiles( case "only-last-table-files": // check whether all the files, except last table files, are skipped by checkpoint for _, tableIDWithFiless := range tableIDWithFilesGroup[:len(tableIDWithFilesGroup)-1] { for _, tableIDWithFiles := range tableIDWithFiless { - if len(tableIDWithFiles.Files) > 0 { + if len(tableIDWithFiles.SSTFiles) > 0 { log.Panic("has files but not the last table files") } } @@ -391,69 +385,9 @@ func (rc *SnapClient) RestoreSSTFiles( } }) - return rc.restoreSSTFilesInternal(ctx, tableIDWithFilesGroup, updateCh) -} - -func (rc *SnapClient) restoreSSTFilesInternal( - ctx context.Context, - tableIDWithFilesGroup [][]TableIDWithFiles, - updateCh glue.Progress, -) error { - eg, ectx := errgroup.WithContext(ctx) - for _, tableIDWithFiles := range tableIDWithFilesGroup { - if ectx.Err() != nil { - log.Warn("Restoring encountered error and already stopped, give up remained files.", - logutil.ShortError(ectx.Err())) - // We will fetch the error from the errgroup then (If there were). - // Also note if the parent context has been canceled or something, - // breaking here directly is also a reasonable behavior. - break - } - filesReplica := tableIDWithFiles - rc.fileImporter.WaitUntilUnblock() - rc.workerPool.ApplyOnErrorGroup(eg, func() (restoreErr error) { - fileStart := time.Now() - defer func() { - if restoreErr == nil { - log.Info("import files done", zapFilesGroup(filesReplica), - zap.Duration("take", time.Since(fileStart))) - updateCh.Inc() - } - }() - if importErr := rc.fileImporter.ImportSSTFiles(ectx, filesReplica, rc.cipher, rc.dom.Store().GetCodec().GetAPIVersion()); importErr != nil { - return errors.Trace(importErr) - } - - // the data of this range has been import done - if rc.checkpointRunner != nil && len(filesReplica) > 0 { - for _, filesGroup := range filesReplica { - rangeKeySet := make(map[string]struct{}) - for _, file := range filesGroup.Files { - rangeKey := getFileRangeKey(file.Name) - // Assert that the files having the same rangeKey are all in the current filesGroup.Files - rangeKeySet[rangeKey] = struct{}{} - } - for rangeKey := range rangeKeySet { - // The checkpoint range shows this ranges of kvs has been restored into - // the table corresponding to the table-id. - if err := checkpoint.AppendRangesForRestore(ectx, rc.checkpointRunner, filesGroup.TableID, rangeKey); err != nil { - return errors.Trace(err) - } - } - } - } - - return nil - }) - } - - if err := eg.Wait(); err != nil { - summary.CollectFailureUnit("file", err) - log.Error("restore files failed", zap.Error(err)) - return errors.Trace(err) + retErr = rc.restorer.GoRestore(onProgress, tableIDWithFilesGroup...) + if retErr != nil { + return retErr } - // Once the parent context canceled and there is no task running in the errgroup, - // we may break the for loop without error in the errgroup. (Will this happen?) - // At that time, return the error in the context here. 
- return ctx.Err() + return rc.restorer.WaitUntilFinish() } diff --git a/br/pkg/restore/snap_client/tikv_sender_test.go b/br/pkg/restore/snap_client/tikv_sender_test.go index b23ec6298d40f..b5a38ffc839a3 100644 --- a/br/pkg/restore/snap_client/tikv_sender_test.go +++ b/br/pkg/restore/snap_client/tikv_sender_test.go @@ -22,6 +22,7 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/metautil" + "github.com/pingcap/tidb/br/pkg/restore" snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/pkg/kv" @@ -207,14 +208,14 @@ func key(tableID int64, row int) []byte { return tablecodec.EncodeRowKeyWithHandle(downstreamID(tableID), kv.IntHandle(row)) } -func files(physicalTableID int64, startRows []int, cfs []string) snapclient.TableIDWithFiles { +func files(physicalTableID int64, startRows []int, cfs []string) restore.BackupFileSet { files := make([]*backuppb.File, 0, len(startRows)) for i, startRow := range startRows { files = append(files, &backuppb.File{Name: fmt.Sprintf("file_%d_%d_%s.sst", physicalTableID, startRow, cfs[i])}) } - return snapclient.TableIDWithFiles{ - TableID: downstreamID(physicalTableID), - Files: files, + return restore.BackupFileSet{ + TableID: downstreamID(physicalTableID), + SSTFiles: files, } } @@ -247,7 +248,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { // expected result splitKeys [][]byte - tableIDWithFilesGroups [][]snapclient.TableIDWithFiles + tableIDWithFilesGroups [][]restore.BackupFileSet }{ { // large sst, split-on-table, no checkpoint upstreamTableIDs: []int64{100, 200, 300}, @@ -270,7 +271,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ /*split table key*/ key(202, 2), /*split table key*/ }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d})}, @@ -298,7 +299,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ /*split table key*/ key(202, 2), /*split table key*/ }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, //{files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d})}, @@ -323,7 +324,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(102, 2), key(202, 2), key(202, 3), key(302, 2), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d})}, @@ -351,7 +352,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(102, 2), key(202, 2), key(202, 3), key(302, 2), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, //{files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d})}, @@ -376,7 +377,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 2), /*split table key*/ }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, 
{files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d})}, @@ -404,7 +405,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 2), /*split table key*/ }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, // {files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d})}, @@ -429,7 +430,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 2), key(302, 2), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w}), files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d}), files(302, []int{1}, []string{w})}, {files(100, []int{1, 1}, []string{w, d})}, @@ -455,7 +456,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 2), key(302, 2), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{2, 2}, []string{w, d}), files(302, []int{1}, []string{w})}, }, @@ -475,7 +476,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeyCount: 450, splitOnTable: true, splitKeys: [][]byte{}, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{1, 1, 2, 2}, []string{w, d, w, d})}, {files(302, []int{1}, []string{w})}, @@ -500,7 +501,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeyCount: 450, splitOnTable: true, splitKeys: [][]byte{}, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{2, 2}, []string{w, d})}, {files(302, []int{1}, []string{w})}, @@ -523,7 +524,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(102, 2), key(202, 3), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{1, 1, 2, 2}, []string{w, d, w, d})}, {files(302, []int{1}, []string{w}), files(100, []int{1, 1}, []string{w, d})}, @@ -549,7 +550,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(102, 2), key(202, 3), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{2, 2}, []string{w, d})}, {files(302, []int{1}, []string{w})}, @@ -572,7 +573,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 3), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w}), files(202, []int{1, 1, 2, 2}, []string{w, d, w, d})}, {files(302, []int{1}, []string{w}), files(100, []int{1, 1}, []string{w, d})}, }, @@ -597,7 +598,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 3), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w}), files(202, []int{2, 2}, []string{w, d, w, d})}, {files(302, []int{1}, []string{w})}, }, @@ -619,7 
+620,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 2), key(302, 2), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w}), files(202, []int{1, 1}, []string{w, d})}, {files(202, []int{2, 2}, []string{w, d}), files(302, []int{1}, []string{w})}, {files(100, []int{1, 1}, []string{w, d})}, @@ -645,7 +646,7 @@ func TestSortAndValidateFileRanges(t *testing.T) { splitKeys: [][]byte{ key(202, 2), key(302, 2), key(100, 2), }, - tableIDWithFilesGroups: [][]snapclient.TableIDWithFiles{ + tableIDWithFilesGroups: [][]restore.BackupFileSet{ {files(102, []int{1}, []string{w})}, {files(202, []int{2, 2}, []string{w, d}), files(302, []int{1}, []string{w})}, }, @@ -655,7 +656,8 @@ func TestSortAndValidateFileRanges(t *testing.T) { for i, cs := range cases { t.Log(i) createdTables := generateCreatedTables(t, cs.upstreamTableIDs, cs.upstreamPartitionIDs, downstreamID) - splitKeys, tableIDWithFilesGroups, err := snapclient.SortAndValidateFileRanges(createdTables, cs.files, cs.checkpointSetWithTableID, cs.splitSizeBytes, cs.splitKeyCount, cs.splitOnTable, updateCh) + onProgress := func(i int64) { updateCh.IncBy(i) } + splitKeys, tableIDWithFilesGroups, err := snapclient.SortAndValidateFileRanges(createdTables, cs.files, cs.checkpointSetWithTableID, cs.splitSizeBytes, cs.splitKeyCount, cs.splitOnTable, onProgress) require.NoError(t, err) require.Equal(t, cs.splitKeys, splitKeys) require.Equal(t, len(cs.tableIDWithFilesGroups), len(tableIDWithFilesGroups)) @@ -665,8 +667,8 @@ func TestSortAndValidateFileRanges(t *testing.T) { for j, expectFiles := range expectFilesGroup { actualFiles := actualFilesGroup[j] require.Equal(t, expectFiles.TableID, actualFiles.TableID) - for k, expectFile := range expectFiles.Files { - actualFile := actualFiles.Files[k] + for k, expectFile := range expectFiles.SSTFiles { + actualFile := actualFiles.SSTFiles[k] require.Equal(t, expectFile.Name, actualFile.Name) } } diff --git a/br/pkg/restore/split/BUILD.bazel b/br/pkg/restore/split/BUILD.bazel index e27bb1834d7ac..34eb266023809 100644 --- a/br/pkg/restore/split/BUILD.bazel +++ b/br/pkg/restore/split/BUILD.bazel @@ -7,6 +7,7 @@ go_library( "mock_pd_client.go", "region.go", "split.go", + "splitter.go", "sum_sorted.go", ], importpath = "github.com/pingcap/tidb/br/pkg/restore/split", @@ -32,7 +33,6 @@ go_library( "@com_github_google_btree//:btree", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", - "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/errorpb", "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_pingcap_kvproto//pkg/metapb", @@ -63,7 +63,7 @@ go_test( ], embed = [":split"], flaky = True, - shard_count = 27, + shard_count = 26, deps = [ "//br/pkg/errors", "//br/pkg/restore/utils", @@ -75,10 +75,8 @@ go_test( "//pkg/tablecodec", "//pkg/types", "//pkg/util/codec", - "@com_github_docker_go_units//:go-units", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", - "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_pingcap_kvproto//pkg/metapb", diff --git a/br/pkg/restore/split/mock_pd_client.go b/br/pkg/restore/split/mock_pd_client.go index 6df6cd56f94b0..535b064700d41 100644 --- a/br/pkg/restore/split/mock_pd_client.go +++ b/br/pkg/restore/split/mock_pd_client.go @@ -23,6 +23,8 @@ import ( 
"google.golang.org/grpc/status" ) +// TODO consilodate TestClient and MockPDClientForSplit and FakePDClient +// into one test client. type TestClient struct { SplitClient pd.Client diff --git a/br/pkg/restore/split/split.go b/br/pkg/restore/split/split.go index f7df83cd2e3e9..726c4b89794fc 100644 --- a/br/pkg/restore/split/split.go +++ b/br/pkg/restore/split/split.go @@ -7,25 +7,18 @@ import ( "context" "encoding/hex" goerrors "errors" - "sort" - "sync" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" - backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/logutil" - restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/lightning/config" - "github.com/pingcap/tidb/pkg/tablecodec" - "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/redact" "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) var ( @@ -44,347 +37,6 @@ const ( ScanRegionPaginationLimit = 128 ) -type rewriteSplitter struct { - rewriteKey []byte - tableID int64 - rule *restoreutils.RewriteRules - splitter *SplitHelper -} - -type splitHelperIterator struct { - tableSplitters []*rewriteSplitter -} - -func (iter *splitHelperIterator) Traverse(fn func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool) { - for _, entry := range iter.tableSplitters { - endKey := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(entry.tableID+1)) - rule := entry.rule - entry.splitter.Traverse(func(v Valued) bool { - return fn(v, endKey, rule) - }) - } -} - -type LogSplitHelper struct { - tableSplitter map[int64]*SplitHelper - rules map[int64]*restoreutils.RewriteRules - client SplitClient - pool *util.WorkerPool - eg *errgroup.Group - regionsCh chan []*RegionInfo - - splitThresholdSize uint64 - splitThresholdKeys int64 -} - -func NewLogSplitHelper(rules map[int64]*restoreutils.RewriteRules, client SplitClient, splitSize uint64, splitKeys int64) *LogSplitHelper { - return &LogSplitHelper{ - tableSplitter: make(map[int64]*SplitHelper), - rules: rules, - client: client, - pool: util.NewWorkerPool(128, "split region"), - eg: nil, - - splitThresholdSize: splitSize, - splitThresholdKeys: splitKeys, - } -} - -func (helper *LogSplitHelper) iterator() *splitHelperIterator { - tableSplitters := make([]*rewriteSplitter, 0, len(helper.tableSplitter)) - for tableID, splitter := range helper.tableSplitter { - delete(helper.tableSplitter, tableID) - rewriteRule, exists := helper.rules[tableID] - if !exists { - log.Info("skip splitting due to no table id matched", zap.Int64("tableID", tableID)) - continue - } - newTableID := restoreutils.GetRewriteTableID(tableID, rewriteRule) - if newTableID == 0 { - log.Warn("failed to get the rewrite table id", zap.Int64("tableID", tableID)) - continue - } - tableSplitters = append(tableSplitters, &rewriteSplitter{ - rewriteKey: codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(newTableID)), - tableID: newTableID, - rule: rewriteRule, - splitter: splitter, - }) - } - sort.Slice(tableSplitters, func(i, j int) bool { - return bytes.Compare(tableSplitters[i].rewriteKey, tableSplitters[j].rewriteKey) < 0 - }) - return &splitHelperIterator{ - tableSplitters: tableSplitters, - } -} - -const splitFileThreshold = 1024 * 1024 // 1 MB - -func (helper *LogSplitHelper) skipFile(file *backuppb.DataFileInfo) bool { - _, exist := helper.rules[file.TableId] - return file.Length 
< splitFileThreshold || file.IsMeta || !exist -} - -func (helper *LogSplitHelper) Merge(file *backuppb.DataFileInfo) { - if helper.skipFile(file) { - return - } - splitHelper, exist := helper.tableSplitter[file.TableId] - if !exist { - splitHelper = NewSplitHelper() - helper.tableSplitter[file.TableId] = splitHelper - } - - splitHelper.Merge(Valued{ - Key: Span{ - StartKey: file.StartKey, - EndKey: file.EndKey, - }, - Value: Value{ - Size: file.Length, - Number: file.NumberOfEntries, - }, - }) -} - -type splitFunc = func(context.Context, *RegionSplitter, uint64, int64, *RegionInfo, []Valued) error - -func (helper *LogSplitHelper) splitRegionByPoints( - ctx context.Context, - regionSplitter *RegionSplitter, - initialLength uint64, - initialNumber int64, - region *RegionInfo, - valueds []Valued, -) error { - var ( - splitPoints [][]byte = make([][]byte, 0) - lastKey []byte = region.Region.StartKey - length uint64 = initialLength - number int64 = initialNumber - ) - for _, v := range valueds { - // decode will discard ts behind the key, which results in the same key for consecutive ranges - if !bytes.Equal(lastKey, v.GetStartKey()) && (v.Value.Size+length > helper.splitThresholdSize || v.Value.Number+number > helper.splitThresholdKeys) { - _, rawKey, _ := codec.DecodeBytes(v.GetStartKey(), nil) - splitPoints = append(splitPoints, rawKey) - length = 0 - number = 0 - } - lastKey = v.GetStartKey() - length += v.Value.Size - number += v.Value.Number - } - - if len(splitPoints) == 0 { - return nil - } - - helper.pool.ApplyOnErrorGroup(helper.eg, func() error { - newRegions, errSplit := regionSplitter.ExecuteOneRegion(ctx, region, splitPoints) - if errSplit != nil { - log.Warn("failed to split the scaned region", zap.Error(errSplit)) - sort.Slice(splitPoints, func(i, j int) bool { - return bytes.Compare(splitPoints[i], splitPoints[j]) < 0 - }) - return regionSplitter.ExecuteSortedKeys(ctx, splitPoints) - } - select { - case <-ctx.Done(): - return nil - case helper.regionsCh <- newRegions: - } - log.Info("split the region", zap.Uint64("region-id", region.Region.Id), zap.Int("split-point-number", len(splitPoints))) - return nil - }) - return nil -} - -// SplitPoint selects ranges overlapped with each region, and calls `splitF` to split the region -func SplitPoint( - ctx context.Context, - iter *splitHelperIterator, - client SplitClient, - splitF splitFunc, -) (err error) { - // common status - var ( - regionSplitter *RegionSplitter = NewRegionSplitter(client) - ) - // region traverse status - var ( - // the region buffer of each scan - regions []*RegionInfo = nil - regionIndex int = 0 - ) - // region split status - var ( - // range span +----------------+------+---+-------------+ - // region span +------------------------------------+ - // +initial length+ +end valued+ - // regionValueds is the ranges array overlapped with `regionInfo` - regionValueds []Valued = nil - // regionInfo is the region to be split - regionInfo *RegionInfo = nil - // intialLength is the length of the part of the first range overlapped with the region - initialLength uint64 = 0 - initialNumber int64 = 0 - ) - // range status - var ( - // regionOverCount is the number of regions overlapped with the range - regionOverCount uint64 = 0 - ) - - iter.Traverse(func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool { - if v.Value.Number == 0 || v.Value.Size == 0 { - return true - } - var ( - vStartKey []byte - vEndKey []byte - ) - // use `vStartKey` and `vEndKey` to compare with region's key - vStartKey, vEndKey, err = 
restoreutils.GetRewriteEncodedKeys(v, rule) - if err != nil { - return false - } - // traverse to the first region overlapped with the range - for ; regionIndex < len(regions); regionIndex++ { - if bytes.Compare(vStartKey, regions[regionIndex].Region.EndKey) < 0 { - break - } - } - // cannot find any regions overlapped with the range - // need to scan regions again - if regionIndex == len(regions) { - regions = nil - } - regionOverCount = 0 - for { - if regionIndex >= len(regions) { - var startKey []byte - if len(regions) > 0 { - // has traversed over the region buffer, should scan from the last region's end-key of the region buffer - startKey = regions[len(regions)-1].Region.EndKey - } else { - // scan from the range's start-key - startKey = vStartKey - } - // scan at most 64 regions into the region buffer - regions, err = ScanRegionsWithRetry(ctx, client, startKey, endKey, 64) - if err != nil { - return false - } - regionIndex = 0 - } - - region := regions[regionIndex] - // this region must be overlapped with the range - regionOverCount++ - // the region is the last one overlapped with the range, - // should split the last recorded region, - // and then record this region as the region to be split - if bytes.Compare(vEndKey, region.Region.EndKey) < 0 { - endLength := v.Value.Size / regionOverCount - endNumber := v.Value.Number / int64(regionOverCount) - if len(regionValueds) > 0 && regionInfo != region { - // add a part of the range as the end part - if bytes.Compare(vStartKey, regionInfo.Region.EndKey) < 0 { - regionValueds = append(regionValueds, NewValued(vStartKey, regionInfo.Region.EndKey, Value{Size: endLength, Number: endNumber})) - } - // try to split the region - err = splitF(ctx, regionSplitter, initialLength, initialNumber, regionInfo, regionValueds) - if err != nil { - return false - } - regionValueds = make([]Valued, 0) - } - if regionOverCount == 1 { - // the region completely contains the range - regionValueds = append(regionValueds, Valued{ - Key: Span{ - StartKey: vStartKey, - EndKey: vEndKey, - }, - Value: v.Value, - }) - } else { - // the region is overlapped with the last part of the range - initialLength = endLength - initialNumber = endNumber - } - regionInfo = region - // try the next range - return true - } - - // try the next region - regionIndex++ - } - }) - - if err != nil { - return errors.Trace(err) - } - if len(regionValueds) > 0 { - // try to split the region - err = splitF(ctx, regionSplitter, initialLength, initialNumber, regionInfo, regionValueds) - if err != nil { - return errors.Trace(err) - } - } - - return nil -} - -func (helper *LogSplitHelper) Split(ctx context.Context) error { - var ectx context.Context - var wg sync.WaitGroup - helper.eg, ectx = errgroup.WithContext(ctx) - helper.regionsCh = make(chan []*RegionInfo, 1024) - wg.Add(1) - go func() { - defer wg.Done() - scatterRegions := make([]*RegionInfo, 0) - receiveNewRegions: - for { - select { - case <-ctx.Done(): - return - case newRegions, ok := <-helper.regionsCh: - if !ok { - break receiveNewRegions - } - - scatterRegions = append(scatterRegions, newRegions...) - } - } - - regionSplitter := NewRegionSplitter(helper.client) - // It is too expensive to stop recovery and wait for a small number of regions - // to complete scatter, so the maximum waiting time is reduced to 1 minute. 
- _ = regionSplitter.WaitForScatterRegionsTimeout(ctx, scatterRegions, time.Minute) - }() - - iter := helper.iterator() - if err := SplitPoint(ectx, iter, helper.client, helper.splitRegionByPoints); err != nil { - return errors.Trace(err) - } - - // wait for completion of splitting regions - if err := helper.eg.Wait(); err != nil { - return errors.Trace(err) - } - - // wait for completion of scattering regions - close(helper.regionsCh) - wg.Wait() - - return nil -} - // RegionSplitter is a executor of region split by rules. type RegionSplitter struct { client SplitClient @@ -397,8 +49,8 @@ func NewRegionSplitter(client SplitClient) *RegionSplitter { } } -// ExecuteOneRegion expose the function `SplitWaitAndScatter` of split client. -func (rs *RegionSplitter) ExecuteOneRegion(ctx context.Context, region *RegionInfo, keys [][]byte) ([]*RegionInfo, error) { +// ExecuteSortedKeysOnRegion expose the function `SplitWaitAndScatter` of split client. +func (rs *RegionSplitter) ExecuteSortedKeysOnRegion(ctx context.Context, region *RegionInfo, keys [][]byte) ([]*RegionInfo, error) { return rs.client.SplitWaitAndScatter(ctx, region, keys) } diff --git a/br/pkg/restore/split/split_test.go b/br/pkg/restore/split/split_test.go index 4acef5dac84b7..ee53cce560187 100644 --- a/br/pkg/restore/split/split_test.go +++ b/br/pkg/restore/split/split_test.go @@ -11,10 +11,8 @@ import ( "testing" "time" - "github.com/docker/go-units" "github.com/pingcap/errors" "github.com/pingcap/failpoint" - backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" @@ -936,8 +934,8 @@ func TestSplitPoint(t *testing.T) { client.AppendRegion(keyWithTablePrefix(tableID, "h"), keyWithTablePrefix(tableID, "j")) client.AppendRegion(keyWithTablePrefix(tableID, "j"), keyWithTablePrefix(tableID+1, "a")) - iter := NewSplitHelperIteratorForTest(splitHelper, tableID, rewriteRules) - err := SplitPoint(ctx, iter, client, func(ctx context.Context, rs *RegionSplitter, u uint64, o int64, ri *RegionInfo, v []Valued) error { + iter := NewSplitHelperIterator([]*RewriteSplitter{{tableID: tableID, rule: rewriteRules, splitter: splitHelper}}) + err := SplitPoint(ctx, iter, client, func(ctx context.Context, u uint64, o int64, ri *RegionInfo, v []Valued) error { require.Equal(t, u, uint64(0)) require.Equal(t, o, int64(0)) require.Equal(t, ri.Region.StartKey, keyWithTablePrefix(tableID, "a")) @@ -994,8 +992,8 @@ func TestSplitPoint2(t *testing.T) { client.AppendRegion(keyWithTablePrefix(tableID, "o"), keyWithTablePrefix(tableID+1, "a")) firstSplit := true - iter := NewSplitHelperIteratorForTest(splitHelper, tableID, rewriteRules) - err := SplitPoint(ctx, iter, client, func(ctx context.Context, rs *RegionSplitter, u uint64, o int64, ri *RegionInfo, v []Valued) error { + iter := NewSplitHelperIterator([]*RewriteSplitter{{tableID: tableID, rule: rewriteRules, splitter: splitHelper}}) + err := SplitPoint(ctx, iter, client, func(ctx context.Context, u uint64, o int64, ri *RegionInfo, v []Valued) error { if firstSplit { require.Equal(t, u, uint64(0)) require.Equal(t, o, int64(0)) @@ -1028,86 +1026,3 @@ func TestSplitPoint2(t *testing.T) { }) require.NoError(t, err) } - -func fakeFile(tableID, rowID int64, length uint64, num int64) *backuppb.DataFileInfo { - return &backuppb.DataFileInfo{ - StartKey: fakeRowKey(tableID, rowID), - EndKey: fakeRowKey(tableID, rowID+1), - TableId: tableID, - Length: length, - NumberOfEntries: num, - } -} - -func 
fakeRowKey(tableID, rowID int64) kv.Key { - return codec.EncodeBytes(nil, tablecodec.EncodeRecordKey(tablecodec.GenTableRecordPrefix(tableID), kv.IntHandle(rowID))) -} - -func TestLogSplitHelper(t *testing.T) { - ctx := context.Background() - rules := map[int64]*restoreutils.RewriteRules{ - 1: { - Data: []*import_sstpb.RewriteRule{ - { - OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), - NewKeyPrefix: tablecodec.GenTableRecordPrefix(100), - }, - }, - }, - 2: { - Data: []*import_sstpb.RewriteRule{ - { - OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), - NewKeyPrefix: tablecodec.GenTableRecordPrefix(200), - }, - }, - }, - } - oriRegions := [][]byte{ - {}, - codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), - codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), - codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), - } - mockPDCli := NewMockPDClientForSplit() - mockPDCli.SetRegions(oriRegions) - client := NewClient(mockPDCli, nil, nil, 100, 4) - helper := NewLogSplitHelper(rules, client, 4*units.MiB, 400) - - helper.Merge(fakeFile(1, 100, 100, 100)) - helper.Merge(fakeFile(1, 200, 2*units.MiB, 200)) - helper.Merge(fakeFile(2, 100, 3*units.MiB, 300)) - helper.Merge(fakeFile(3, 100, 10*units.MiB, 100000)) - // different regions, no split happens - err := helper.Split(ctx) - require.NoError(t, err) - regions, err := mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) - require.NoError(t, err) - require.Len(t, regions, 3) - require.Equal(t, []byte{}, regions[0].Meta.StartKey) - require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(100)), regions[1].Meta.StartKey) - require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(200)), regions[2].Meta.StartKey) - require.Equal(t, codec.EncodeBytes(nil, tablecodec.EncodeTablePrefix(402)), regions[2].Meta.EndKey) - - helper.Merge(fakeFile(1, 300, 3*units.MiB, 10)) - helper.Merge(fakeFile(1, 400, 4*units.MiB, 10)) - // trigger to split regions for table 1 - err = helper.Split(ctx) - require.NoError(t, err) - regions, err = mockPDCli.ScanRegions(ctx, []byte{}, []byte{}, 0) - require.NoError(t, err) - require.Len(t, regions, 4) - require.Equal(t, fakeRowKey(100, 400), kv.Key(regions[1].Meta.EndKey)) -} - -func NewSplitHelperIteratorForTest(helper *SplitHelper, tableID int64, rule *restoreutils.RewriteRules) *splitHelperIterator { - return &splitHelperIterator{ - tableSplitters: []*rewriteSplitter{ - { - tableID: tableID, - rule: rule, - splitter: helper, - }, - }, - } -} diff --git a/br/pkg/restore/split/splitter.go b/br/pkg/restore/split/splitter.go new file mode 100644 index 0000000000000..5faec981d4224 --- /dev/null +++ b/br/pkg/restore/split/splitter.go @@ -0,0 +1,400 @@ +// Copyright 2024 PingCAP, Inc. Licensed under Apache-2.0. + +package split + +import ( + "bytes" + "context" + "sort" + "sync" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" + "github.com/pingcap/tidb/pkg/tablecodec" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/codec" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +// Splitter defines the interface for basic splitting strategies. +type Splitter interface { + // ExecuteSortedKeysOnRegion splits the keys within a single region and initiates scattering + // after the region has been successfully split. 
+ ExecuteSortedKeysOnRegion(ctx context.Context, region *RegionInfo, keys [][]byte) ([]*RegionInfo, error) + + // ExecuteSortedKeys splits all provided keys while ensuring that the newly created + // regions are balanced. + ExecuteSortedKeys(ctx context.Context, keys [][]byte) error + + // WaitForScatterRegionsTimeout blocks until all regions have finished scattering, + // or until the specified timeout duration has elapsed. + WaitForScatterRegionsTimeout(ctx context.Context, regionInfos []*RegionInfo, timeout time.Duration) int +} + +// SplitStrategy defines how values should be accumulated and when to trigger a split. +type SplitStrategy[T any] interface { + // Accumulate adds a new value into the split strategy's internal state. + // This method accumulates data points or values, preparing them for potential splitting. + Accumulate(T) + // ShouldSplit checks if the accumulated values meet the criteria for triggering a split. + ShouldSplit() bool + // Skip the file by checkpoints or invalid files + ShouldSkip(T) bool + // GetAccumulations returns an iterator for the accumulated values. + GetAccumulations() *SplitHelperIterator + // Reset the buffer for next round + ResetAccumulations() +} + +type BaseSplitStrategy struct { + AccumulateCount int + TableSplitter map[int64]*SplitHelper + Rules map[int64]*restoreutils.RewriteRules +} + +func NewBaseSplitStrategy(rules map[int64]*restoreutils.RewriteRules) *BaseSplitStrategy { + return &BaseSplitStrategy{ + AccumulateCount: 0, + TableSplitter: make(map[int64]*SplitHelper), + Rules: rules, + } +} + +func (b *BaseSplitStrategy) GetAccumulations() *SplitHelperIterator { + tableSplitters := make([]*RewriteSplitter, 0, len(b.TableSplitter)) + for tableID, splitter := range b.TableSplitter { + rewriteRule, exists := b.Rules[tableID] + if !exists { + log.Fatal("[unreachable] no table id matched", zap.Int64("tableID", tableID)) + } + newTableID := restoreutils.GetRewriteTableID(tableID, rewriteRule) + if newTableID == 0 { + log.Warn("failed to get the rewrite table id", zap.Int64("tableID", tableID)) + continue + } + tableSplitters = append(tableSplitters, NewRewriteSpliter( + // TODO remove this field. 
sort by newTableID + codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(newTableID)), + newTableID, + rewriteRule, + splitter, + )) + } + sort.Slice(tableSplitters, func(i, j int) bool { + return bytes.Compare(tableSplitters[i].RewriteKey, tableSplitters[j].RewriteKey) < 0 + }) + return NewSplitHelperIterator(tableSplitters) +} + +func (b *BaseSplitStrategy) ResetAccumulations() { + // always assume all previous files has been processed + // so we should reset after handling one batch accumulations + log.Info("reset accumulations") + clear(b.TableSplitter) + b.AccumulateCount = 0 +} + +type RewriteSplitter struct { + RewriteKey []byte + tableID int64 + rule *restoreutils.RewriteRules + splitter *SplitHelper +} + +func NewRewriteSpliter( + rewriteKey []byte, + tableID int64, + rule *restoreutils.RewriteRules, + splitter *SplitHelper, +) *RewriteSplitter { + return &RewriteSplitter{ + RewriteKey: rewriteKey, + tableID: tableID, + rule: rule, + splitter: splitter, + } +} + +type SplitHelperIterator struct { + tableSplitters []*RewriteSplitter +} + +func NewSplitHelperIterator(tableSplitters []*RewriteSplitter) *SplitHelperIterator { + return &SplitHelperIterator{tableSplitters: tableSplitters} +} + +func (iter *SplitHelperIterator) Traverse(fn func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool) { + for _, entry := range iter.tableSplitters { + endKey := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(entry.tableID+1)) + rule := entry.rule + entry.splitter.Traverse(func(v Valued) bool { + return fn(v, endKey, rule) + }) + } +} + +// PipelineRegionsSplitter defines the interface for advanced (pipeline) splitting strategies. +// log / compacted sst files restore need to use this to split after full restore. +// and the splitter must perform with a control. +// so we choose to split and restore in a continuous flow. +type PipelineRegionsSplitter interface { + Splitter + ExecuteRegions(ctx context.Context, splitHelper *SplitHelperIterator) error // Method for executing pipeline-based splitting +} + +type PipelineRegionsSplitterImpl struct { + *RegionSplitter + pool *util.WorkerPool + splitThresholdSize uint64 + splitThresholdKeys int64 + + eg *errgroup.Group + regionsCh chan []*RegionInfo +} + +func NewPipelineRegionsSplitter( + client SplitClient, + splitSize uint64, + splitKeys int64, +) PipelineRegionsSplitter { + pool := util.NewWorkerPool(128, "split") + return &PipelineRegionsSplitterImpl{ + pool: pool, + RegionSplitter: NewRegionSplitter(client), + splitThresholdSize: splitSize, + splitThresholdKeys: splitKeys, + } +} + +func (r *PipelineRegionsSplitterImpl) ExecuteRegions(ctx context.Context, splitHelper *SplitHelperIterator) error { + var ectx context.Context + var wg sync.WaitGroup + r.eg, ectx = errgroup.WithContext(ctx) + r.regionsCh = make(chan []*RegionInfo, 1024) + wg.Add(1) + go func() { + defer wg.Done() + scatterRegions := make([]*RegionInfo, 0) + receiveNewRegions: + for { + select { + case <-ctx.Done(): + return + case newRegions, ok := <-r.regionsCh: + if !ok { + break receiveNewRegions + } + + scatterRegions = append(scatterRegions, newRegions...) + } + } + // It is too expensive to stop recovery and wait for a small number of regions + // to complete scatter, so the maximum waiting time is reduced to 1 minute. 
+ _ = r.WaitForScatterRegionsTimeout(ectx, scatterRegions, time.Minute) + }() + + err := SplitPoint(ectx, splitHelper, r.client, r.splitRegionByPoints) + if err != nil { + return errors.Trace(err) + } + + // wait for completion of splitting regions + if err := r.eg.Wait(); err != nil { + return errors.Trace(err) + } + + // wait for completion of scattering regions + close(r.regionsCh) + wg.Wait() + + return nil +} + +type splitFunc = func(context.Context, uint64, int64, *RegionInfo, []Valued) error + +// SplitPoint selects ranges overlapped with each region, and calls `splitF` to split the region +func SplitPoint( + ctx context.Context, + iter *SplitHelperIterator, + client SplitClient, + splitF splitFunc, +) (err error) { + // region traverse status + var ( + // the region buffer of each scan + regions []*RegionInfo = nil + regionIndex int = 0 + ) + // region split status + var ( + // range span +----------------+------+---+-------------+ + // region span +------------------------------------+ + // +initial length+ +end valued+ + // regionValueds is the ranges array overlapped with `regionInfo` + regionValueds []Valued = nil + // regionInfo is the region to be split + regionInfo *RegionInfo = nil + // initialLength is the length of the part of the first range overlapped with the region + initialLength uint64 = 0 + initialNumber int64 = 0 + ) + // range status + var ( + // regionOverCount is the number of regions overlapped with the range + regionOverCount uint64 = 0 + ) + + iter.Traverse(func(v Valued, endKey []byte, rule *restoreutils.RewriteRules) bool { + if v.Value.Number == 0 || v.Value.Size == 0 { + return true + } + var ( + vStartKey []byte + vEndKey []byte + ) + // use `vStartKey` and `vEndKey` to compare with region's key + vStartKey, vEndKey, err = restoreutils.GetRewriteEncodedKeys(v, rule) + if err != nil { + return false + } + // traverse to the first region overlapped with the range + for ; regionIndex < len(regions); regionIndex++ { + if bytes.Compare(vStartKey, regions[regionIndex].Region.EndKey) < 0 { + break + } + } + // cannot find any regions overlapped with the range + // need to scan regions again + if regionIndex == len(regions) { + regions = nil + } + regionOverCount = 0 + for { + if regionIndex >= len(regions) { + var startKey []byte + if len(regions) > 0 { + // has traversed over the region buffer, should scan from the last region's end-key of the region buffer + startKey = regions[len(regions)-1].Region.EndKey + } else { + // scan from the range's start-key + startKey = vStartKey + } + // scan at most 64 regions into the region buffer + regions, err = ScanRegionsWithRetry(ctx, client, startKey, endKey, 64) + if err != nil { + return false + } + regionIndex = 0 + } + + region := regions[regionIndex] + // this region must be overlapped with the range + regionOverCount++ + // the region is the last one overlapped with the range, + // should split the last recorded region, + // and then record this region as the region to be split + if bytes.Compare(vEndKey, region.Region.EndKey) < 0 { + endLength := v.Value.Size / regionOverCount + endNumber := v.Value.Number / int64(regionOverCount) + if len(regionValueds) > 0 && regionInfo != region { + // add a part of the range as the end part + if bytes.Compare(vStartKey, regionInfo.Region.EndKey) < 0 { + regionValueds = append(regionValueds, NewValued(vStartKey, regionInfo.Region.EndKey, Value{Size: endLength, Number: endNumber})) + } + // try to split the region + err = splitF(ctx, initialLength, initialNumber, regionInfo, 
regionValueds) + if err != nil { + return false + } + regionValueds = make([]Valued, 0) + } + if regionOverCount == 1 { + // the region completely contains the range + regionValueds = append(regionValueds, Valued{ + Key: Span{ + StartKey: vStartKey, + EndKey: vEndKey, + }, + Value: v.Value, + }) + } else { + // the region is overlapped with the last part of the range + initialLength = endLength + initialNumber = endNumber + } + regionInfo = region + // try the next range + return true + } + + // try the next region + regionIndex++ + } + }) + + if err != nil { + return errors.Trace(err) + } + if len(regionValueds) > 0 { + // try to split the region + err = splitF(ctx, initialLength, initialNumber, regionInfo, regionValueds) + if err != nil { + return errors.Trace(err) + } + } + + return nil +} + +func (r *PipelineRegionsSplitterImpl) splitRegionByPoints( + ctx context.Context, + initialLength uint64, + initialNumber int64, + region *RegionInfo, + valueds []Valued, +) error { + var ( + splitPoints [][]byte = make([][]byte, 0) + lastKey []byte = region.Region.StartKey + length uint64 = initialLength + number int64 = initialNumber + ) + for _, v := range valueds { + // decode will discard ts behind the key, which results in the same key for consecutive ranges + if !bytes.Equal(lastKey, v.GetStartKey()) && (v.Value.Size+length > r.splitThresholdSize || v.Value.Number+number > r.splitThresholdKeys) { + _, rawKey, _ := codec.DecodeBytes(v.GetStartKey(), nil) + splitPoints = append(splitPoints, rawKey) + length = 0 + number = 0 + } + lastKey = v.GetStartKey() + length += v.Value.Size + number += v.Value.Number + } + + if len(splitPoints) == 0 { + return nil + } + + r.pool.ApplyOnErrorGroup(r.eg, func() error { + newRegions, errSplit := r.ExecuteSortedKeysOnRegion(ctx, region, splitPoints) + if errSplit != nil { + log.Warn("failed to split the scaned region", zap.Error(errSplit)) + sort.Slice(splitPoints, func(i, j int) bool { + return bytes.Compare(splitPoints[i], splitPoints[j]) < 0 + }) + return r.ExecuteSortedKeys(ctx, splitPoints) + } + select { + case <-ctx.Done(): + return nil + case r.regionsCh <- newRegions: + } + log.Info("split the region", zap.Uint64("region-id", region.Region.Id), zap.Int("split-point-number", len(splitPoints))) + return nil + }) + return nil +} diff --git a/br/pkg/restore/utils/rewrite_rule.go b/br/pkg/restore/utils/rewrite_rule.go index eca06a58bee6a..a664d97a5f11d 100644 --- a/br/pkg/restore/utils/rewrite_rule.go +++ b/br/pkg/restore/utils/rewrite_rule.go @@ -16,6 +16,7 @@ package utils import ( "bytes" + "strings" "github.com/pingcap/errors" backuppb "github.com/pingcap/kvproto/pkg/brpb" @@ -44,6 +45,8 @@ type RewriteRules struct { Data []*import_sstpb.RewriteRule OldKeyspace []byte NewKeyspace []byte + // used to record checkpoint data + NewTableID int64 } // Append append its argument to this rewrite rules. @@ -171,7 +174,7 @@ func GetRewriteRuleOfTable( }) } - return &RewriteRules{Data: dataRules} + return &RewriteRules{Data: dataRules, NewTableID: newTableID} } // ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file. 
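// --- Illustrative sketch (editor's note, not part of this patch) ---
// A minimal example of how the new split API introduced in splitter.go above
// (SplitStrategy, PipelineRegionsSplitter, BaseSplitStrategy) might be driven by
// a caller. The driver function, its parameters, and the `files` slice are
// assumptions made for illustration only; just the interface, constructor, and
// method names come from this diff.
package example

import (
	"context"

	"github.com/pingcap/tidb/br/pkg/restore/split"
)

// runPipelineSplit accumulates files into a SplitStrategy and, whenever the
// strategy reports that enough data has been buffered, splits (and scatters)
// the affected regions before resetting the buffer for the next batch.
func runPipelineSplit[T any](
	ctx context.Context,
	strategy split.SplitStrategy[T],
	splitter split.PipelineRegionsSplitter,
	files []T,
) error {
	for _, f := range files {
		if strategy.ShouldSkip(f) {
			continue
		}
		strategy.Accumulate(f)
		if !strategy.ShouldSplit() {
			continue
		}
		// Split regions for everything accumulated so far, then clear the
		// buffer so the next batch starts fresh.
		if err := splitter.ExecuteRegions(ctx, strategy.GetAccumulations()); err != nil {
			return err
		}
		strategy.ResetAccumulations()
	}
	// Flush whatever is left in the buffer after the last file.
	return splitter.ExecuteRegions(ctx, strategy.GetAccumulations())
}
// --- end of editor's sketch ---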
@@ -282,6 +285,26 @@ func FindMatchedRewriteRule(file AppliedFile, rules *RewriteRules) *import_sstpb return rule } +func (r *RewriteRules) String() string { + var out strings.Builder + out.WriteRune('[') + if len(r.OldKeyspace) != 0 { + out.WriteString(redact.Key(r.OldKeyspace)) + out.WriteString(" =[ks]=> ") + out.WriteString(redact.Key(r.NewKeyspace)) + } + for i, d := range r.Data { + if i > 0 { + out.WriteString(",") + } + out.WriteString(redact.Key(d.OldKeyPrefix)) + out.WriteString(" => ") + out.WriteString(redact.Key(d.NewKeyPrefix)) + } + out.WriteRune(']') + return out.String() +} + // GetRewriteRawKeys rewrites rules to the raw key. func GetRewriteRawKeys(file AppliedFile, rewriteRules *RewriteRules) (startKey, endKey []byte, err error) { startID := tablecodec.DecodeTableID(file.GetStartKey()) @@ -290,7 +313,7 @@ func GetRewriteRawKeys(file AppliedFile, rewriteRules *RewriteRules) (startKey, if startID == endID { startKey, rule = rewriteRawKey(file.GetStartKey(), rewriteRules) if rewriteRules != nil && rule == nil { - err = errors.Annotatef(berrors.ErrRestoreInvalidRewrite, "cannot find raw rewrite rule for start key, startKey: %s", redact.Key(file.GetStartKey())) + err = errors.Annotatef(berrors.ErrRestoreInvalidRewrite, "cannot find raw rewrite rule for start key, startKey: %s; self = %s", redact.Key(file.GetStartKey()), rewriteRules) return } endKey, rule = rewriteRawKey(file.GetEndKey(), rewriteRules) @@ -364,7 +387,7 @@ func RewriteRange(rg *rtree.Range, rewriteRules *RewriteRules) (*rtree.Range, er } rg.StartKey, rule = replacePrefix(rg.StartKey, rewriteRules) if rule == nil { - log.Warn("cannot find rewrite rule", logutil.Key("key", rg.StartKey)) + log.Warn("cannot find rewrite rule", logutil.Key("start key", rg.StartKey)) } else { log.Debug( "rewrite start key", @@ -373,7 +396,7 @@ func RewriteRange(rg *rtree.Range, rewriteRules *RewriteRules) (*rtree.Range, er oldKey := rg.EndKey rg.EndKey, rule = replacePrefix(rg.EndKey, rewriteRules) if rule == nil { - log.Warn("cannot find rewrite rule", logutil.Key("key", rg.EndKey)) + log.Warn("cannot find rewrite rule", logutil.Key("end key", rg.EndKey)) } else { log.Debug( "rewrite end key", diff --git a/br/pkg/stream/stream_metas.go b/br/pkg/stream/stream_metas.go index b51923a9638c1..14c65da097472 100644 --- a/br/pkg/stream/stream_metas.go +++ b/br/pkg/stream/stream_metas.go @@ -652,6 +652,17 @@ func (migs Migrations) MergeToBy(seq int, merge func(m1, m2 *pb.Migration) *pb.M return newBase } +// ListAll returns a slice of all migrations in protobuf format. +// This includes the base migration and any additional layers. 
+func (migs Migrations) ListAll() []*pb.Migration { + pbMigs := make([]*pb.Migration, 0, len(migs.Layers)+1) + pbMigs = append(pbMigs, migs.Base) + for _, m := range migs.Layers { + pbMigs = append(pbMigs, &m.Content) + } + return pbMigs +} + type mergeAndMigrateToConfig struct { interactiveCheck func(context.Context, *pb.Migration) bool alwaysRunTruncate bool diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index c9773a77510d7..7e433fd640cb5 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -100,6 +100,8 @@ const ( defaultStatsConcurrency = 12 defaultBatchFlushInterval = 16 * time.Second defaultFlagDdlBatchSize = 128 + maxRestoreBatchSizeLimit = 10240 + pb = 1024 * 1024 * 1024 * 1024 * 1024 resetSpeedLimitRetryTimes = 3 ) @@ -532,6 +534,10 @@ func (cfg *RestoreConfig) adjustRestoreConfigForStreamRestore() { cfg.PitrConcurrency += 1 log.Info("set restore kv files concurrency", zap.Int("concurrency", int(cfg.PitrConcurrency))) cfg.Config.Concurrency = cfg.PitrConcurrency + if cfg.ConcurrencyPerStore.Value > 0 { + log.Info("set restore compacted sst files concurrency per store", + zap.Int("concurrency", int(cfg.ConcurrencyPerStore.Value))) + } } func configureRestoreClient(ctx context.Context, client *snapclient.SnapClient, cfg *RestoreConfig) error { @@ -765,8 +771,10 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s keepaliveCfg := GetKeepalive(&cfg.Config) keepaliveCfg.PermitWithoutStream = true client := snapclient.NewRestoreClient(mgr.GetPDClient(), mgr.GetPDHTTPClient(), mgr.GetTLSConfig(), keepaliveCfg) + // set to cfg so that restoreStream can use it. + cfg.ConcurrencyPerStore = kvConfigs.ImportGoroutines // using tikv config to set the concurrency-per-store for client. - client.SetConcurrencyPerStore(kvConfigs.ImportGoroutines.Value) + client.SetConcurrencyPerStore(cfg.ConcurrencyPerStore.Value) err := configureRestoreClient(ctx, client, cfg) if err != nil { return errors.Trace(err) @@ -802,7 +810,7 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s } reader := metautil.NewMetaReader(backupMeta, s, &cfg.CipherInfo) - if err = client.LoadSchemaIfNeededAndInitClient(c, backupMeta, u, reader, cfg.LoadStats); err != nil { + if err = client.LoadSchemaIfNeededAndInitClient(c, backupMeta, u, reader, cfg.LoadStats, nil, nil); err != nil { return errors.Trace(err) } @@ -1093,6 +1101,13 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s if err != nil { return errors.Trace(err) } + onProgress := func(n int64) { + if n == 0 { + updateCh.Inc() + return + } + updateCh.IncBy(n) + } if err := client.RestoreTables(ctx, placementRuleManager, createdTables, files, checkpointSetWithTableID, kvConfigs.MergeRegionSize.Value, kvConfigs.MergeRegionKeyCount.Value, // If the command is from BR binary, the ddl.EnableSplitTableRegion is always 0, @@ -1100,7 +1115,7 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s // Notice that `split-region-on-table` configure from TiKV split on the region having data, it may trigger after restore done. // It's recommended to enable TiDB configure `split-table` instead. atomic.LoadUint32(&ddl.EnableSplitTableRegion) == 1, - updateCh, + onProgress, ); err != nil { return errors.Trace(err) } @@ -1126,25 +1141,6 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s finish := dropToBlackhole(ctx, postHandleCh, errCh) - // Reset speed limit. 
ResetSpeedLimit must be called after client.LoadSchemaIfNeededAndInitClient has been called. - defer func() { - var resetErr error - // In future we may need a mechanism to set speed limit in ttl. like what we do in switchmode. TODO - for retry := 0; retry < resetSpeedLimitRetryTimes; retry++ { - resetErr = client.ResetSpeedLimit(ctx) - if resetErr != nil { - log.Warn("failed to reset speed limit, retry it", - zap.Int("retry time", retry), logutil.ShortError(resetErr)) - time.Sleep(time.Duration(retry+3) * time.Second) - continue - } - break - } - if resetErr != nil { - log.Error("failed to reset speed limit, please reset it manually", zap.Error(resetErr)) - } - }() - select { case err = <-errCh: err = multierr.Append(err, multierr.Combine(Exhaust(errCh)...)) diff --git a/br/pkg/task/restore_raw.go b/br/pkg/task/restore_raw.go index cbcda5679fb57..acb2e48041e64 100644 --- a/br/pkg/task/restore_raw.go +++ b/br/pkg/task/restore_raw.go @@ -4,6 +4,7 @@ package task import ( "context" + "time" "github.com/pingcap/errors" "github.com/pingcap/log" @@ -12,6 +13,7 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/httputil" + "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/restore" snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" @@ -20,6 +22,7 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/spf13/cobra" "github.com/spf13/pflag" + "go.uber.org/zap" ) // RestoreRawConfig is the configuration specific for raw kv restore tasks. @@ -109,7 +112,7 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR return errors.Trace(err) } reader := metautil.NewMetaReader(backupMeta, s, &cfg.CipherInfo) - if err = client.LoadSchemaIfNeededAndInitClient(c, backupMeta, u, reader, true); err != nil { + if err = client.LoadSchemaIfNeededAndInitClient(c, backupMeta, u, reader, true, cfg.StartKey, cfg.EndKey); err != nil { return errors.Trace(err) } @@ -145,8 +148,9 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR int64(len(ranges)+len(files)), !cfg.LogProgress) + onProgress := func(i int64) { updateCh.IncBy(i) } // RawKV restore does not need to rewrite keys. - err = client.SplitPoints(ctx, getEndKeys(ranges), updateCh, true) + err = client.SplitPoints(ctx, getEndKeys(ranges), onProgress, true) if err != nil { return errors.Trace(err) } @@ -158,10 +162,20 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR } defer restore.RestorePostWork(ctx, importModeSwitcher, restoreSchedulers, cfg.Online) - err = client.RestoreRaw(ctx, cfg.StartKey, cfg.EndKey, files, updateCh) + start := time.Now() + err = client.GetRestorer().GoRestore(onProgress, restore.CreateUniqueFileSets(files)) if err != nil { return errors.Trace(err) } + err = client.GetRestorer().WaitUntilFinish() + if err != nil { + return errors.Trace(err) + } + elapsed := time.Since(start) + log.Info("Restore Raw", + logutil.Key("startKey", cfg.StartKey), + logutil.Key("endKey", cfg.EndKey), + zap.Duration("take", elapsed)) // Restore has finished. 
updateCh.Close() diff --git a/br/pkg/task/restore_txn.go b/br/pkg/task/restore_txn.go index d686eb97e2154..2af64a59602cc 100644 --- a/br/pkg/task/restore_txn.go +++ b/br/pkg/task/restore_txn.go @@ -15,6 +15,7 @@ import ( snapclient "github.com/pingcap/tidb/br/pkg/restore/snap_client" restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" "github.com/pingcap/tidb/br/pkg/summary" + "go.uber.org/zap" ) // RunRestoreTxn starts a txn kv restore task inside the current goroutine. @@ -54,7 +55,7 @@ func RunRestoreTxn(c context.Context, g glue.Glue, cmdName string, cfg *Config) return errors.Trace(err) } reader := metautil.NewMetaReader(backupMeta, s, &cfg.CipherInfo) - if err = client.LoadSchemaIfNeededAndInitClient(c, backupMeta, u, reader, true); err != nil { + if err = client.LoadSchemaIfNeededAndInitClient(c, backupMeta, u, reader, true, nil, nil); err != nil { return errors.Trace(err) } @@ -72,6 +73,7 @@ func RunRestoreTxn(c context.Context, g glue.Glue, cmdName string, cfg *Config) } summary.CollectInt("restore files", len(files)) + log.Info("restore files", zap.Int("count", len(files))) ranges, _, err := restoreutils.MergeAndRewriteFileRanges( files, nil, conn.DefaultMergeRegionSizeBytes, conn.DefaultMergeRegionKeyCount) if err != nil { @@ -86,8 +88,9 @@ func RunRestoreTxn(c context.Context, g glue.Glue, cmdName string, cfg *Config) int64(len(ranges)+len(files)), !cfg.LogProgress) + onProgress := func(i int64) { updateCh.IncBy(i) } // RawKV restore does not need to rewrite keys. - err = client.SplitPoints(ctx, getEndKeys(ranges), updateCh, false) + err = client.SplitPoints(ctx, getEndKeys(ranges), onProgress, false) if err != nil { return errors.Trace(err) } @@ -99,11 +102,14 @@ func RunRestoreTxn(c context.Context, g glue.Glue, cmdName string, cfg *Config) } defer restore.RestorePostWork(ctx, importModeSwitcher, restoreSchedulers, false) - err = client.WaitForFilesRestored(ctx, files, updateCh) + err = client.GetRestorer().GoRestore(onProgress, restore.CreateUniqueFileSets(files)) + if err != nil { + return errors.Trace(err) + } + err = client.GetRestorer().WaitUntilFinish() if err != nil { return errors.Trace(err) } - // Restore has finished. 
updateCh.Close() diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go index 13444ddf6d825..761adb4418dae 100644 --- a/br/pkg/task/stream.go +++ b/br/pkg/task/stream.go @@ -1303,7 +1303,7 @@ func restoreStream( if err != nil { return errors.Annotate(err, "failed to create restore client") } - defer client.Close() + defer client.Close(ctx) if taskInfo != nil && taskInfo.Metadata != nil { // reuse the task's rewrite ts @@ -1355,24 +1355,18 @@ func restoreStream( log.Info("finish restoring gc") }() - var checkpointRunner *checkpoint.CheckpointRunner[checkpoint.LogRestoreKeyType, checkpoint.LogRestoreValueType] + var sstCheckpointSets map[string]struct{} if cfg.UseCheckpoint { oldRatioFromCheckpoint, err := client.InitCheckpointMetadataForLogRestore(ctx, cfg.StartTS, cfg.RestoreTS, oldRatio, cfg.tiflashRecorder) if err != nil { return errors.Trace(err) } oldRatio = oldRatioFromCheckpoint - - checkpointRunner, err = client.StartCheckpointRunnerForLogRestore(ctx, g, mgr.GetStorage()) + sstCheckpointSets, err = client.InitCheckpointMetadataForCompactedSstRestore(ctx) if err != nil { return errors.Trace(err) } - defer func() { - log.Info("wait for flush checkpoint...") - checkpointRunner.WaitForFinish(ctx, !gcDisabledRestorable) - }() } - encryptionManager, err := encryption.NewManager(&cfg.LogBackupCipherInfo, &cfg.MasterKeyConfig) if err != nil { return errors.Annotate(err, "failed to create encryption manager for log restore") @@ -1382,6 +1376,11 @@ func restoreStream( if err != nil { return err } + migs, err := client.GetMigrations(ctx) + if err != nil { + return errors.Trace(err) + } + client.BuildMigrations(migs) // get full backup meta storage to generate rewrite rules. fullBackupStorage, err := parseFullBackupTablesStorage(cfg) @@ -1422,8 +1421,7 @@ func restoreStream( totalKVCount += kvCount totalSize += size } - dataFileCount := 0 - ddlFiles, err := client.LoadDDLFilesAndCountDMLFiles(ctx, &dataFileCount) + ddlFiles, err := client.LoadDDLFilesAndCountDMLFiles(ctx) if err != nil { return err } @@ -1442,46 +1440,61 @@ func restoreStream( return errors.Trace(err) } - // generate the upstream->downstream id maps for checkpoint - idrules := make(map[int64]int64) - downstreamIdset := make(map[int64]struct{}) - for upstreamId, rule := range rewriteRules { - downstreamId := restoreutils.GetRewriteTableID(upstreamId, rule) - idrules[upstreamId] = downstreamId - downstreamIdset[downstreamId] = struct{}{} + logFilesIter, err := client.LoadDMLFiles(ctx) + if err != nil { + return errors.Trace(err) } - logFilesIter, err := client.LoadDMLFiles(ctx) + compactionIter := client.LogFileManager.GetCompactionIter(ctx) + + se, err := g.CreateSession(mgr.GetStorage()) if err != nil { return errors.Trace(err) } - pd := g.StartProgress(ctx, "Restore KV Files", int64(dataFileCount), !cfg.LogProgress) + execCtx := se.GetSessionCtx().GetRestrictedSQLExecutor() + splitSize, splitKeys := utils.GetRegionSplitInfo(execCtx) + log.Info("[Log Restore] get split threshold from tikv config", zap.Uint64("split-size", splitSize), zap.Int64("split-keys", splitKeys)) + + pd := g.StartProgress(ctx, "Restore Files(SST + KV)", logclient.TotalEntryCount, !cfg.LogProgress) err = withProgress(pd, func(p glue.Progress) (pErr error) { + updateStatsWithCheckpoint := func(kvCount, size uint64) { + mu.Lock() + defer mu.Unlock() + totalKVCount += kvCount + totalSize += size + checkpointTotalKVCount += kvCount + checkpointTotalSize += size + // increase the progress + p.IncBy(int64(kvCount)) + } + compactedSplitIter, err := 
client.WrapCompactedFilesIterWithSplitHelper( + ctx, compactionIter, rewriteRules, sstCheckpointSets, + updateStatsWithCheckpoint, splitSize, splitKeys, + ) + if err != nil { + return errors.Trace(err) + } + + err = client.RestoreCompactedSstFiles(ctx, compactedSplitIter, rewriteRules, importModeSwitcher, p.IncBy) + if err != nil { + return errors.Trace(err) + } + + logFilesIterWithSplit, err := client.WrapLogFilesIterWithSplitHelper(ctx, logFilesIter, execCtx, rewriteRules, updateStatsWithCheckpoint, splitSize, splitKeys) + if err != nil { + return errors.Trace(err) + } + if cfg.UseCheckpoint { - updateStatsWithCheckpoint := func(kvCount, size uint64) { - mu.Lock() - defer mu.Unlock() - totalKVCount += kvCount - totalSize += size - checkpointTotalKVCount += kvCount - checkpointTotalSize += size - } - logFilesIter, err = client.WrapLogFilesIterWithCheckpoint(ctx, logFilesIter, downstreamIdset, updateStatsWithCheckpoint, p.Inc) - if err != nil { - return errors.Trace(err) - } + // TODO make a failpoint iter inside the logclient. failpoint.Inject("corrupt-files", func(v failpoint.Value) { var retErr error - logFilesIter, retErr = logclient.WrapLogFilesIterWithCheckpointFailpoint(v, logFilesIter, rewriteRules) + logFilesIterWithSplit, retErr = logclient.WrapLogFilesIterWithCheckpointFailpoint(v, logFilesIterWithSplit, rewriteRules) defer func() { pErr = retErr }() }) } - logFilesIterWithSplit, err := client.WrapLogFilesIterWithSplitHelper(logFilesIter, rewriteRules, g, mgr.GetStorage()) - if err != nil { - return errors.Trace(err) - } - return client.RestoreKVFiles(ctx, rewriteRules, idrules, logFilesIterWithSplit, checkpointRunner, + return client.RestoreKVFiles(ctx, rewriteRules, logFilesIterWithSplit, cfg.PitrBatchCount, cfg.PitrBatchSize, updateStats, p.IncBy, &cfg.LogBackupCipherInfo, cfg.MasterKeyConfig.MasterKeys) }) if err != nil { @@ -1536,7 +1549,7 @@ func restoreStream( } failpoint.Inject("do-checksum-with-rewrite-rules", func(_ failpoint.Value) { - if err := client.FailpointDoChecksumForLogRestore(ctx, mgr.GetStorage().GetClient(), mgr.GetPDClient(), idrules, rewriteRules); err != nil { + if err := client.FailpointDoChecksumForLogRestore(ctx, mgr.GetStorage().GetClient(), mgr.GetPDClient(), rewriteRules); err != nil { failpoint.Return(errors.Annotate(err, "failed to do checksum")) } }) @@ -1551,13 +1564,14 @@ func createRestoreClient(ctx context.Context, g glue.Glue, cfg *RestoreConfig, m keepaliveCfg := GetKeepalive(&cfg.Config) keepaliveCfg.PermitWithoutStream = true client := logclient.NewRestoreClient(mgr.GetPDClient(), mgr.GetPDHTTPClient(), mgr.GetTLSConfig(), keepaliveCfg) - err = client.Init(g, mgr.GetStorage()) + + err = client.Init(ctx, g, mgr.GetStorage()) if err != nil { return nil, errors.Trace(err) } defer func() { if err != nil { - client.Close() + client.Close(ctx) } }() @@ -1571,9 +1585,20 @@ func createRestoreClient(ctx context.Context, g glue.Glue, cfg *RestoreConfig, m return nil, errors.Trace(err) } client.SetCrypter(&cfg.CipherInfo) - client.SetConcurrency(uint(cfg.Concurrency)) client.SetUpstreamClusterID(cfg.upstreamClusterID) - client.InitClients(ctx, u) + + createCheckpointSessionFn := func() (glue.Session, error) { + // always create a new session for checkpoint runner + // because session is not thread safe + if cfg.UseCheckpoint { + return g.CreateSession(mgr.GetStorage()) + } + return nil, nil + } + err = client.InitClients(ctx, u, createCheckpointSessionFn, uint(cfg.Concurrency), cfg.ConcurrencyPerStore.Value) + if err != nil { + return nil, 
errors.Trace(err) + } err = client.SetRawKVBatchClient(ctx, cfg.PD, cfg.TLS.ToKVSecurity()) if err != nil { diff --git a/br/pkg/utils/iter/combinator_types.go b/br/pkg/utils/iter/combinator_types.go index 34288d104236c..af71a83adf5fb 100644 --- a/br/pkg/utils/iter/combinator_types.go +++ b/br/pkg/utils/iter/combinator_types.go @@ -124,6 +124,26 @@ func (f filterMap[T, R]) TryNext(ctx context.Context) IterResult[R] { } } +type tryMap[T, R any] struct { + inner TryNextor[T] + + mapper func(T) (R, error) +} + +func (t tryMap[T, R]) TryNext(ctx context.Context) IterResult[R] { + r := t.inner.TryNext(ctx) + + if r.FinishedOrError() { + return DoneBy[R](r) + } + + res, err := t.mapper(r.Item) + if err != nil { + return Throw[R](err) + } + return Emit(res) +} + type join[T any] struct { inner TryNextor[TryNextor[T]] diff --git a/br/pkg/utils/iter/combinators.go b/br/pkg/utils/iter/combinators.go index 6247237161912..1b7c93f55708c 100644 --- a/br/pkg/utils/iter/combinators.go +++ b/br/pkg/utils/iter/combinators.go @@ -91,6 +91,13 @@ func MapFilter[T, R any](it TryNextor[T], mapper func(T) (R, bool)) TryNextor[R] } } +func TryMap[T, R any](it TryNextor[T], mapper func(T) (R, error)) TryNextor[R] { + return tryMap[T, R]{ + inner: it, + mapper: mapper, + } +} + // ConcatAll concatenates all elements yields by the iterators. // In another word, it 'chains' all the input iterators. func ConcatAll[T any](items ...TryNextor[T]) TryNextor[T] { From 5a6c39136154c86045342c86925c140176f0e2cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B1=B1=E5=B2=9A?= <36239017+YuJuncen@users.noreply.github.com> Date: Wed, 20 Nov 2024 15:50:44 +0800 Subject: [PATCH 05/17] external_storage: implement locking (#56597) close pingcap/tidb#56523 --- br/pkg/storage/locking.go | 253 ++++++++++++++++-- br/pkg/storage/locking_test.go | 66 ++++- br/pkg/task/stream.go | 5 +- ...rify-transactions-for-external-storages.md | 233 ++++++++++++++++ .../imgs/write-and-verfiy-tla-states.pdf | Bin 0 -> 34414 bytes docs/design/imgs/write-and-verify-tla.pdf | Bin 0 -> 168008 bytes 6 files changed, 520 insertions(+), 37 deletions(-) create mode 100644 docs/design/2024-10-11-put-and-verify-transactions-for-external-storages.md create mode 100644 docs/design/imgs/write-and-verfiy-tla-states.pdf create mode 100644 docs/design/imgs/write-and-verify-tla.pdf diff --git a/br/pkg/storage/locking.go b/br/pkg/storage/locking.go index 09486a032c88e..67ea7c2003081 100644 --- a/br/pkg/storage/locking.go +++ b/br/pkg/storage/locking.go @@ -3,23 +3,131 @@ package storage import ( + "bytes" "context" + "encoding/hex" "encoding/json" "fmt" + "math/rand" "os" + "path" "time" + "github.com/google/uuid" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/logutil" + "go.uber.org/multierr" "go.uber.org/zap" ) +// conditionalPut is a write that in a strong consistency storage. +// +// It provides a `Verify` hook and a `VerifyWriteContext`, you may check +// the conditions you wanting there. +// +// if the write is success and the file wasn't deleted, no other `conditionalPut` +// over the same file was success. +// +// For more details, check docs/design/2024-10-11-put-and-verify-transactions-for-external-storages.md. +type conditionalPut struct { + // Target is the target file of this txn. + // There shouldn't be other files shares this prefix with this file, or the txn will fail. + Target string + // Content is the content that needed to be written to that file. 
+ Content func(txnID uuid.UUID) []byte + // Verify allows you add other preconditions to the write. + // This will be called when the write is allowed and about to be performed. + // If `Verify()` returns an error, the write will be aborted. + Verify func(ctx VerifyWriteContext) error +} + +type VerifyWriteContext struct { + context.Context + Target string + Storage ExternalStorage + TxnID uuid.UUID +} + +func (cx *VerifyWriteContext) IntentFileName() string { + return fmt.Sprintf("%s.INTENT.%s", cx.Target, hex.EncodeToString(cx.TxnID[:])) +} + +// CommitTo commits the write to the external storage. +// It contains two phases: +// - Intention phase, it will write an "intention" file named "$Target_$TxnID". +// - Put phase, here it actually write the "$Target" down. +// +// In each phase, before writing, it will verify whether the storage is suitable for writing, that is: +// - There shouldn't be any other intention files. +// - Verify() returns no error. (If there is one.) +func (w conditionalPut) CommitTo(ctx context.Context, s ExternalStorage) (uuid.UUID, error) { + txnID := uuid.New() + cx := VerifyWriteContext{ + Context: ctx, + Target: w.Target, + Storage: s, + TxnID: txnID, + } + intentFileName := cx.IntentFileName() + checkConflict := func() error { + var err error + if w.Verify != nil { + err = multierr.Append(err, w.Verify(cx)) + } + return multierr.Append(err, cx.assertOnlyMyIntent()) + } + + if err := checkConflict(); err != nil { + return uuid.UUID{}, errors.Annotate(err, "during initial check") + } + failpoint.Inject("exclusive-write-commit-to-1", func() {}) + + if err := s.WriteFile(cx, intentFileName, []byte{}); err != nil { + return uuid.UUID{}, errors.Annotate(err, "during writing intention file") + } + + deleteIntentionFile := func() { + if err := s.DeleteFile(cx, intentFileName); err != nil { + log.Warn("Cannot delete the intention file, you may delete it manually.", zap.String("file", intentFileName), logutil.ShortError(err)) + } + } + defer deleteIntentionFile() + if err := checkConflict(); err != nil { + return uuid.UUID{}, errors.Annotate(err, "during checking whether there are other intentions") + } + failpoint.Inject("exclusive-write-commit-to-2", func() {}) + + return txnID, s.WriteFile(cx, w.Target, w.Content(txnID)) +} + +// assertNoOtherOfPrefixExpect asserts that there is no other file with the same prefix than the expect file. +func (cx VerifyWriteContext) assertNoOtherOfPrefixExpect(pfx string, expect string) error { + fileName := path.Base(pfx) + dirName := path.Dir(pfx) + return cx.Storage.WalkDir(cx, &WalkOption{ + SubDir: dirName, + ObjPrefix: fileName, + }, func(path string, size int64) error { + if path != expect { + return fmt.Errorf("there is conflict file %s", path) + } + return nil + }) +} + +// assertOnlyMyIntent asserts that there is no other intention file than our intention file. +func (cx VerifyWriteContext) assertOnlyMyIntent() error { + return cx.assertNoOtherOfPrefixExpect(cx.Target, cx.IntentFileName()) +} + // LockMeta is the meta information of a lock. 
type LockMeta struct { LockedAt time.Time `json:"locked_at"` LockerHost string `json:"locker_host"` LockerPID int `json:"locker_pid"` + TxnID []byte `json:"txn_id"` Hint string `json:"hint"` } @@ -67,16 +175,18 @@ func readLockMeta(ctx context.Context, storage ExternalStorage, path string) (Lo return meta, nil } -func putLockMeta(ctx context.Context, storage ExternalStorage, path string, meta LockMeta) error { - file, err := json.Marshal(meta) - if err != nil { - return errors.Annotatef(err, "failed to marshal lock meta %s", path) - } - err = storage.WriteFile(ctx, path, file) +type RemoteLock struct { + txnID uuid.UUID + storage ExternalStorage + path string +} + +func tryFetchRemoteLock(ctx context.Context, storage ExternalStorage, path string) error { + meta, err := readLockMeta(ctx, storage, path) if err != nil { - return errors.Annotatef(err, "failed to write lock meta at %s", path) + return err } - return nil + return ErrLocked{Meta: meta} } // TryLockRemote tries to create a "lock file" at the external storage. @@ -84,30 +194,37 @@ func putLockMeta(ctx context.Context, storage ExternalStorage, path string, meta // Will return a `ErrLocked` if there is another process already creates the lock file. // This isn't a strict lock like flock in linux: that means, the lock might be forced removed by // manually deleting the "lock file" in external storage. -func TryLockRemote(ctx context.Context, storage ExternalStorage, path, hint string) (err error) { - defer func() { - log.Info("Trying lock remote file.", zap.String("path", path), zap.String("hint", hint), logutil.ShortError(err)) - }() - exists, err := storage.FileExists(ctx, path) - if err != nil { - return errors.Annotatef(err, "failed to check lock file %s exists", path) - } - if exists { - meta, err := readLockMeta(ctx, storage, path) - if err != nil { - return err - } - return ErrLocked{Meta: meta} +func TryLockRemote(ctx context.Context, storage ExternalStorage, path, hint string) (lock RemoteLock, err error) { + writer := conditionalPut{ + Target: path, + Content: func(txnID uuid.UUID) []byte { + meta := MakeLockMeta(hint) + meta.TxnID = txnID[:] + res, err := json.Marshal(meta) + if err != nil { + log.Panic( + "Unreachable: a trivial object cannot be marshaled to JSON.", + zap.String("path", path), + logutil.ShortError(err), + ) + } + return res + }, } - meta := MakeLockMeta(hint) - return putLockMeta(ctx, storage, path, meta) + lock.storage = storage + lock.path = path + lock.txnID, err = writer.CommitTo(ctx, storage) + if err != nil { + err = errors.Annotatef(err, "there is something about the lock: %s", tryFetchRemoteLock(ctx, storage, path)) + } + return } // UnlockRemote removes the lock file at the specified path. // Removing that file will release the lock. -func UnlockRemote(ctx context.Context, storage ExternalStorage, path string) error { - meta, err := readLockMeta(ctx, storage, path) +func (l RemoteLock) Unlock(ctx context.Context) error { + meta, err := readLockMeta(ctx, l.storage, l.path) if err != nil { return err } @@ -115,10 +232,88 @@ func UnlockRemote(ctx context.Context, storage ExternalStorage, path string) err // operation in our ExternalStorage abstraction. // So, once our lock has been overwritten or we are overwriting other's lock, // this information will be useful for troubleshooting. 
- log.Info("Releasing lock.", zap.Stringer("meta", meta), zap.String("path", path)) - err = storage.DeleteFile(ctx, path) + if !bytes.Equal(l.txnID[:], meta.TxnID) { + return errors.Errorf("Txn ID mismatch: remote is %v, our is %v", meta.TxnID, l.txnID) + } + + log.Info("Releasing lock.", zap.Stringer("meta", meta), zap.String("path", l.path)) + err = l.storage.DeleteFile(ctx, l.path) if err != nil { - return errors.Annotatef(err, "failed to delete lock file %s", path) + return errors.Annotatef(err, "failed to delete lock file %s", l.path) } return nil } + +func writeLockName(path string) string { + return fmt.Sprintf("%s.WRIT", path) +} + +func newReadLockName(path string) string { + readID := rand.Int63() + return fmt.Sprintf("%s.READ.%016x", path, readID) +} + +func TryLockRemoteWrite(ctx context.Context, storage ExternalStorage, path, hint string) (lock RemoteLock, err error) { + target := writeLockName(path) + writer := conditionalPut{ + Target: target, + Content: func(txnID uuid.UUID) []byte { + meta := MakeLockMeta(hint) + meta.TxnID = txnID[:] + res, err := json.Marshal(meta) + if err != nil { + log.Panic( + "Unreachable: a plain object cannot be marshaled to JSON.", + zap.String("path", path), + logutil.ShortError(err), + ) + } + return res + }, + Verify: func(ctx VerifyWriteContext) error { + return ctx.assertNoOtherOfPrefixExpect(path, ctx.IntentFileName()) + }, + } + + lock.storage = storage + lock.path = target + lock.txnID, err = writer.CommitTo(ctx, storage) + if err != nil { + err = errors.Annotatef(err, "there is something about the lock: %s", tryFetchRemoteLock(ctx, storage, target)) + } + return +} + +func TryLockRemoteRead(ctx context.Context, storage ExternalStorage, path, hint string) (lock RemoteLock, err error) { + target := newReadLockName(path) + writeLock := writeLockName(path) + writer := conditionalPut{ + Target: target, + Content: func(txnID uuid.UUID) []byte { + meta := MakeLockMeta(hint) + meta.TxnID = txnID[:] + res, err := json.Marshal(meta) + if err != nil { + log.Panic( + "Unreachable: a trivial object cannot be marshaled to JSON.", + zap.String("path", path), + logutil.ShortError(err), + ) + } + return res + }, + Verify: func(ctx VerifyWriteContext) error { + return ctx.assertNoOtherOfPrefixExpect(writeLock, "") + }, + } + + lock.storage = storage + lock.path = target + lock.txnID, err = writer.CommitTo(ctx, storage) + if err != nil { + err = errors.Annotatef(err, "failed to commit the lock due to existing lock: "+ + "there is something about the lock: %s", tryFetchRemoteLock(ctx, storage, writeLock)) + } + + return +} diff --git a/br/pkg/storage/locking_test.go b/br/pkg/storage/locking_test.go index ab6056c324714..dc8757db7b774 100644 --- a/br/pkg/storage/locking_test.go +++ b/br/pkg/storage/locking_test.go @@ -8,6 +8,7 @@ import ( "path/filepath" "testing" + "github.com/pingcap/failpoint" backup "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/storage" "github.com/stretchr/testify/require" @@ -39,10 +40,10 @@ func requireFileNotExists(t *testing.T, path string) { func TestTryLockRemote(t *testing.T) { ctx := context.Background() strg, pth := createMockStorage(t) - err := storage.TryLockRemote(ctx, strg, "test.lock", "This file is mine!") + lock, err := storage.TryLockRemote(ctx, strg, "test.lock", "This file is mine!") require.NoError(t, err) requireFileExists(t, filepath.Join(pth, "test.lock")) - err = storage.UnlockRemote(ctx, strg, "test.lock") + err = lock.Unlock(ctx) require.NoError(t, err) requireFileNotExists(t, 
filepath.Join(pth, "test.lock")) } @@ -50,12 +51,65 @@ func TestTryLockRemote(t *testing.T) { func TestConflictLock(t *testing.T) { ctx := context.Background() strg, pth := createMockStorage(t) - err := storage.TryLockRemote(ctx, strg, "test.lock", "This file is mine!") + lock, err := storage.TryLockRemote(ctx, strg, "test.lock", "This file is mine!") require.NoError(t, err) - err = storage.TryLockRemote(ctx, strg, "test.lock", "This file is mine!") - require.ErrorContains(t, err, "locked, meta = Locked") + _, err = storage.TryLockRemote(ctx, strg, "test.lock", "This file is mine!") + require.ErrorContains(t, err, "conflict file test.lock") requireFileExists(t, filepath.Join(pth, "test.lock")) - err = storage.UnlockRemote(ctx, strg, "test.lock") + err = lock.Unlock(ctx) require.NoError(t, err) requireFileNotExists(t, filepath.Join(pth, "test.lock")) } + +func TestRWLock(t *testing.T) { + ctx := context.Background() + strg, path := createMockStorage(t) + lock, err := storage.TryLockRemoteRead(ctx, strg, "test.lock", "I wanna read it!") + require.NoError(t, err) + lock2, err := storage.TryLockRemoteRead(ctx, strg, "test.lock", "I wanna read it too!") + require.NoError(t, err) + _, err = storage.TryLockRemoteWrite(ctx, strg, "test.lock", "I wanna write it, you get out!") + require.Error(t, err) + require.NoError(t, lock.Unlock(ctx)) + require.NoError(t, lock2.Unlock(ctx)) + l, err := storage.TryLockRemoteWrite(ctx, strg, "test.lock", "Can I have a write lock?") + require.NoError(t, err) + requireFileExists(t, filepath.Join(path, "test.lock.WRIT")) + require.NoError(t, l.Unlock(ctx)) + requireFileNotExists(t, filepath.Join(path, "test.lock.WRIT")) +} + +func TestConcurrentLock(t *testing.T) { + ctx := context.Background() + strg, path := createMockStorage(t) + + errChA := make(chan error, 1) + errChB := make(chan error, 1) + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/br/pkg/storage/exclusive-write-commit-to-1", "1*pause")) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/br/pkg/storage/exclusive-write-commit-to-2", "1*pause")) + + go func() { + _, err := storage.TryLockRemote(ctx, strg, "test.lock", "I wanna read it, but I hesitated before send my intention!") + errChA <- err + }() + + go func() { + _, err := storage.TryLockRemote(ctx, strg, "test.lock", "I wanna read it too, but I hesitated before committing!") + errChB <- err + }() + + failpoint.Disable("github.com/pingcap/tidb/br/pkg/storage/exclusive-write-commit-to-1") + failpoint.Disable("github.com/pingcap/tidb/br/pkg/storage/exclusive-write-commit-to-2") + + // There is exactly one error. 
+	errA := <-errChA
+	errB := <-errChB
+	if errA == nil {
+		require.Error(t, errB)
+	} else {
+		require.NoError(t, errB)
+	}
+
+	requireFileExists(t, filepath.Join(path, "test.lock"))
+}
diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go
index 761adb4418dae..b69c857fa9aaa 100644
--- a/br/pkg/task/stream.go
+++ b/br/pkg/task/stream.go
@@ -1025,11 +1025,12 @@ func RunStreamTruncate(c context.Context, g glue.Glue, cmdName string, cfg *Stre
 	if err != nil {
 		return err
 	}
-	if err := storage.TryLockRemote(ctx, extStorage, truncateLockPath, hintOnTruncateLock); err != nil {
+	lock, err := storage.TryLockRemote(ctx, extStorage, truncateLockPath, hintOnTruncateLock)
+	if err != nil {
 		return err
 	}
 	defer utils.WithCleanUp(&err, 10*time.Second, func(ctx context.Context) error {
-		return storage.UnlockRemote(ctx, extStorage, truncateLockPath)
+		return lock.Unlock(ctx)
 	})
 
 	sp, err := stream.GetTSFromFile(ctx, extStorage, stream.TruncateSafePointFileName)
diff --git a/docs/design/2024-10-11-put-and-verify-transactions-for-external-storages.md b/docs/design/2024-10-11-put-and-verify-transactions-for-external-storages.md
new file mode 100644
index 0000000000000..666b8a9cee068
--- /dev/null
+++ b/docs/design/2024-10-11-put-and-verify-transactions-for-external-storages.md
@@ -0,0 +1,233 @@
+# Put and Verify Transactions for External Storages
+
+- Author: [Yu Juncen](https://github.com/YuJuncen)
+- Tracking Issue: https://github.com/pingcap/tidb/issues/56523
+
+## Background
+
+Sometimes, we need to control concurrent access to the same backup archive, for example:
+
+- When compacting / restoring, we want to block migrating to a new version.
+- When migrating the backup storage to a new version, we want to forbid reading.
+- When truncating the storage, we don't want another truncating operation to happen.
+- When backing up, we don't want another backup to use the same storage.
+
+But external storage locking isn't trivial. Simply putting a lock file isn't safe enough: after checking that there isn't such a lock file, another process may write it immediately. Object locks provide stronger consistency, but also require extra configuration and permissions. Most object storages also support "conditional write", which is lighter weight than object locks in the concurrency-control scenario. But both object locks and conditional writes focus on "entities", so the available conditions are restricted: you cannot say, "if the prefix `/competitor` doesn't contain any file, write `/me`.", at least for now (mid 2024).
+
+This proposal describes a new procedure for locking / unlocking that is safe in all object storages with a *strong consistency* guarantee over their PUT, GET and LIST APIs. This has been promised in:
+
+- S3: https://aws.amazon.com/cn/s3/consistency/
+- Google Cloud Storage: https://cloud.google.com/storage/docs/consistency#strongly_consistent_operations
+- Azure Blob Storage: https://github.com/MicrosoftDocs/azure-docs/issues/105331#issuecomment-1450252384 (But no official documents found yet :( )
+
+## Spec
+
+A put-and-verify transaction looks like:
+
+```go
+type VerifyWriteContext struct {
+	context.Context
+	Target  string
+	Storage ExternalStorage
+	TxnID   uuid.UUID
+}
+
+type ExclusiveWrite struct {
+	// Target is the target file of this txn.
+	// There shouldn't be other files that share this prefix with this file, or the txn will fail.
+	Target string
+	// Content is the content that needs to be written to that file.
+	Content func(txnID uuid.UUID) []byte
+	// Verify allows you to add other preconditions to the write.
+	// This will be called when the write is allowed and about to be performed.
+	// If `Verify()` returns an error, the write will be aborted.
+	//
+	// With this, you may add extra preconditions to committing, in use cases like RWLock.
+	Verify func(ctx VerifyWriteContext) error
+}
+```
+
+After successfully committing such a PutAndVerify transaction, the following invariants hold:
+
+- The `Target` is written exactly once.
+- The function `Verify()` returns no error after this transaction is committed, provided that `Verify` satisfies the following:
+  - Once it returns no error, it keeps returning no error as long as exactly one file has the prefix.
+
+A PutAndVerify txn is committed by:
+
+- Putting an intention file with a random suffix to the `Target`.
+- Checking whether there is another file in the `Target`.
+- If there is, removing our intention file and backing off, or reporting an error to the caller.
+- If there isn't, and `Verify()` returns no error, putting the `Content()` to the `Target` (without the suffix), then removing the intention file.
+
+Here is the detailed code for committing such a transaction:
+
+```go
+func (w ExclusiveWrite) CommitTo(ctx context.Context, s ExternalStorage) (uuid.UUID, error) {
+	txnID := uuid.New()
+	cx := VerifyWriteContext{
+		Context: ctx,
+		Target:  w.Target,
+		Storage: s,
+		TxnID:   txnID,
+	}
+	intentFileName := cx.IntentFileName() // Should be "{Target}.INTENT.{UUID}"
+	checkConflict := func() error {
+		var err error
+		if w.Verify != nil {
+			err = multierr.Append(err, w.Verify(cx))
+		}
+		return multierr.Append(err, cx.assertOnlyMyIntent())
+	}
+
+	if err := checkConflict(); err != nil {
+		return uuid.UUID{}, errors.Annotate(err, "during initial check")
+	}
+	if err := s.WriteFile(cx, intentFileName, []byte{}); err != nil {
+		return uuid.UUID{}, errors.Annotate(err, "during writing intention file")
+	}
+	defer s.DeleteFile(cx, intentFileName)
+	if err := checkConflict(); err != nil {
+		return uuid.UUID{}, errors.Annotate(err, "during checking whether there are other intentions")
+	}
+
+	return txnID, s.WriteFile(cx, w.Target, w.Content(txnID))
+}
+```
+
+An example of the txn aborting when there are two conflicting txns (the names of the intent files are simplified for readability):
+
+| Alice's Txn                              | Bob's Txn                                  |
+| ---------------------------------------- | ------------------------------------------ |
+| intentFile := "LOCK_Alice"               | intentFile := "LOCK_Bob"                   |
+|                                          | Verify() → **OK**                          |
+| Verify() → **OK**                        |                                            |
+|                                          | Write("LOCK_Bob", "") → **OK**             |
+| Write("LOCK_Alice", "") → **OK**         |                                            |
+|                                          | Verify() → **Failed! "LOCK_Alice" exists** |
+| Verify() → **Failed! "LOCK_Bob" exists** |                                            |
+|                                          | Delete("LOCK_Bob") → **OK**                |
+| Delete("LOCK_Alice") → **OK**            |                                            |
+| ABORT                                    | ABORT                                      |
+
+Then, they may retry committing.
+
+| Alice's Txn                                         | Bob's Txn                                  |
+| --------------------------------------------------- | ------------------------------------------ |
+| intentFile := "LOCK_Alice"                           | intentFile := "LOCK_Bob"                   |
+|                                                      | Verify() → **OK**                          |
+| Verify() → **OK**                                    |                                            |
+|                                                      | Write("LOCK_Bob", "") → **OK**             |
+| Write("LOCK_Alice", "") → **OK**                     |                                            |
+|                                                      | Verify() → **Failed! "LOCK_Alice" exists** |
+|                                                      | Delete("LOCK_Bob") → **OK**                |
+| Verify() → **OK**                                    |                                            |
+| Write("LOCK_Alice", "Alice owns the lock") → **OK**  |                                            |
+| COMMITTED                                            | ABORT                                      |
+
+This time, Alice is lucky enough: she commits her transaction, while Bob gives up committing when he realizes that there is a conflicting transaction.
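+
+The retry policy is left to the caller. Below is a minimal sketch of how a caller might retry a conflicted commit. `commitWithRetry` and its backoff numbers are illustrative only and are not part of this patch; imports are omitted as in the snippets above:
+
+```go
+// commitWithRetry retries a conflicted ExclusiveWrite a bounded number of times,
+// backing off a little between attempts so that two conflicting writers are
+// unlikely to collide again at the same moment.
+func commitWithRetry(ctx context.Context, s ExternalStorage, w ExclusiveWrite, maxRetry int) (uuid.UUID, error) {
+	var lastErr error
+	for i := 0; i < maxRetry; i++ {
+		id, err := w.CommitTo(ctx, s)
+		if err == nil {
+			return id, nil
+		}
+		lastErr = err
+		select {
+		case <-ctx.Done():
+			return uuid.UUID{}, ctx.Err()
+		case <-time.After(time.Duration(i+1) * 100 * time.Millisecond):
+		}
+	}
+	return uuid.UUID{}, lastErr
+}
+```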
+
+## Correctness
+
+### Atomic CAS
+
+Here is a TLA+ module that describes the algorithm.
+
+You can find a rendered version [here](imgs/write-and-verify-tla.pdf).
+
+The theorem has been checked with a TLA+ model; [here](imgs/write-and-verfiy-tla-states.pdf) is the state transition graph with 2 clients, which you may check manually.
+
+### Invariant of `Verify()`
+
+It looks trivial: the last call to `Verify` makes sure that there is exactly one file with the prefix.
+By its definition, it should then keep returning nil until that file is removed.
+
+## Example 1: Mutex
+
+An empty `Verify()` function is enough to implement a Mutex in the external storage.
+
+```go
+func TryLockRemote(ctx context.Context, storage ExternalStorage, path string) error {
+	writer := ExclusiveWrite{
+		Target: path,
+		Content: func(_ uuid.UUID) []byte {
+			return []byte("I got the lock :D")
+		},
+	}
+
+	_, err := writer.CommitTo(ctx, storage)
+	return err
+}
+```
+
+## Example 2: RwLock
+
+We can use `Verify()` to check whether there is a conflicting lock.
+
+Before we start, let's involve a helper function of the verify context:
+
+```go
+// assertNoOtherOfPrefixExpect asserts that there is no file with the same prefix other than the expected file.
+func (cx VerifyWriteContext) assertNoOtherOfPrefixExpect(pfx string, expect string) error {
+	fileName := path.Base(pfx)
+	dirName := path.Dir(pfx)
+	return cx.Storage.WalkDir(cx, &WalkOption{
+		SubDir:    dirName,
+		ObjPrefix: fileName,
+	}, func(path string, size int64) error {
+		if path != expect {
+			return fmt.Errorf("there is conflict file %s", path)
+		}
+		return nil
+	})
+}
+```
+
+Then, when adding a write lock, we need to verify that nobody holds any sort of lock...
+
+Be aware that we are going to put the lock file at `"$path.WRIT"` instead of `"$path"`.
+
+```go
+func TryLockRemoteWrite(ctx context.Context, storage ExternalStorage, path string) error {
+	target := fmt.Sprintf("%s.WRIT", path)
+	writer := ExclusiveWrite{
+		Target: target,
+		Content: func(txnID uuid.UUID) []byte {
+			return []byte("I'm going to write something down :<")
+		},
+		Verify: func(ctx VerifyWriteContext) error {
+			return ctx.assertNoOtherOfPrefixExpect(path, ctx.IntentFileName())
+		},
+	}
+
+	_, err := writer.CommitTo(ctx, storage)
+	return err
+}
+```
+
+When putting a read lock, we need to check whether there is a write lock...
+
+```go
+func TryLockRemoteRead(ctx context.Context, storage ExternalStorage, path string) error {
+	readID := rand.Int63()
+	target := fmt.Sprintf("%s.READ.%016x", path, readID)
+	writeLock := fmt.Sprintf("%s.WRIT", path)
+	writer := ExclusiveWrite{
+		Target: target,
+		Content: func(txnID uuid.UUID) []byte {
+			return []byte("Guess what we will find today =D")
+		},
+		Verify: func(ctx VerifyWriteContext) error {
+			// Make sure that the write lock doesn't exist.
+			return ctx.assertNoOtherOfPrefixExpect(writeLock, "")
+		},
+	}
+
+	_, err := writer.CommitTo(ctx, storage)
+	return err
+}
+```
+
+Notice that we put each read lock with a random suffix, so read locks won't conflict with each other.
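+
+For completeness, here is a sketch of how a caller could use the read lock added to `br/pkg/storage` in this patch; the actual functions also take a hint string and return a `RemoteLock` handle, as exercised in `locking_test.go`. The function name `readWithLock`, the lock path `"v1/LOCK"`, and the hint text are made up for illustration, and imports are omitted as in the snippets above:
+
+```go
+// readWithLock takes a shared (read) lock before running a read-only operation,
+// and always releases it afterwards. While any read lock exists, TryLockRemoteWrite
+// on the same path fails, so writers cannot sneak in.
+func readWithLock(ctx context.Context, s ExternalStorage, readOnlyWork func(context.Context) error) error {
+	lock, err := TryLockRemoteRead(ctx, s, "v1/LOCK", "reading log backup metadata")
+	if err != nil {
+		// Somebody holds the write lock (e.g. a migration is running); give up or retry later.
+		return err
+	}
+	defer func() {
+		// A failed unlock only leaves a stale read-lock file behind, which can be
+		// removed manually, so we merely report it here.
+		if unlockErr := lock.Unlock(ctx); unlockErr != nil {
+			fmt.Println("failed to release read lock:", unlockErr)
+		}
+	}()
+	return readOnlyWork(ctx)
+}
+```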
diff --git a/docs/design/imgs/write-and-verfiy-tla-states.pdf b/docs/design/imgs/write-and-verfiy-tla-states.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..0d81db91fa24e765972d4ed8684586d180c36ee2
GIT binary patch
literal 34414
[binary PDF content (rendered TLA+ state transition graph) omitted]
zxkZC!BSr&8qiRF6l(k~JS*2O_iFo}y*Jj5;$ChXJOUeuN-Ehi$I~66BCRKOCKqXIv z%5=`gFfGqH%oLaCYu0CvcLWU@&Ax5p!Zp=Z1h;I{1M?|{&VmE;=Cm!s z-R>gruEfaO;(^NDrRQ#We=|HztF>WW;%Mg}lX3TN}`tG3RdZM3p)Aa)G!P>GP=N71j(Y4!k zNCxLIt|^m8rKZ&W#kHv^D%lu1kCL$h`l+HSeGcjvyu-<1!;{5A)PSEP-u$eLy=A83 zLSB)xN3$T0*QGz_*XYwa2F&R^d~o(YJOI(-NbC1uW@Pv0z(_g5O2VplMYK_Q-}0UAUDQI7P;}_N}lF!^Jxw0s5>89}q)d*#-+& zhF5bKDG=Ni2@=est%?@IBdER4?I^}1NujcNI9K#|terwIKbMHpOf2yl-kICL9j2~g zyn&esea26KseQFP=ooaP5x23@1^H9fkJ*RJC~vv2KRP7S{bQfHp4}5{aSlsfQc$uB zoo=cXDc7v!-Alm{c&e@&pmt8a~Mx45&4UftH5IZRc!U?0|lu&KIvhUMQD(8>}AG zZT$S29qcz*{y#ro6UU13Sqx6%mr?tvhl+z{CO-xZy)K+#A7w*y3QQ>GDP_4jlfA0`yU(NmnZ`zn5o%T24$A!D-03bmKwo)jYu~^{s0TCQp6H%l5VVnqgPbR$A4|&9O}I z=H+mptjG68D_nnJt_zXj!^YW1G3-3-y5fe@%5nvYA4~}b!SnFRvBggawH>Qc?5;W< z{fJ~1SPVv}$5t0ZhMm%fJ&0Y8S&1UceR3Y@^Jj3Xps-78<<)dzHuMbd5&;^@3_-1n zH6?~zXNG2`#U{V$usR(E_$diL7ZP8`vdRU5j-kex7(T?BCr8>}EI-L~gt8W~+CBuT z6ouCBmfZ?iu{t*aztn!V#hfKNvqiAMUY>;ramHL{mh?>x;uZ`x2MdC)M}czJ{i>4+ zovWN%Cn2fZ0<7Glds#Vpfx;;wGMmmyZ>jO~Ukkm%qBVHrNeZ+ro7$i5WU(yE!3$$- zGI6mVr+U&C6VmOENGYAkxr@xjra9*V7C~iO6IvXdXwphlNgWEN137+X=^)r6&&d~Y zm{peLo7&(E*al(>U{cQFp22Eb5TBX?Z4&fjlV%G(32l=#bbn4|(-z$mlN)F6dBw?; zlAwfvf3~b&zeV2Km*OX>H2l70(|oa@{E;r&lKaHT#aF^}DzNz&fK4I)qEl_Tl}W^} z0US-=LYHvuVH(IN(ktP!@)v0o6X$53CNZPeZc6An1fJVlXtQUMxq<|x8+N$h{IL-Z zJpEp`MwR3_GyGvHa9l9!(Ue%dIa8A6mgLz9rK2F6)Clme!B2IJ7uqo$b0z$K4X7)vIxZ~f){x~ty%bm%Gu$|{ zVhtw5JS_rGZ1=}dNXWUSJ|2c0FQU6-E*z0joc+HCG?wI$W=P1ewAqFFnu!fTGXizH`R8QJKasukTK13l&<2$D77*)f*hLwJ904>Y8_mTef;~YwZy1<5zQhR zeS(9317%!8{KCdal#vUEiMUq|^JxcLlt(#bK|{gOR24xFu5k1VOqcAi^0$z(a@dif z5=#R1F%sNG8$ij}Xe$30OV5C#%oU28QL@30vWurMi_xPjBa-lBF)^+nW}szErn8v~ zd8HyG#K&X~n!2)B7)%iiw2H-wuKb;;$hiv$Gm}@aM~sVOVdEE^DqI0WBIp$AP4-ss zD;yW>uZOy!#73sRjoTw(u+x{7>kmH%GpD*31Z~1nbKVJw(y5%r-c@}@GlmvDI=P@E zAOL0*qP9PHsDPCu*ESEf-0>kFph1k=%L?N|qFTB9TxG13;6Lb)@e@#v(jtqWW2@@>DTr^u%J=PioFf&oaqqYBz#!!c-;-7ew{S zfgf=!3F|Pb6?%R4bjDr2&tTZ76<8rL5i?R%hJ%@2ul*E6(C1Y~BX2iTf;M*=IVL={ zh{S77$e4!quppYds+zB8PSU9=GV_NUUN%^i*(n-@N2yQi1coRtAZL;c3`yrhBI4{_ zP*u}2TW+CxIvcU6Ol>j4#4$2lZ2=+b>;rtd97GX4YxIVVo@0EB@#Qej-G7Fn81{nx zIc{Gp6_QsK+$oAxjqvC1GK;bkR6um}#c^BBxUP|*3(ac=3YsRt^0a+bt(v#(^UYdv z8za_svrJQ&(SwUO>w$QvPy0de2c8v|Pw^hsnSiTRj7b8l$xT$_JOhMR!%e>~Tb1}6 zZ|m5^!-SDL>1R<(_@RZ6jE*e(FIX#IaQ<`)_n==rI616-+wfv&il!v*0=@bYAQuW3 zD!09zpbe71LHFS-Yu2qn?oz4>u_8CoJ>8(sf+OQ}sn@jN){M)FMG+Wgh5XLAb2LAe zdUf?$Ep~ZV-$Fs5t$PtOpfkFNfKz#M2eNB>rfBl|fWR-2@^(pn z2)7&M8CcSsZOS~SrCV&?9Dz3se<)m6LTfa}S#Rjhxu?T`WFwPHvP=8^#uT`*t9=8% zzp505 zMFKr6F>Wp+CQ{L(d}p%@=bcbACMy^^Un?#%=*GP?7w0%pBFPpsPH8T~S{$-`LJLHr zgHc?{gk;`wAD8AZX@*Z{2V+DhV$Ff?EeYqP(1CjU$7#^$@7ck+RI2(%ky{TmF8X zyuv_JPePzb(Da?mW%$qG+t&+=-wDk0v)37j>-7GJv1XdQ4cG(KR${jMhX?Cl-}_M7 z9Nh7ka?XKQwNh+}CgT4d91!iC8W4NT9An=!f^76Z)a=^6kU6}g%(Ava{bDZ;aUdzs)*K}#zYj21 zjtXGt5W1M2=l)9iFOYxI+aLR006PK)?F&WFMf*l&=@LhF z@`fhcMx3oXfbqM3R;|AQY%&?}>kRxH0&>6$^*vvLV~3;Y82p?utP7vqhi`VXX1E^` zO(Y6uJiZBg2J{&G9z^QiPc6vKB{c%?TZ6y`J~LpSVq}2%#WNxAtpw_4w%BT&MogAn zAyuBg+N(HQVs;)=ccuD>GiI&eY)9v%vj%ZD=gmZW(d1^PxbwGg0d@%2$ix5fut79! 
z9-XN;k-3|^k6sk(g`myacBh%oLy^~dx^pM%g+ev#kwCN9 z9{X66Wcp_Dx!V{G)Y_(*Z2Npt>%4K>eA@g9PtZcC@M_?gvqbvGMQlV9Y#sN|r-)TZ z=pfekhFU85z;5l3aZat;o)O;>Ao9)klX1zl`pxc{s}`?jhj{N>EB(QNPsP3$4P zwntj*0UK4W)SDZf4d%47wg-prY2XL8uk)ZMgZ~*I^5F7N>Lu5a#>k`AG?qOzTgndJ zU<=4oozT{U3SY2s6jVgbLaG`(jl$uDZN5~JR(mmxcd zIc8uf<&$KEqj%8V^?dq%LBa{pJ##@2T4N*1fv1g7g$lX6Q7yK?^F9awY#X z{V=>Cixyo&3Ami?2|`$3d}Sn{T(7y0(qwu`&aE39~BQ(^0)sC&6} z=FfZhsqoh>6|N1Qqzn1nRQh-4O7Dn0gk`+Hm>O~>1*LF$Q- z(}F1y>LDqgqY#p=k&-(Xtfi{yz@VS)oUqxwE_%_O#Gq)21s5`IyjP&~Dy<(h#RHm6 z+i39hGJu9;4pr9(1oV7ja+qXn(oT|WQY-5*o3XS^_3hb5aus4TU0WIZuCG2yABk`~ z-J@N(ipT36a%%m2k=8xC7S71=eYjqfclWL3EoZ+cNmTeuJ=VLkbqsNi_Eu^x0ukX3 zJcEVbv8dQZgybT^mTIAsss=O5d5=xGd3gad@s?Gnez@GF0ZYWY6e7qW<&uOUHv$wY zc4&73V|hNS}xN`{!F1%wXbxdhn=guvOTm}w{eJux;#26!(4m2b$aQ1P;b zdjS1JctG|@0;ypRtT?81V8qwgQDfObH^D@ZhmPhdrOmj={iCvPNUg)&nrExg6Ktg? zwOe-+ab2EE<-kfqS5dV&qspmOR&Lgotp1aPbMzLAQ0IlDfM92@vQ=%Co^1QpJUXto zsiwt|u-PVktjbxFY^Y^=M1^wEgWOSZ4JXMPL8?lzRJr1}d`*z3a+HU@w2FpRrx5zKN)XURt6qoPQbo%XMdue8qH4k_hzeK<;bq|jPkEsYd3^N`TAWN)pblc+| z9qTqqT6T-Wk*NEHZ*oe#U;O^)WQFow!2VAp$lO}PD1ygck*DxR$kdtkhMD8YTR@}; zpdBfqA#hQX8xa=F#PMvi>vHbWUn6_#=#27$vxIsE;~71HhOLD8M7pbJuE<}bQ0Jta zs*ee9A{@4eugC#N0*>^5RCuw;YX02Mq5D6a#v z(q>W2ULkdc+Cfju6Q@Gl>?6)LZ)TE;mu+-O!7jRJ!mGi4l>aUtJxUjyCYw5)GMy?N z3eaz#Kyv7Io9S5h?6HimZn+NW+~zUGfNvkxF+x~tKPPPtL>glOig9W%i)TEpNm<`` zOkipLYh|6@I7wYjd%nGI1ILUZPrEb>Yw-d-27c^o<~s<4@o3+xT?%*mIpslN^5 zibzLAo+qHxa8zv(?}Vsg6L=7wwo|s2)`s*qYZ0o$)$K>KZfsBFB=bv8ZIK!IB&J%~ zxUA5^j4W{Vk59;FSzqym5G=HpA|X6jgc;xLT(w)-hyEYf&wqG6|2J^XH;U)Kutu&K+Hum>>vV{LPh4QhCxkIliiRMN z;9-t?!t;dI2>7u?s(jK=4IS4PjEuBmzBCmtLY=I<_e|5nSHlc8O)tvHj(L+Z4x@=x zA$V@J(pAIIFf)umtXEb{{=-mqumwgl@XFE=SKZylR>tB?kUfa2a z4Tf!1f_WSXjHz8#2u;~V{<(2qZ-<&{L*ySgWhdOV4OZ+aJdR_(r0}{a9P}Rbrq4Bu z;ES#v$9}D}`@0OE_v*0mp(x#fdLw7`#M8M$&Itzagl+Nf3C)u%NaWegu-%Bb65a7o zI_S~9y3G0bvWB?u@-af@U+SE4>^sKI14L*U-7`EY!QlJg>)BFC1n`9Mr`!+e<*<0-c3{9a{mO56fimdzo)>z|LcFv0VaCJ|DFN#{{Wx5Aj)@tc zo{o|Izc5qcj{25n0Dfx|OCx-`e^29g0Q>{0Vq>TOSN!Kn|B;|w+@-!7$mW8(TiQ~{6GpsSjcCp8ng&w{O8L`nfHE<&l>FCLkF~%+0 z4!s~$}n?Kf^tdD(v+CD3wFpVmi&PNuNIme1( z6m#!OoCiLfho9|F#a!_bxN{EqXU^l#@wKawXRd)zRp-9+ePEZg)OoU6J-@#szaaZq A3jhEB literal 0 HcmV?d00001 diff --git a/docs/design/imgs/write-and-verify-tla.pdf b/docs/design/imgs/write-and-verify-tla.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f513736b2efe020b5bc93de63fb9def75cb8f218 GIT binary patch literal 168008 zcmb@u1z1$?(=beTgEY7_0s_k}v4C_5l2Q`V-3`*+(ntuBA_$0pgn%?iE2YvXNJ>ce zcXrp`|L60*&wG8(7hD&^?3}shz9;UPx!H3Ln}(diT`;d89^0qM-miEN5I@Mp+#XL{ z9FOmTm9vefEl2TgZLC2%{;B-tSnqCt?(oy@H{--t<0S8d}n?c z8hT79Py~&=Arb!uv28iQCm@d0!g~2|CN&gWMyyZMJe*}hj^g*Ozi#T?ssKxsr(|fq zWc$2pC1yM>Mt!{1Z9Qo}uWz8V%!W+Ex$zv`BiV?1+2I?rdF9@?;DfN4D`qe6O&o^1%Uee9b_V7H69L*z2A zyxZR{?*DshdC2{dvAa=aJWcD~c-Boq6`ktnTAEMOeZkKT@sBNs4&Is<57;%Q$IobW z;y}#b9iuWoAh-Wz-|4w8kZajoz}dju__(I-%NwHpz64&$b47MO2@16AL{_&cHZ_w4H2$D|w6o7b_gOk;K4&og?N$%OGY8w{lw zpZPe1w@K&sF_92#ep$LdPG)~G=hGhE>z)35^x@=v`eCjou#MuAQ$K+`wh7{WHx+(6 z{F-k6fxyPBlH1=Ea4-ZqyZfBdXdo7`+Aj}l@L0HG8+X5Hb@{bpWbuxTUIrzB{glZH z&%c>!`8s6hHn9uEU}AA?e#j<~Gv=veN~mBzmrz-w32WA{Oq@cusGg5(u3+%k8jBd; zSTUH5mbW)8@57*Q*R(pKpUq~?=oevTRa&p!bKEh_#w+KWam^NTd*p&CWKCTt6K&Q$ zQO#5%=)|S1mph2+IQXXjW$uz`;3tc=$!Sd57KOz5JJ#oEPI}u^gVzEq-?Byu6i?&86uB}YhRyTrF8wVbQTo=$ z7-};)D<>DFtkI?=nc*bructddqDtQq)FV(J>QpL>PSYiIP;M9JhEZ#`*t2Pnw3#_$ zw|g55bkDBn?=@peRAL!Ux~?=r@R+?eJ@|8L5;0Z4Jsv3p7Pc`Lgi~quE%9e zK-Z8-qVB-f6%u@Fp65<`|JDpnGXL5g%?B<+`l=Nu8{g0czs7>+4WGl5^~%eeyF3!3 z_1%a~39J^qt|<788lJYWoZ2&;OIY8v7m{HA9qOewK%-c&BZYk=6!} 
zKt+=gc~@0q6%2{0u1yU-)^FRwoQCcvtVHNm1xELn-#g3@erCilmDs#Sxb?6&5W+o- zzx*|E1(QO#@B<6Unxl;_5v^soy#aCUHA5!G2lY-$2QBYmv;Twdvzb-ivXaXuQ|`9WQDJ#M?MOv|oBYzARsOes^dQ z^;gcw#6)P$+x}13GAIuuO;d<;$&0WaQ{bYaVzi4+N|m7aWr|95jChYJTCMrxO<&{} zv0Od}74f*+&mT$s#M|=-iwRMpsP*s7`)>OF`ocumz)gMc&rzYJI5o^ zHm7YkuJ;|{V){nvK)e_~9WzU^&Zei7(5|1JULWHc15Pg7`=H8oW9XGmRj zbVjT#zq=E@GDKbDlM^tUT*nZsti625z{A1x@US|h{tJ0>m5)!opHHJ(uw_@S_SbNS z$$3k+l=|jpUWa^RSFc6S^saUnUk16zcJ8Jbq-;DC%~KtpSfOV5 ztM%Bm3 z%B-BYe02KUPVMMCfh^JVTMMr%KTh{Ye?oWLuaX&?)&$Ln;zgL4iODBG=BsOtG#op_ zBhHfEVy+*#J(J1Hv|p1Wa?1PI$)Sq!3&X{eFX`C^sg=FNNQ0XWVRrd8Nlc}4lm7;6 z$z4b3Lw|mGQ{EnR+&U6=k{!u&!!sMPdfWLT zRX&xiGe*ZRZg&@LaO{Q6>&}Y%u)A&_)PcCY%1CjYo>QdQI9E?({yRvsWQ zlAvm3X=f(m;tMk52M9u70gy0Ez!(o0?hcFtfdvt4K(q5_BKWX>Iwb`_I7nO^=!+o2 zASj#>jzU3*F#sw60SWz~f<-{We^3E_kjOu%5D50qFcA$*N?>h#8tyI@53M{whJfC33LrjhD_>8LF}z3nXW8&6|5^8+4iC%^ z5&WNacq4kV&a(<6fnW3-FIXv;ENymQGxJfVvMZ#rXP8QgI2S5A@@PMsv`#+V6>E(! z8t0Cei7Ox$!nF7N>25tb>V6#Rc03gGWvuROeZa#k3)^KKd=rn$D_v}(U`V1>cNS-y z7_45swx*u1a*JBjb%sRAu4Zodw|u^iwG0G(L0Rv+UyUw5E|#j*kOMbkNJ^VG4`F3K zdwGp>z^(lRvWPUzFXLF=sRD&m?~ISxNX=V^C4RmOR=?wGp>4MrdeL_ntrQV4^^z`L zIYh7;mn*CQRVUmvuLO_5>Je%)|2=%=jWu-6`)au*put}!-XuL9xeeDg$-Q&&I{_nrwzN$l~zn3O6u#2BgLYx^s z8l?MO<9hmHaP#HyG;9a=r4RJN6hgvOfHiwOI;P3by&W>58A_ViRLx?Da#!fN<^01_ zB8^Fw>_y9qSx36^Yyq~mOV<-myOM65CMBDw2h6uL7%jB&1ZJO6#qvj<&sXi*3hFQP zt3h(2yl-IjLzu}2Ju~}(8M|+y)zAZyx7WV~9FD#EsxKltIEDUb;;3LwG;{Xzl%Ngw z`Ob5<0kD2KrwMGML*pfBbS`eAFB7 z1(sraO)Rs1rp~=oZ|h-x!g}+D(#P+}j!q%9s^VFld=(Xx>{sT-^fgaf9|g}4 zRd838g6u*n1~(_T#M(xb-ZECP7!Q)TNimIt%lwY*iq*cUlR2J5E9YTt8KGwY= z5=fW-Cf&bs_1_5<@>c@>D_cXrAn<>3v>*tO^CuSz04L->I1~aB_$Nn0{@dLBKg!WS zxj-ssCKjM0mj<5g|c1j(}jWAdFY= zzmS9l_<4mu5Frs>pg8eEcm@6&{O_xRP$(b)36cCr9Le)H9ZC6{4&jH-4igjvW<)L# zDdBH9g8clvP!Lo^kXIN4feG^p{j2srzBUww3iARr`JYn%PrMmI1Yp2eFc{Dq1`-y6 z@d8B_0m1xWxDo#RK@@hV5O7rp0g*g{z?2ZcW?(?X9|#kGTMhw*`QZkFfq?~qgasi0 zj}Txu0g#9YTqPl}5ZuyW5jb5$1g@m82%rF5z92|gL;%hy3b?HxOX~q#d9_0)V~<_^+Y==@|?K08YVw>G1u4h&y2*jDWQPssNe_2m@;q z<_9(!gdflwZV4z@0DyvkY(QZkjD>^+;06Gq6<9xlLqq^>bL7}RIAA?wXabf*$VYM_ zA(&s7A8s4OU?db0gu4nJ3j!cPLH>U*s0c6>lKSTwf&zuXje&%Me^HP;f7AaSi=+z( zLxGt@gyG(S$1r?fz-Ree4pb1XA`}sl5J4gMRpEbQ8nDbiTO7s@$Pz?A5x~5FTMqPke)ujDL_j13sk|^?Z1_$< zs0~I$6%bEAvH_e7#3cmoA|xjSh$^_FfH(&105`Xg5CovW0P_LyhcM+I$p{I5a1k(k z6v9c!S^mt6;0N==;9(5+0ul;Af%N=ejPbW&{#Ndfc@TpCcf3WkqRL>5%5179H@E!L@8oBL*SA6A50jr z-vki500JaG7zhdB9Ks0dpKE~m0c#*A@N56mU*w2CGJv%MNd+MTX(uEU5)uLe;=e#- z5dfSA*Al1~K-z(iMe-mMAyVca2{3pn6@iLIy~l$QmXB zubo07!axuq<-!ov@xQDC5rzT?yTCR63;WA9P=q8wpo{DI1*t> z`1HbvfcXOv4ni&!Ih!yHUfYF$G!Czyh}sTRb--Cbt%Db6cybj4oB(tUf450UIl$K^ zLO{0wKfeP#4!{C55b$(_h%g{)2muWQ-2Wm#%?Bny`Wz4gL>Q2ufC>W?PZ6LuKu!hs zqY$EN6GqgAzg7Kj@V`ukjIaM4{$u^W_VB+^@Sj%nud9fNM4IQ%?)?8@ha)8+rT)Fc z|3nN>&;RU9q@#eO4Fk-I*oQ#!2b>@Ruav^@y8kE2;T(UQ2>g=-hOjaG`X3r%>>uff z9gYl%KOq3;1Af8$NDu${h>Qm0QsHJpVE-EHKm9pS5&rH}fq$?3$)o?vfd6#1f(Y-5 z0A&V<1H^8I@WW#Q0zu?4gck+>4-nq8!do^Vv;jQ;8vs2o(5xaLa*PO2^xzQ$Z=~QH z0*EYzoIwz%4M4Ae>@J@QX*2$pEU}?`TvmyFTVe$mHtoY ze`F%HM?mCUe~(2{grV@gAPn?hfS-{%3j+HGXuyFTf{Y1acxgxSL4XPg<{a3CfW?rK5E>)L|Cs@Cu7J0#z<&dgGaxw;5C$X#VCKIlLO^W=vMgfNNW1=n z!>#hSW=L(|K128o;aNC_s{|NHN+6B{tK)8lD*c|eJvdHxblpXid1anLFXIstf+A3Cn& zqvZ1}B;C`;CptaM^Gk2r39R4aI=IUJOJ#4s*zA7mqLkv1TAt4M*+~r`buzp5f3VQH#wZ;D<$(`bZk934o+Zd86}hBZ3pjEvM%ThH+Z?`k4b5B&9q zMZx#bs2@-vLm<@ls2b?7FnUwa!;amTs9aKXpW2^T$QGm9>W47gOuw4V zkmDqx&{~ubl8MDADF?+_j$jZ0P6HOLB+aFc z7zv2G91lg}-Kl!`Q+6U#u~#g)EML!k(ze^X#YY}tnR7WgSfGbVF?@frFAUOgu{hu5 zyz!~Mak*csHa#6o!+&Y9e*;FD38^x^0s9rbAwS%pNOyWcg!a4rP 
ziwcmxF8dPmgB*OLi^}kVm+1TpWw6Dk3DqkfTmcet!->WI{@x*M6j2crszaliX3I8hd!U#qE*Auj1M}|*9E{~FLcMx6PJ*UG(`I>kWj)A((exuO- z+DiBe-5u?#bg&dJO3Sy~$#fmg*VrqlHHMd{eyH0^l1mQ_wgB)r_ojnGt5)_pMBUHn z1^~YRES4StZbAZ3yw`8T8+CDWmvdM)mmLmPUa%2(r+UxgBJ{EJmAGg6!l39h%lIWVr^ zRh2ppw)>m-^=6>LbD4DwS&Ry7Y3ukm^03dJ;k zEzZ?^gr_g{5*E(p&lk}$_H4v;u{tCXt1uqc*N%b5wcw#nM=7noA+^xkgB=pTrx+M( zd2y#(r!FTZ?(y|`qfrQhwk4b^MH`bXKWjBuwW&>?g>^`+Yu@i$w=%kEaCMnJcGaBx z0wpKKx+J*U$hN>9ydRo7#=Nsh`!c#)4_ddp8_HQ~A+M`LWwybv<;1spsk+B?ym+z9 z$d#!U8v%=&KJ>}YkI`rsKJp0`8wqj~kD)jJfscQSmp|iya&1Q@IIgv^b3c~H%i@Wy zw7N}Z+O{Iu=L(P>juvi;kMpbP13pt$DXskki!Yix+)o zYwEuq_Ga&#bLdj7$dVJ9J$P-AWbfqR<+Um&VRvfP>ahntt*)zDeMZXL$2pXNRkLFF ziZ4`Nll5dg=R_BictUn0zI#9c3z8qF61w6k{GClqZ?)-#oturCS_ZE3H6J$mU1qPN zix0Ve#{J|@_Agw@b6!6jdNjyzoX$^ww@v(fb$_~R=kdUEmyZ*cegnr3$Dd$52CWEE zjeC>6VjO0ndC-0P6i5$bfuc0?rFZo>)~8u(Putf>+RCDUMmy%+l}HwgIc&jx3ly^(X<*`EQY&ZYj@5$m2U+6$RCnrxW z#^htaVb9R&a9b;-E`E6)EQqj(7IW7Vlotar#$*9KZnfLbGh%Hp9Voe9N?z?@Vh<)L zAF+$>n=Wf=;{9?|WcKdmxO?+kSb0Uh?Xx59Wx4zPutuhetf>8J`n-&-udOltF44Z< z+!lN;M(;8&VgG*JJ+$x0XO$mUZkv_RaoUp4K**MeCa>A#t|#EvmNTtjr7~n@QJSoB zJK9jYU1uLO81XE}Q+mF~6V`aUNH$U!X4!%wWOoAoGBhxEfc{xNjZbEkfHy9IzRMv| zh?uj8$o_EDjPDbRNuFdTVVJS-W7{Xm=*%UrN)$$2OF(?MEF7ng#6KO8tC^Q7-j40^ z*RX*^)ns2#lko4L+HEp%C`Oln0~XShGqst*Lmxe}V0aa}nCk91`l-Z1*mN&QLcF=; zKFPd(520R@tRI=*2{?+|%x(2nD!qel5sdes*6k);kuAn9kn=9-X;CFxZpE&mL}y0| z{fqFjx6?}Zt`dJLcsHWIlRkSOLS$b3`?8}q8l+V`>Pt#-8Y`Z66pns9Mxo?n&sLSc z9cLl_$osW);2>@-dkw$*z!A}NleL5I7@9fCLH8yw-Im250%EIIN<$szd~u@VIg#C5{`2bu-7ZYIa{v0^akph?78&W7W%y}q~#w3)W{R%omnV;J@{NW@bqL6 z#9u+s+*+{SDPG?qoZ9P>JtVP`T8FoA#PGeqB6N$IXNp86TVG+U7+3y|!%_hmYxl;- zdezme=BE^tk*3t=P^Y35MQF!m0`oeK*KZIol312*@J664e9-swmD5KCl>c??d(KP-W_S~JWx-&4c306HjQ(}dtAl=u~ zxFsG@rkrk4ZRN>W^ZM1XGk*?Qyr!>|pdgc|6efqBt$9-PrpJr-Q+tMP=#E@|PhK}# zdCN|75Rh>_p1q`_8fBK-tR~(UksFSvX@_%kWa{Df zZvBy8sSKmh((=~!V+W^0ZyrEmEmtsSf0K%Pt~XK}^ldq8)DMk*QN=y`XaNbbkC?Ds z8y9?sOB9hq$(F-xBe%uyB*NsNbE8cRkDiR158@<~g`s?9fSowJ^EIivIb*+}?{gMs z#-PfB)71AlL-&{_bm})(_CJ^je9lp%a*5(kpmf~tLoeqLiWSQDDWFl6430Xu{KkSg zrEfWy^>hWMRdnCQ&3wtve#X6qIJDqiX-CZ)9no}TgI;5D2c3k_xOuh;=CF19AY2D+E`KvUsvZ355b2k1>BX3T%8nI)FrOE@nH|6`3 zU<`g~OK8e%&5LS7jxy4d=f`3`ZveCTdb~`*#Kt#^uK7BYrqoOXT?&Zcc zw~IuiV^n^SrtW>RZ-F2y2%dc)c7=vZf@7ZUPYD;1&GL6u6h?vV9RqAvnc4bHel}%Q ztWVV4%|>~y>rya6-?o=2ya3}Z48IP$_}l`@0KE;QoT{52^nltFhwc|XcHTN-_EJC> zf9p2K6Zbv!&G$w2N`3yX*xr)37SY3BM0z(=&1%++K1OA(befH9a9SVkSBek__{O>fk&{8N=FA#WPh7_JhT`E8@m zl`>S*D=lX*;*Wc=qM(!TpnfxB(|2epIE^=z=W`pC)|SqSWzl`pILcf7+o-RNkG$t! 
zWcEok1#P&Sj~QHqMj3nSr$zKA^$Gf4g`*XnQWGaj$s8={o}Ztvy@^NBlVol$Xi0sN z5+1#%<$Y_A>5XZn+0ZQCa>sYr7kB2?MUAwHMHay4~a94loKv8a+!@$cCv6+`zY;DKFRNIRW1R&g5vz#?)eR-cH z-j}K-sf|YYbW?#e2uU>_EMwiGVj2y-<3ccQ@>Ju81caca!2G#-*Ynprt6XVr^@)V3 zuVEdxy_-g9R}F6!3(8PeFF&EJ!U)Qj_LJy96Q=ZX7kvCs8Nx$^T1Ch!`c$%k9`*q;R;-rjVAb~a7RPy2%= z-doVP`P+%kxs0o1my~x^9ZQHQ&&!8#_+hAJyeuF4+;-;flp^xscbFBg5y6LU&)>LWPLAsM^op_L+oo74F&7R zI0w`f5m)1$tSl;?-Vy7gr(>{5n{0yAWJxnaoCNJc8?bOS<)*eDBiDd?CS4?V>E^g% zq=qZbh>}ynM@olM-XO`?(0jgvo^(PfGU4A3F=?!V{gXf`t)sklJ$GK+mmiF)YE@lY z-r~^Axc(w<@r@xr-lS+u_{r`KcBOBU*RRlhi4n zx{C19h!(G}4smKJH6WTvDE&yU(W}TExKNEYoK{n|kgD@2XIx#BpYN&s%!8&!)y*FW zHqEeu(_`FCPjKTN-JMy4R-b7L*0U~n+>}ds(BHQ~`&|c1#Z)aaVF`Q)Qy6K6q2>}y zyu}2gTY?Qu9=WhhD^$7W)pFkC{qpdhKKGH0rN%}*1D+pSCLoclUv+bbkJ2^ zSXH|J*j&p@jxox?CVB?^y1X+R^>U;C&MQ%k(ng1}FdZ){FE9li){B>uRF3a?x+`^JU-5 zNpu?iK4_c6##6|CIpedMGJ{^OvVjsiNPLf+31ViGc)M)n9#sIY zb}!2k{`u0TKu)HOvlYr2M&=K8bGjqvIj5xqYV$t+n5{8RcQT}DT((Ya#?6><16-3u zaTISo{eAQbdE(1_HZr41uJpfLF14);K+SuV-);K%E||h3$kFf2H4nWS zb3V({dbiORmXY4tzfbhcBed*T~r>5+R$iW~_HBenB~7kvzGcF<_CA*Pp`#T+PGd z+VR;MeHZhexm~0vo=v89h`T*>p^^Q@w;hlR1Vq0F!=UWbu-v!uGLROusPl&Ms&r*B zhG?Nf=dh1ucPQLz_Ux(H(727HCBt{^-6Jh%2jfx-)-%A4r{N+$P#P*UlMRL$8^!O7 zyF6J_!5oIjnUv>6-$kbpD*cYOBrRkxAd2F!?7VtWxK5?HTbz~OYzOk7s+NrpZ6ViU zYy(vrP}&*?1-I{v$#Gd4omHEywo8{rzy zARiov5|5|dKHI7n%VBBmz0~KlptOf7ym_A7R}lK%KV*A~s-A|bgpWe+HK?-PKY2 zAWmmIow)SHrRnMcRD-gB**{ljJm>jd^=?_qLk*PzWIu=;+gXgv9b`~(OD0fu_yIiS%L{1+567Pxc(hAc}1!SS2!PxVx zFfgZI*w_ENH|^ErYWH2ixs?Je^WW}dS9FwYI)`pXH!3t-WlWBZ9gg&@)AI*9ihGqf_>0e74X9dRaYmc9l-XBJ zNlU4*`GmVeXLxVY%<0Zt1Fjw@wvIGZ@zmVk@mqwAm+K$n^0hv@y=z7D&Tw1s$7*_@ z+1D1blBMHDFHz-|>8@8rd#K5*Hj%g!J@|{0vt5bf1<$Q0Vn>^Zx=Y=r;_X#3jGBjo zu0)7E(5}$XbJ9eo7(K@I<--knAmM>UvG~pP79TF(xCpy^m4l4>6bZ|2^bHZ~aR@A* zM)}!1ytVxVo5|;mZDfs}t7d`|C;qHqvS=%^T#mh97RNr*!E~CWobSoHc!thBVM^z@te#PMIUh9E2Hy;X*Y~}ftYfEA zaJNe9y!;wBCFty2NWEfE_cC?W#=me)jm`;2 z!;ynbSLI^vUQ5q`ter$I_43`#(xAH1vnSHeF@&Evs+Ts|{KGZX-z> zw&G?zh7mgLAntfN(KYL#fX)M<=r2Dm=p;DH--z@a*od06=cFyb7SJjg@+|np^YQZR zg@LCnB);k#;C(D$R!WXz5R9qci;gMBwzk7w=l}NVm~DMDBVd3tHF)H5r{d|k>J5P) zmYIV=1;HZEnNKiZQTmFQAM4=3m#mi>Vp>U%ILwnx-9BRqwru@(+^v%jZ@N;d62W|d zz1QzO&H@d{yyJY{ao9|vIte%LHsp*uarq~HnRKeLuMDoQX$yDzCbI1q`Zpl@(?dVjpkE7AN@I6enDAd4}Sm$(!FxJb_uMaR`JJQ?k@}0q-$j@4 zf;_fq`>2hR1l{HD1Qfkzzne29uV%LGJUJE(d)9x}RNbwWxMO4@OG+@38@kA$fca)X-OkME?5Dtif&v~m+OIZ#L<_TYz4rL36BH5>g{a?#PXgW&t(kF6tS zdfH+f*_QWF22jyCJ8tiod~o|kbgGpUYhXwkE>G9LI{Kb^^5h_BIZ}s5=OJd>xBYzf zHTONw#_B5zd|v9op9GDZPWliMu%pRand$EteYr}-Q-hEC-c)R~m7@j7z9>l%kynng;aJIj9YBlKQ@Nod%PL#%^{4ZhRyRlY`kDPNQKk+iY z?e};->K|zvB&CG1w;?K2|5VHDSJ+n9><2ct2$C=aUqn<~6(Ci6Iq58x&U6HV0$###n7x8|DlK!In7We4sPt1gk#B6fU zxwE$PD`vsSE!;&R_9zTd?4Nv?m|d@8E?$_-pi{ZoNJ7)E!_-Q6o;*GzT@7%od*vbV zJnr2c0_Pl~JUp_}`WFrO+TrUAZ8FAGso`whQL%}jl@o{03NOVBDgA#wyy<)aoGfc||r!7k#2?T3M^2&^Cf2m- zBUy$;zSF)v>+0r;r<9VvX=WS~{8=gg(PH*EJ`EgD{2jYyN`q zf;NwiVDyZW=wy=2J(TylGN!ElmbY|q&l~F3%C*vaA2q%u$<8g#ow>84_}g>Kq5!(H zPRNs$p{%F+zM(x4%YdMD>P&=YvP{0p?0I|Bc2A65wp2gy8~WH?mFG`s8JZ5}M+Pu` zeR>}b$EGh<+}vqUnf>KS&FYqtZ-00GU~q_dKk?ycGB#O&Wkm|GwPDq^%kHR&-8z%9#`yGNmh=D89~c$z<}~iS^w`A0z^yKZ zg^UpH-!#IR@4o8wo-Ox&=03r}n73H)J`vy@4CiQ^4(K@W9QY>98>d?6|Bf|_ceR7+ zp~QAk0+}54R^7*8J=kqcw$=dOXF)}WtuHD)?mB(H)vW$l04EnJ=kLR7?brVKIoZ!2 zy5xtwv5|4*%#(rfv6bV72ID`^7@TQ^bAIfh9VL-YyohAE#U9h_#x`YwF?au!t8-%8 zSbSBW8+*g&JN^vXKivyr^{SoH2IpUWRr!c{fBcM;ird(wdaB`Z4=d@x7D!xRhhc6x zz|crdl%m-Ghb2`;mFxcM1lMOWMpIY)AEhhLUp{Kx8c9-UR0r2`O8z!u$vB|@$Ty=u zbY*qtWFb{1Wy} zy9gLsi|+u9O*SX6ZChbGu}S1wFtP4VSEq5ZK9hJim681}t2B%KG@#tYcuC#*70-9G 
zbhI3*-{Jle@~ifK7_U7e8_I|DPN)5^F?#*WB4lkTUcgdwTV8#@NWp$|G==}=V~sr`y6)fQ0igEW@>VC=Ib-cKT@DlMm{ zXYlrVYf}Oy_T_Ah^AV2PpebV zUnS%AI(JeDTNn&q%WYHc>N^^Ltd?OAkAD1nsxw@y|7chq=X-A(LoZo{dROrQW5x8t z^N(1B+2`b5bg(Y&;FOhjCW%YJT1MN?A_&$AGF2U^1!)A!8Apa*`5it!Wii=m+YKs~ zuZwqbz&%2Q$p24tFfd#8n?W`7BMz zSp3e(?vdQ7(Ypl#X8-Z!Cg42#@sQ7sNTozPC; zRw56-a#g7bRJzPw$ShlPGK%!MD5GSsApg9n0xGzpv;O0kddyesTVC0F?Yce=DS3}6 z9dw~Wl&^)i1DaEsjgGgULGKlog4ib)lX-BrF3Mh)LknL&aIs7@(8~QKsBHZPJ1%n# zgAkK*6eljFvoS*<(6fnW8Y{2l327eZj(=o2>eCYEklh?ffr_6iJ$S)5z0N7z21WP3 zIi+e2pWUNY;FKObV@CO*lPPOlJmJo=m>_F^(B5lfp(ER(Jy+MmX=pg`j!=LyYLOupwhpi{)zj17}{R>UaW;(tN`Q5D;PL)k1 zw>L}}+c?rEvooJYT$hA1LX7Rnhr_wZPcrN%jX$>HQ1j9+;c7x zXBSWA1g=ikDIGFq9y5{k7s2)uT$X5l(n};&D7euK6vW1AOa6q0aj!QT!wfs7?Tx80pz8ZG|1>tH250XunF z&kQy>gQe!DEds{%M>QeT8=&y7FaH%`Z*}a!ONt@4vPv}}n6Xx4tZQN92C)s&dI|oG zZu{tt9L(8|@#^TY8U1<?1W z_4>waqsthV5S=woGRD=^H^ng)uW%y@lkRXi{>aqaKI;^b5m@)o%)KK!H2k7%L7RNc z5r<4l^4_&o28j~Z5xrK%V7B{MpHOf%GlsAQ|gp*Na@u_$Vx6RK&I#b;vi*bRE_`KR{5PRcM+Mena5EI*H@}I=* zeq(#=MJM{2pHU^;JR6N>t+mt6xn~A163$@{FBk`liE(wHXOh7)XCn2CQ^yT^=lUQ$ za&>MSKI z2l^r4)+KM<{g;Qn-Cq~Pa~semqVr7i!Pf z&P<3oHl8?>l`$GFGTASjYiVZHCnJPs8#@E@Fa&p%?gc~pMs7-`?KW zy2$U*MU72rwQ0F4*4T(|1sb8Uf>;gOU4zFc_tQ~=v2;B*z760n`c~6D?NJ^07CZWN z?ok%#vf{fv&UEI;0%K|G!#bIex!Xsi+axB|25~JZMkf&os;ySe<08iV>n3Vs=){g) zXrf*txbJEfqd~sD5z7}3mB)Fah$uc5V45>0-ggXQ?fG^8!m_ZFd*)7^LTr6bmNnLQ zqu-EXy!P8%RnoDAZ`*z*VVt^UqnfkhjlNM5Tf|8Hm6_y3@d*Puk6-mtlYmsh)(rY0 z?6FdkzfLb&N5exRY0ET=!-XzIJXe=HVR_}IYhzb(A#&u?*I`V${c;K4#|lC>Y6&xo z-j>f2V7vS2b!*N1P7M)Ud$wV|zB?zWt48`Dha6SHIP+YXX-;hDlfAh~Bw9?cmUqES z#Ep2j*l+GRcJMm)n|YgLC#PK7sewxK z+u4on*i5PBm8UY@OY1LR`=_M)wW;hFU!;CY>@_NDhZcmG4{6i+dxkMIeu(Ac@0N}} z$x&?c;SnfZZNgtT3H&)=WAXFn%7|?HyO+t0)C=wr^6j5^!d^MMi@k~Vu4K+m@V_Re z+Vcthz`Gv7Ws%S{NY(SD-R2IJ6BH^ciU6Y$~$IZbs9O>J&j zRoREKVBmgQ;H6JC9v~=^MF#Pz1mMjBcLfl)$qOUyy$9ZxAnjoRe_sM{`!n#;1v6JA zD?1xoPvA~{2p%7N8u&Fp)_qSiM>`8?XB$T=_{WEyR!+LWGZ=V$`hNyMA;8VNvbJXK z@V6Oo@|p3O^I7m&@>%g&^V#s(^4ak@@;UK2^SSW3^11VQ@Okoi@p<$4-a#%7cukcR z@GugftJYtyK7ju7%-w&vhZuO)^q*UW;SUVKr$xRN;qQBhfoD4*|I;@lB>G@#PXx|N zhE0;-lG?pQ6CP3U@xe<-o$CuBo&Bm14Kh~txl_(O&s9=-6|;hddObv1ibUxFH3k`` ztA}i|uTRh76O=|6TD)wLX6?;#VU)boK1QA>g}N(LsyO8R;~l9QY7(A1_?fAgp&><} zO(UY|KJh&rmG`cBPKMtOZ@^4gQ69*f*2;bCE6JLRT-^*ywM zd0B(v6H1(qv@Z!)SK*G-LnwW0$Hf&3=9^Z0#-x;6r2G5(q)|_nB0lTe$V*5U5R>hq zL8X^S^j#uY_5v&mm?qKpZJ=f-!X)3rdai`Bit_d(G%Wybj>}))NHSP{3=#LJwomZFRE+VmY9HIHChgao|n9XQ;_oV z1OEaQA5KIO{TM^xjtG>Aag$K~y{s8Vv`Lk0XbxAO6sK92j%o(RXNLB9FgUPE4gc)FNBfD(wLWbVN9A&)z&OTo|wrEYbdGn zMLa#feLDF#xDX6X(@)d|Gc91q(PJ#;r&kyi@gVNj4N}0#CWFeJScLnpm_;F)`^x*YmsN$4T<%XjRe} zr=g(*2V?DO4HyxyBF&wk8}FNU=A;>h6`37poi+Vw&6p z)rfP&Ik(9-)L3~Tam}KOy&8d)&mp$^%vCe^Wcvymqcxbi3;WVWKo*Mn9tX7Bp5lz2 ziqeT|vUCGk2#&nGF%8OJ4VFI-_9@v|-MJnby_j9Tc`JJ_c_%1X9Ou(f7rCuz2zxT> z(PA}a%;KD&ln8U1`1Nc>cr@91L6|$clmQM8F9F$2mEw13$_Jc&LnZOKRbj`Vt&vgO zwWo4S&V_Ct4-^TQ@G#E#=t8Yrg}W>;6gz&pkW(lY&|iOH9<#y-)xO!5MaNVW%db<{ zmLbFGH7^W8>H3ZPf3bE?(UFFMny8bG&5mu`wr$(C*|9t6*tTt(9ox1ltl;$QHG9sU zH8X3@#i`p`-(Rb4zvqA7Hwl~xL@vLAbnpeUGnizMiY*uU2iY_pxSNP6=xI#qZ_sFD zsw+@_O|Y*-oSU!_RGaTX@PeD&2gX!V(dS6@(b_Zfrpa!)_JfPH+95y7CWSQ`e&{U1sQV5Qvy|o%>F)`uAQ>iJEmk?nRAI| z>{d^Ww9^aL#)7pijWV;P?2#4U!i0-wV- zrMN!}PC40k*jc_8Wf?`s-sK&+>hgsf2dzgI(()m1PGNQw5{GL7)yo#?t)ZDt9%`Vt z!}za`UFNcO4&$)ekXuq`-K;~XQWd5`+rqULqN0?2Nm*%U7-HD`Zb%s$HsJ0s1^wH% zcWNhDkF|eB$mA!yLKOe3zlwYKN($2h}oX+!uvTK6)$!i9;O)YA%boAQn(p846<`v za4(7bMTtASE+h(4x|0XEuc>l5DZ-yk@<;M5kNhbJo%^X|H^T%w|MNu&6d@-)l3k(o zjuima4990?r(c*>pc^UVL*K_NrO%>L>I{S0`H=WR@T3oU_*LI>1iZJWdE}*saP~80 zuRb(nY)PRFRe_%3d(ZA_AU&C$Y|N>0%2@2&&V7XU2yg9s*H4-0_PQy5xgqjCOdUa( 
zt(zW}jq~PaJT1_*J=@IWDkAH>TQa$kmz=M`$nzSqR2*ym>A4)@(WmM+{SP)|y0)HM zdLgfTx>%+=K3KG&uvT`Gs(@HHXMb;!lpwo?UIxc#?9d!$z+B`%4Q!>!jy?C_+mp@* z?KaTwe@6fmkoiy4tlYM(7Z9QObDN<6^_?-mtUCL;Lb=vc-nDBqGxI;*vap8+?Hu zj3i0*dXJ6I&ARA2SbDk6`Q?@Hh^JO55-YKzLonD*xPh^;(9hAW ztnt-s*dXCtq!#W(@ql-oS2iMvK(`Re&L%V_qYTO>E+wA8=IJz^}<+deRE6Sf#EaHkCrzVB`;(20Q1QQW35Ysir@5#ey`n& z%1V@?!^Fc)j!NrY<;$XhmLk1N(QoUuY<)O<544DH zPH3)Sv9wcm=?FSFD!56M#&kZqa;emHQ7wi$GPYzOeol~_tH!ky|E^WWaJIN22PC<| zLqliyIJP3uTf#^S;Cv4@En%~+>Ke4?x<3UpI@a#{{;mL((5#vscW6dWWx!$vBofn! z0!2Nm+r)m~9inC|*e*kGk`>rLxuv0K*scplktHRFg#ZSHcCTitup{zD{siKoQvbPv zCx`{L@2H@Ve97$$R0T-qw&_+%ekcWJ!W4$IJx2DtRxpb;lD34H9LsV=5=*nw5+B1;6|J+QXWmPwW0Qw6`Vo{*T7ZH zat^ODHma&iIY#VUiZnyE<;z^@b`FXVBt=9+l1&NEK&=;RFl61-x%Q+X9Y2@IiKU{m z*hOVb#Z30a|7G#~2X#v68Z;vqI4gKMl?(AU(zw}sAPX*?7+6x+zB2dChYgo&F)9=}VtF;w@>0~C*G?2(v# z84D%l4!&lk7gk++f9EV9DP5wexoYa_7RToS&q(QJ=u0lj)Z+*@UhIo(kK5pP3w0Jx zGsrjs5d6w*Woq6|syy9sGq@2az`20h+xaMAE3sdTq6!)8!~pEuY`0g->YVhSkqBql zBcreFn*n^4#q#%ldh1rhvKz*vZa^PCR>`k$2TY-?1O(gew6%T*x&}=*s6p^WBvfU* zRHvfv!R9)ntH-}*YSGFEmH>H;^=8jsNHbU1Pma{&>;MEkR<$ZUX7;r#AqNE|Me0)h z(o6p)!X7{Cw+O6n-qkD&eea9Qg(_z~UDIIQx792*(^JTBlPpE;l~RD}(bX*oUfiO) zdpnNqVpsLM_bw~5Pr6sF!8+qYi-ioLM`kg3B8prfiT2BRCjV=>72O(?)RWyIN>&9? z1?p?B3KqojbMd6tTy`Gs?Y8(Q9KGY+94xmq#|-LupTBm=m07?+0pH}DS2ncqZ|RTn zc@aGm<0Cq@6oEIA+)^E1gr-3^>`M7j=a9kCzSkr2n(;F%$@cInZfhX|E(=^fbjS*6 z)>T35l)I+CZ~!FiZG-7o0qZbPlTBUcWxE7v^( z-$;JFu|*pEG(ExT6-;2S33;gD%%ombfDY|N9_KwNb$2KgERiEk6clH>=NuPbhd$7>b33N^`pw7V?b!{2Hp#y>dK@FFY3T#xz1+r(57O1Q*|Y{%-=UNy!%e0m)TjPs}L-4N`Uomg^SkqxySCSG>8% z^eZ3p#`S4r;~1yNuu?nHaMKR*`!&YsjOnz8`oLNpUlOr@MVQs(eVp4hX2z$l83d9g zl3TG4)&32WhP1alj&1So3#!mA8AMe4qsukYxfE^`M!%r?Gy@U}NSd-0l(`e1mF~am zf!r^JrdXx9)Qglb}!IebNgamDAS= zFWg*oEnSn*N+^b;o%)`FM^{VL$ywi0Ny{gn9z_?EB9m%KAN}tSHC_zxQJ0IqXLs21 z82my(8vQV|wX1<%e0}GqTz4JX(sO&BTb1gzxwc||MT2H!yIVJS-cI_7B~zh={Ee|+ z?Sfc9qRtGulf_ktzw@J8{6A-;_hk2j;Jw_FfuLsw%509)89)kOyz0i~@Cr5E!3jD8k925C+ITm~Jf>sE)*RlTsM(6ZCK4P! 
zoD19vq4L*9%K`aQeyT{@Hd@-trJsX3H@%dM^_tc~Fw5-CMsVFqhLZ{z#0=!0`2e%$ z>vqRl^_Wcf-{-AuOTYB~&M;y8Nm!QTDqDIlJb2+CAa>$p>Xa}#MigKAJGnLT8x!YJ zxqgOAxJ72}njv=_ZHg9`N1VIoFaxt;&R&3{w;OnIwb%gj<9aWr`){;+cewKVgGF~E z&8oGC{VR?3Owj}e+ycMc(Q7O2`cVCTK#GWh&JI#5r0n$VnH7~a_H!xPwMbsP99GG4 z`&_s8W!b+g9*0xL0u;i@t2uWcPvkFm47M{1oS2Spy<^@6S!@i+pZ9zl!D=t=lNGvp zx>LVX!>73BDi@aPp^fHK-efc)n!Cl#GukgRtZ>ag(W%IWR=^dX!bbawE*LSsFo6~> zi6S_pbWVz$@|2oWIR zXokMHpz!FZm<7Lvq%mu?GX?}8Rw+cX6Z^`sw^0;KldJC^q{D%2?b?{Q^iXSI{8SNc z&K_u(w^tf{IT>pyG;>$)_uDfg_UsF|inHCk2brGwiszB2$t-a3rBLc?W0@Lcp;OL{ z*RB55E*pg@D08+cTf7&x`|tqSGO;$9XU|$vs%U{)xcz~botXMa0QtZX>7q)h;#RzP zD%i>RiiFBM_fVMT;BDUxx%P(&NzI5nW(c-}Z_E6q*`25d-8HoPLlT?=PU8TL^QW^* z04){$^J22g@DpHkkKBE-A#1@U?S^lyQgCxr-;A+dw@$-J1Ng;MA>bx)?&N-$BiNz$ ztH)jjmm~eIvn)sDC+V+m>E)Ilnu=#BnW;Ae*^_-*4ed-(bqMTQ{RcvSTsb}N5BVhN zF3C>@t<3F9ZXwA44D9=xC*kxx$3OLNpj|l5uUhtKPM-2F)dFSXl==s=>)f?O?us!9$Qk6 ze?&zqgF2d%-K3}@aNYu#Ewnz1o)MG@7UYQK=u&npoZR8j8s_tT8HRHZNC8 zYH*kb8dccn7WGl2i08WS9XGj(-s1L_JdN-u)&G(+75O~Rj56j$SWfR>NHkrc(=}7@ zn&)^95Lvm-G-or})Yf>lXALJucS|sKW9Y{A?PAm&)w?4xuL>r^8&Mnr+8O-ickrb4 zdz2nj!#@bGtCaZVng85eUkXz`m^!7cAy%-hUQ{vemO_w(Fl-dkfzW$zNa{ zJiqakIM}dWxWzwHGYi^tftRGB^04>fQ7N`(s=`B7>GipULd zksYH$c)N9xV&X+&+hMZhk!C>4sKSN~L{I^04lf4Pi>>R(d9auD=+`Azl@VSh$4NuPpmJA3Z+%T1msJ>GXw$b)e2@zVb9o}n#5w$~ zp6sphk(?K3{=w#<85&FXtDO~}zR{CVa8Tq}Hl?+PmT7pLak;(XbM&iUD-r z^jdjUSjHt5AiGCGdYs_if&IJ!h{K(>OJXWC48fAHv*Gg@$xHnXzfRz2fO0i2-Lm6@ zoC$yt*RUC>NUk3mIavS)mHC-h%9CNmlUvicU;_BH}n;c1{`4hgG1^kmH#e!C>fu<+CG5$UF^h;)|UbXm|O zDjA2-yOP|g%e7vJz1M=F)l|PA<5!*~6*|e27|*MM+QH29{M!!VOLN*nwk9sOt#2-8 z*BJq4N-vd}@Aze=6Qu5mr5g0`SQG4^q^b~rpXjA@=Hh*o%llD-urONS&g$X|Od((v zm^p2%i!E}jPk4!Ve)C#HKKL$MnzzKQ@%yjMmNYlP-nmITFt@s``gJcpAf_I*JV3(A z#CdW__!@OypIQ-|tSh}ijzB{sY+%ne%R;j19vO}_LaKHC_Fzz=SFd-XH}5=Pi?`m+ zf}tAAEp=tlr2ZSSvWtKxYUiG@1sW*87MXla+GK^c)|p-iis~|d`be5>iC$c| zeOycCVdF0=7$&(gNYmOyPYct2_R-!hysOMCKoPB#z!#zOre!yHl2uVq4G19_?aOAY zO_^0rr=EGBkIU+Bmy9p_doNF%Q5D^gv=p=LKj4>7O_U$+9NC>oyQ8w9?+ymdXg7f&M^~dWS@)9G@C^j+qka zVXeDuhp>c_dgrrm?&;DxJdOevBnQtYydoNms3;%iQ4f%=w_?HI{ zI;?39fP(~lw`qM*5flVmCTwfi+XZ_e{l~4gu2#wP0*(8NSsopy5MqZ z4U*W8@wg?OFED-D4ccLkV|xMG+sI-1$GdnNRRG3nH*RR4lM();>c$Y-vUe-X*P?1W zi^E<}kLiiM^is>seo~yki<+xpgTB&se> z)%EE4A{Ud_wJzW(7SuV3YPGOC=;rrN>G65+j#_mPO0>7Q;Z7;EveioFQgxHVDInL= z(cwjQQtBFE&yp)@r$>sh*d>IE-^9cVkmXcjg2C%2?nP>;f^U1T+)fw^>P~!D%ci-{ z2AyFFXz?@fZX*xphv^Q5R@E+J7g8BvEHyl-XX4ROpvLaLpT~xbO&c)ti-OzLX-%P? 
zZBp`gw?NLB`swv(3A*j_$v5b(hoS2K%8G3N*%SExg%yRBM6{%2|FNRH^#4H>=~-F+ zlNH(i6??HW{lBcp!To<`MSAA{_T_(-Mf(4SkpEX(GLcH#Zc za>(^x+K1S=S^h`m(EluloZSDP<E z)=x&Jk3{ZSrJ9#@6A@F4q?#|3aQl|#KK%$h-D$beZ(8Bp-2tuxw}IPpkD%HB__v=| zO07)l=%_PD7$DN1$;~axBcMdT>p`H&#K6IE2IisH3^`H$f}h-gN)nfRVG2#)LkgSv zO(L^Fmc@zuVBPAqkzCj`w^))?+^F6Xq2k*GcBWU-&m1Gd;G}y@vL=)Ht4pu&xbMR+|To5!glS`=Y z$31T0)%*^!Cl<(|P=5haF%-oRnl+Rwlo28nR;4Ydo4)b5pizw=h(P~t7ibVf|2M-6 zCs31^>%o%+99F!eUF3JKQ!!XSL3|LXn|cfQUjZut5OC`kIA~$7e5N;KMrb_Hh7phe zPejminlmt>bkOhkQ&9Jau0norzP(68QrIt=kPh>o#W|>xBM{+YzQ~^!ir6=ZAttrk zns;*+If+z`#ovZQ1o6#H-|V3|WmWmis5d7ts+wPL1MQJthps^dpoK&*&c-mrAkzpS zkHI|up6GaV2@Avr|8_B@fChMqcl@|w7VZDB|I9Q^nAit33-OIZLWdu6tgai-r`ThyKCJs07pduT55xm<4{$yiQ zT4B{Z{$#!HGmVb^_cpMgAXY!)zid~~z=2T^*9rMuo;rZ&u1`Pc>rZ8Tvm|JLzTmb= z?YSYo?UFvVn@bAipZ<4n_&f<$G zp$|mY&-iab<48EXeIRBKaSbI7DTqC~1myU4c;?_&qz0}9)Qj8OZ8;ClFO*+D7C=QU z0_r0{q>d5@Zwg>vg4fRn=9R_-liv>izs{axlxkQKnAsUiTXt`RgGyC{+n`<$oF8%+nt7l zh9y$Al7j_dbtUR2I934I&J-qr2*-Nq6ZLJ0eN!Pkxp|Ide`cYiyMCL0uW-VjfQ6vW zC=}!`6ezG4&{L|z!tZ=k6|uZ_SyNhwd=z?lSk;icod4R}rkM#`7ap$#oyQ!#48e#SFPVR3Cf7`Yg8Q0}lew8B zsV3&QC~k?@{zpMMAr;U|&l(zsW16tycU2AlVm77o5Kn5Ow33PnlHgQ+@EeB7?w33J zcjERCa#EIgTFf!cXb*fE|H>g-!j&MirTu+2_h&JkpufZi%|)U-YuY0#(wn1RC?o3h z@{S*EZGzq7E>-IFU3xb4H?JJqbqgSuW@nuHoBWlI2+ z09d#>irCf-2pU(w1zLAO;c0(|4(_7~Uk7rt0_x!XzgSqVuUMociH+j5q!dtzK6WFbrYS0SRTwrTTN?XW; zTdtyZ(O-^xx2-t-iT3AfX%cG0;VeV+U%n+RYSrz*kbdG2uNq{vD3*pKLhFUxdNtHw z+L*DC$}gR_Hl(7=C<@oqH-?7=@g8UMjBX(h^Okaa%l$D=+uQT~HJ^#WOOlB^DKQ(C zkvN$}`|-m?ws6eT$u->lCdYYpS$-$B_lUdVVdZbW9-It!HC*F#5GbZ~hPeGe2Q}1q z)3v@yvMGgfXL7}=({}%lOLuJq>NZuK+0cL~i7WB@V$aVQJPa*WwOBX$f!Iodl60Lo z4&mylPbiDkq;~@@4+qW50~w4FlFtlAdBVn%4T|6*)&d^P`HQq)LSMIx*q9Cf@-;SD zEL`F7yUlEtI3fo7O-^~?FTTBls4SriByv9_s%iZ7olhxTPz$hh- z6C052L$F>1;d|J${N>CKjAmjxji+W6t@^F7S`#LLd0`v${eb35{4!&#sXi1h103As z5_F~PkbbV@1TDzB&(STph;=B#*ZI5C7C(3Nv6P)Ezv!d7w_J|C-Z zB%J8$ZSbSu-*RoFG9Fl=1mvA|W`h0z=;PQ_^e4J8gO=qP+cLY!-r@gS)8B=C*s$wanmM!5jPmA-!MWm>}Osu_bL1?JDPeb@zMZ zY&zlCaOKQ8C13ON$-XQu`i$iV7N@4KOn$t!pjVH@tZQGJ`d~dbV|? 
zYneuxyvGu@AO8L^o-DOY43z+7An}AIEy_WgADeYib%C9=ROdBBSIoZ!Cwqf~#)V|8iatHi(6JIp@5H&e2DCF>PUEDaNwH@hOt~q_i2so(m_;4n zBr7ayUNp!V;{F*3-4PjDh8Rg4AD6|s;LY?3iwj&u|we>V#T#>3ynLI zsZ8REZCO8srLjnw?RWUYU&ZL*QRVwGAd(HQ3;bq$^)LV`XPu>w%9L!NhdD_lTHpJM z7-+**@z{QblFV#j4hJyu4Zp6j_nFSHZQ;!caVlTb&splq9!O@nUZ9rHcAesvhF3Ir zNXYt`&lpmwrkxw!VmyN~v5hy`f@~6iIX+eA8T0lrHrl$WIx?~n$fU497KoWDA_xz zuiizr!EB^!??%0N*SXa5<%meL!Az^QoV)rSRZB6jREu|jk>v1b_D=P5n}+#BLG5+F zA%aBwkMM&O80bzOVi*$`!78sUA=1OCxoce@; zaj4qCeoF9XK2`dCx{bnGig~@I_c#}8jqtP0LC8W5FikzTc?Os8sp62rFXcd&MvmX2 zq7T(mlipQq8N3RY`FBM;-^!YG2m*dmK+N%8U2sEIH;(JST+7y#cK(j^y+}R2YxK3{ zR&+%Qmvl6-hOV6|bOy?t*JnMf0KPN@JS%1PBQ}nKb{smOq$#jnvYFcV*R@59%^xph zKaIsO@z&?aCJtKU)f1&x*gDrkqa<5<2t6M5xsP(&A;*P&=>gzy(+`q?@D=Q8m#Os; zLP~1*3DdriPwqFEV^-1a>6cEmj;x2d$xYLXU6$(CB<8mePc>iJ2pE;lJSR!r3L8j4 z#3f`dZ^k(`ssqVRE+_}zFLmv7{IQI9MyzH{q2&zaI>d~m{weB`7=aoyGe3-*$`|R~ z9)k)vgVY_t!;io6_bC71A8m3xdMa&>9v2WRM-RrGB`&>VzXJ-Gp*9Daj|`c4bYu!B zyYI33aGjp`i(aEGNjHY(PLC_UhsKvNR4obC^{G0{$8Yy!zdp1pi>8`JvCA-uZNT@eV1A4pI-P*G+>|btIqb&o);zEz zh;hrhz|TSxuBvE6!p@{WNw*uug4^?<>q0%2|J^Iz>y2k}MeO;s>z%s^N9c<WUM>@Q80ZhXb}?7X*#AleY1Xb$b^Ic$Z4k2> zwH!N0j>sQbC#bbMPW=r$Bcn~Hy5ue(1zi5DJeY{JAg3VqO=OgSiIPRPRxiyQ)Tk-= z`3mXm$r9S_Hoi-&CTNMp>dI033R#U%Y1AW7n*P(G-Kdg7Q@p3~+~@2<7+oGO1>TMu zN9!p|lfnUQD1IEM#PPee2k-%^5K$5+|Cq|sS9KF4x8>!kwN};-c*%@i{d~t8ZF|&s z2Ua*j?>M|uU1z0zyf<3nN!*rIrkud}1vG!D*}g0lh*&aQ+6&BZqvoZ;*5#}Jc}}-) zssRA3COh?`Ns^9w)!8|}8i}zx!~&=Gz*-M@AS7=tHZrI@zaI@uFO*!wqJ} zG=(5%)IMj<%oX}KF7s`E$@-v7Xg#kaG^p8xPJ$7+8cI6U4!EED!<{D=HkVf3$4EE2 z8w#Y9d@hzGP@MMM0@;5RN=XDf6QE|-oINn!4RRxM{`K5H^Loz6oUf|Sf^X(t`s0@v zYzt&(k+sAyrM>Ev^Ivxz>1N*2i+UAsAeteh9l+&JgF9UOcfMTnEVelG7sd+juQMg# z>%HO!>IdT6HdjG~C?hbrgKiJ6*wX0)m%ufbsJJKb$r+Iv$sCutNP^k6Oh32ou{_*C zcjIkK^@n8b$^6+R?op+TlCjiu9{IXcU@%QAWQj|DiO09Mo_Bx&lKVok^IDS=g^4gb-Vk z81w{6>^f}~c}ll)Tc|y{<{}09-=4qrB-R!#U>?b6FKKj4DGZ<5%2rMxvIFD3PD08e zeb$oG@?Q#V9oxSXdgke~8rp&#Sr^1YFEiA6ZU5CJQry4hM@Yk8R2P-Ftk>NNKObQZ zUS91WR5C$Y3}1+!KVse0D=LaiG5qVAh??kJBDHXMqy(7p?Qk#Q5?YK%HhE0SSyI}v zy;0Qlq17)akn*%-)bV8|Cwx!CwarwOc>3a+-d@G5RIar?F;!8_fQD(GRw!S;JdSS5 z=lA3oED3Eu6)!};;W>G%BJAhn;Rn#m`|nN6LE(Al#VN)a87)gXLp?6zd)6nX90vx* zrEmxSxq2?yh-oh<74@%J1!K&hats8M_jgqKf;TW&NxmT8>+u|LXG812wt0R;bhg%S zxm(2!kZq@;EzGZkV#)bNpFJR<+gZ zBWhc>NN`bNsJ8bli$Xt>^{TQrF$TStXgV>mEu3Fl1Y1vddJyN@Z~|HMTS-}_PjWUJ zFuseK|7us85tX>#oh|5jdB~%eHDYBcZ+QOBhJpo5--Qv3gQ3%x_{~t(cM_nhvY91C zHVPt!z)GFE_kQnS`NyER&|s;a!(JI8iJ9X z4Ob?<#)GQwpn8-~J!_tFb_ThVc%r2PMUYTW46;Sayy$9YTMib;GC4*1lH@6Z|2piuXnWxxxU z1eWLdNrL6e zEhkmyK?3pMms|9eWkSVn?i%Ob;ow>!8n1Ffw~~L;B{W?4rPF z1-(CA#YTO05(H2;_8RaA6F&~3v)FCB^L2DGZJyFesD)lD#TeE4&z-k76zjiV;GWVz z8V|WNIY!A;%IWkR4KU~QXv3x_TOc(OH9buA=Pe5rcG48h&|~5v%EagN&MYQeG!>K00Fg6Absa8Ax!w4@H8C@nH;$Y)7MuMx&vokz(TfSecG-ouh)j`v+4 z*{5~2WGE}3Eniu95ymBGWjJ`h=v}3p(FzvNoCT0E-bY??UoMA4$sW?t%+F$GRc+hGOx!=GQRYbhrX{E1rpqcf;s$wzfW)01dU&T8eWPdfaksO6WUseA^LN z>N)eZ6A5PZK{nx4hTX{+@u$LmedqfTD0rsP{+Id5%=%a25AnClkJR~r#ZAwq``-aB zI@I9rjr(ncd{I|SOFLQ)5q^T)DQD(a=I#ofN(-5>lT7 zNj%c`4XvtG+D?*xOYg+q^whuaF;v28xyMJXri#W3zYs>o^`3nt261vGM-nQS%FIEJ z9Yv3F!-m^J-JV1xiQU#E6_ke9cv}lQRg*`C@K}i zkliFDRmZK*6M|^HfVr-uCExYaCVAaTJIeFErK!u6&`JuvyUt_f&3c z)uNnD9^ou`o?c@xGMWk?_zgLgZ2vu%GjO5Ve)t1+T=42wDRH!3dxbs&rwy5X2Q_>u zihLZ}yq$6JKEdVPhusa|CSCBtTDGjHxMk%($0vKpbWU8X5YqQEQ1UuU*75}@4_jHnZ1qfS^wFY%u?ojZK0(l zSb@iWrh$fAwYa~CPJ~ybnj<3@xH+@_D!w4iJX1D!I5)~jU3$}X+Ge>wVzi9dD$mv? 
z2e0cN_nE69uToQnFg8eTxpm-tj<4nXkx1kp;GyP6*G2<)gprpVIrXb_^`+*cqaY{x zs{y*D;o*sEPrrC(@hO9&>pOYR|1|y?&|Bc8)Be3Cc~jms;lGu7yjOkX)78n1CLWU> zFo-U`CDM2q{X&DZJ6ZWVo#zFiDfzJ>C2;iV3zkkJ#fRX$w1(b>rCH=z!8=d!W~i+C zF+Nz~mKKmJXKFcy1odz^o^~JdfWNp+9ltsb^&$Rh{up4;CU%=wkB`^I{X~PdmH2lg zicicvdwlD3#+>=>s334AKHbmO+1Y)4cD1HR%h2aA5Mcj1+nRe~;g1owZlth`R`G2h*`f&TIm`i6aJd$|=-dL^{1uc04Yhk$foNR>A@=uPGh~0Z0fy{@VBGLl|K49L^D3LxT?sw!eY@GDC!-JRLr~Q833!q8F{_k1ibrNUbl1##oW4e93X&g zYx>p?&53}k1I4?z0996h-8@44@qz0aL=-II=jTUa@r)s1Q*;{t^aEb1;|Ceo&xf zUkLGY;DqOlXNV8j%;}lNz-^)rz?^OqZ=SN4# zJ3@jE!U9B`eaM8!fGjf-&xoLJ_#&|Iubk2E2lcQ`(jb#SbKe)aZ?mg6PSCY)0z|~U zz|jSftpGjE}5MFZ=8620i#e#v@r>a@a5 z#u`dtL3_A@0YPpu{U~8TKZQJ-tf20{L`{eok??}MT|swJFhQCn+d%_7)!@O8TAaZ) z1wyv$1$~bkj}6=V`|kR9UK2#5(8q0Kg0_!=NF;ukKMpROS33idg_((Nw?~0((@;n5 z9lES{-|ZsE0M2(q%q?uv9^}BIZ>r^$q3cCEqzsy^d!)@cd=qYa&REkR)@ zkE7IVH&05BZI_HC6p6B++bT`i@_n6*WQQq&L1siZuH{7AL~g#(u&~SGfeY%bFIpU7 zUM$LKKv#)o%;^vCt6^C5w+x2+KAyb0oWQ!d+Z~w+Icpi=7;iZ~roju!w!}gSfpbkg zwpIS)Z8B}KJlQ)g_jn-a9AuycIlvmeyP#;p)##noik%S~YgS@gzA;lvf8lO?GEODlTO>VDP2w@?;Qso$)WtH{0T6( z!}P{b;4T-BSj*&xuWkGsl9JVjc4Y@H1KlGx7CO?zB3BD;iAOKf&==d=kl>)lbeB|< zRZuaDcNlCN6!X<6%Fz-`yky^DQYi4du~I^l z41Dp=#F>GbJK;obItlM<%iActDs8B`dwPa( zS{CLlZU@>#+O!UJ>lkr}KeHvjT6^*Z*q`bTIz(xXl-@bTWk{+;LSLdE?7NSiDz6M) zWW}fagqSDORwsV4f0q@MI9zC-&C9)%JDY^~qoz|s4ldgkZ%=@T9pf$<=SeA=yF7vV zgU*1q9s%;&7N*m6>x?N*3k<{=tGxW)33Tjx-$lpBP8O@RIE`v0(pE4{;qY!o-{WGl z%0T@OvprcW)(OeiT-I+sLV;@-i*vefmIcX+C;xrkK8V6*Qxq*@clw%(*OYNnQo~tq zO_&J1ZUV7L-V~Dv-Gxh1Kb#GoCXixv5-P;9zd>;4_&yD(PL2uc>L*md+@CS_%2H2_ z18Gdc993K@_^QR6PrgD$7s zxEQ_XY<9D7m$BErId4%hQQgmD`0MqGbUe`?ZwGGSjd(sTINi2MC1zbBWF-&WjSw+q z>r7FweRVajS8{}9u?_n&Trjci%vRP``-K6DWyml{n>zXyO&&+maj=z>ZQ8HgO z$jTekiXvh-+3xe8{WRD)9l}DU9xCYtbABaq1KL?O@~{1N?AS`c-iwib2)N0e)3%Cb zX8swP)Gu9qv`C-5brgIL@U1`(4UvSW(0P70pBG+vt2O#2&gbTEufp{z$@UheTMN0m0A`5jVGU*O6&H;}=UizR5 zSu_d_6G_|b-Rzp0pP(xyKBeSl%!{9hg4m&Qq9V%@Da^C9bRYj-Ur`_-(oKPQ7$~LVkyYCuX+8Bw|R0#khY*`4!-==)^Fv8Tgn-zS3My`Ja(CZ#ykjxL4}6F0L(|J11poL;OM-Ya zRvo4{NR{GV*LyxY((%%`SVh5%3z`$>KWM%Kaks!7dzN^j17a8&06pV3C;q@=ML_5Slx9pe{e054-Dv3*+CGZkn$7V{TYiV_fQq_5VQQStQWekzj}@-oN5)#nuGU+WqbQ)k zsjB!5(hc~FTAmH3XLc*J$8O-f4y+QV2RhQ?ygfIjWBk$x02Z!i&z^CDr@dqCRaUN>TE0q4gA>>MXNB4E4NQz2bX<*ElYxgeLwxx=>V7C~}WHPI}zLea#|>rYp#d{i(zbi*f}SnvSG$7)F)OV*B? 
zqW24fXvnilyrND$6vxivhi%hC^u2M&?E(9l*@c;@AI9f~)hHrV^lMhY`@?H~3H8aOeAcZGtyA4}+Z zDk+R+j(DSo+G4Hkz)<`G=F@^CHnU9a+_n-40B!_${jwO0oC}%7QgR<#u`fvJu{I{3 z`#uVD=GBl=0(U>aRO>f@44$Uz8oW z-+P2Ia<%#kCOH<&V#P6iWcBx1XYAnvy~rGfBIkrh#pa0a3X0H)dA@PN5rwXM=Z1;r zCR~m6x^>SXRwto)`uW}&wTL(%*`T9Y`IQ{dVpbu?i=X$n%$!!p8L6fJdO8h&hA#Bq zq|ZMqtOPXPP#s4Zo0@T$=OyJo<~9pynHv?;$-jy&W*pVTo^N?Dx)t(s)csK1kuCYi zgn1jO)zfgLm0fNxVg8o;Nz$AYdPTBc3~7`HhiuAWT5q&lDE}HP;}9!+D9<9jQs!j1 zoM?8dR^2X=Y<7>1JMNIG8Uk#siF?hxNa(SqqAV#xqUqmrp^4GB$eu8R(P)rg*;3bG z&w;(9)D$>JXQnx*pzlaL@QJNukv6W7Pu zfpSl10{$qtKqin>eI~PrDrCJEe)jO0BeJU+*{_tK0a%fzSBr0(0t_QA zy~n?hg$X3hU33}sk{?NU7tF2KXFLewiLp+k;B&Q}palC_4h6%9vjy@6D^&cg|LJHK z7K&kyR+Dw~5_++f$t41ii%1fXX9Keb1cMk9%F=ULX9)6t>j|Qkm=ETCf*;SmcU@fP zkd-h5+1vk$d^kBz%F+54sw$1A%47~xf%6){gLFH))VO_84SSdX;TpbX7$tNzhPX~M z#MDG^+uj$t)1)e8n+`Nf)HgVUUNb36*?I~r1#)R1Ksv*iaAAME&c@sK%CIX9)Gr2q zh+E|4Y(yhyK$Vpn7w9lC(g>3OMUyrTP-POb4HQF1AtFZ2V(8)MmjmaRi0}){-9lQP zEzR41{G19xb5mlUSGZ|I?ZuY=NK7|WF*W^eF)v4Bu9JgaPLd)UdrFV05SG9V{Ti)E zuq$y9mJfQ&L{rsf!Qw?*Mjg|>ObOPd2_wgqO(G%Drc4_49;$0s$-98kp_|w}SZMg| z%P{;zF_Mzs>qWp#x_F#<{4e9~ruP99FqAZTRcq&^hM6p_gr8E|xMtlRyNOfE&c@XG ztCg$V&VuEvDi$z^fS+sPfPHDG@Yab@SnU|O!TuVtef}vNH8}i`kw1|4Y6=jW@$Sz+ zjQqDM_VC!HRG`V`A%2FK?>uJD8)3bEoU32C{>ub;g0kU`srTk0u#KR20%A=}+_oQ4 z?9?UWGPKrVa~6A>aY{@6*MRJ2mRU9Ury*ue6WTJtnu?*vilFoo9RGREee8(y`iI+R z!uBqOTFI3pe49Tz&7E)wZVMpSP|KYC(yXR6OLvnVi74lsunx-2QLeUd1{>(6PC=E{KQi*s2n0N);_yRBD5_xE z(Mz^)@GD7k5Fg;9K=Buc)W}qfP7Z~v`oVViF25$3b3eSUC(^c#q=%;cQLro4&G#_C zK6Vn7CrXfbKJFf4l{JSdHr`AXCQqdjCY=eX@D>Bu6n;XOQ90ZT4lea4k}xuPQT zBFCoQzsu&dU`)7dT#jU?q_;mJ$+F2Cz( z+Ac&_Gmk#>sV>h_g4zY49Was(2a(Lb{DJ4Gw5YQvrYqKR6vgmFXBRUwTFmOspZofU zUa(2?NZ}FmVr~XBf)o5oPvy@lw*lAr$6L4ctT4Pi2d9I-vfq+t?xd;UW2}D-p%9Z9XfNLJcA)+pg4+&G7&Ci1(;ZEqRV@QSu|a8tR(7o@3% z+RU;=&ZCk0r+B7I<-PD^<=9sGp%Nntx~R;0>-%O%i|rXnr;#=nqu&WJ%DsN!2JTkN zTRaB085g3jvUZ>6Jn3F+o|Ic6ir)JeM|uarSjWEE;E^k96GjT>LAz-L8okm4u6rzt zXR^))$92h{(5hh`&KDnio1a!n0u$Y%VPHL3)-iDK*%p{-1K@yEyt$lKST zIfuWBbNJ7L_fo;71(z&s{#<^jv{eoz37f21kfq1$VD^o+0+W zSf2>^@|QmZ{A(B%SgerVX+qzfZH#YpCSMskl(SR|9kgt43xp&5f2TO|yH7k?8{p!B z&jaeHx;cG3U*9*!5~=U_ge#?^y2(-OhB#E;Kt_kpr?9?CBphDA;LN-`5Z-yzk{d1= zAxmJ1V_eW9KR8hj3Pi$%YT+Up=sG#5p7A6KdJ@`WBI(yS8PQl(r{l#qx1Fx~=*O)b z#1ZY;$4Gz{8IwizY8g^KE`ITkXi#^(0<-Wwy1c|f!Bd+j^|f zQP;g-18PHOb|^)$Vwoxd)@f3mN)t@Em86B#rJN*lhD=4>s5ma}FwazSWZ%+!A- zT>2YSqF=x%C1~zxs;bd>n^{8ZL6_21FY5Ap{)v!QmdZwpA(6}zdIMu{-e(~X z-!?%(EbpfKQo%>Wv=A-#zfg95j}=I$Iku=kYjUf4SXpnE#Q_1epdjk5u6$Bo##!ug zaY^gOML02tWM?`$ZkI{igvw1CgxHibX={*co@|19T-~1f@8kb0)H59(pzOC4|pY<{YB?Z-&yvH}*kiJ(Q zcE(@$#l)}U<~U2{9}<3z@3n!}-|x7?bI=em_IEU21zH3rxc#N^l`=Nv-|va1Ky*jK zH<;}7579UrYl{`04nIwY&Sel9Nz{;gR;97AYCJD4p7KWg)HE8)q2*u)74uu8msgyV zU;kRCD|TIc;jLWE*N3quiYswAO~zl#@66x(F~(Q-BQq8_9fNcMANgVjBJ^RNYBWKd z6;uN3R=>>2(Z#T8iaIj25v*EdrRIZ>wI>@EJ`=E^fEFpK(zl1;&ZnW1XlLYcgILXc zf?uB}N9itg7d(lhvbg*DFX+)T;mcAs2K0jOp%uKn*+hnz;d?OgS8r=%mIbqg9E_BF z4IdlRtco5+R67U9`UoN8_Xw9(J5KEpCA44;c=PdxL;An8c$|5q>v@u75m)|MM*N%9=(=K&=p=KF>-0L4#(I~zzT|ji zb?{!Ck1ESWbTJ*D3I6a{oFk>1;&);Cpgtr86?~OUmIPW^ zsAE^11dYXeyZs13@ z%o~HUGM6-p^FdpJdwrX8 zvvw$VPE%V5`diLEC~^8#-#D7M9e%Z!k!Dc=(V6PDQO5U4z}T|^*z(<#pi1l97v(2H z8)rJe&@9`Q!DTQMV;b;nsJq!{Xm*OW$xX@)tJtBz-Yj=dVAWTgsL5O5V7b7(Y=Umx z&Ta7LBMCml`^^^7fFa}LBuo1q-Ew*0d`so03`PxzHdd!KyKx!*I3eUJb$_16?s52v_Vp8|GSQ20s0gBwg59H(_5& zw1|_zNe!0*{*=n-{489Lq^7VJ#hZXRmeEt-)xq(e42tvM>5-POBaX`}r5K-B_5GAD z%l<_LBuCXa=)&8gnl6K^=f`t1mSrfJ+#at1mX$8x98LFfd`S4BPAu-)%YY~KRE<}vFqM^Bn;+lLE72n7f_uCebN98^i_IFK;&hFmj`|+){Yy9FU##1e-JILTGV5I02d?gz$dNRkQsvMzG>s>^NPE$O88g(J+6~z 
z9Vj#*5>;KCqQzNF*x)5^jgeOTnG4q^;COjk(%!rA=8qzHIr+tjFy52_jya@zal#=S>!Ce}`P}Q*#Gp|?omvo`z3?sql<~j{Q zT1JbmxQvU&46A?I5 z6tg5E@i5`>jV@ZpC?|`NrgtCe1m)O^M@@7eHky_ISx_}W+c3P0FGIqJv?=MlDunvc zExR6GLq)sEuo|-7<(hPzR)!$*Uefio^JoEGo@SdbMmxN8PYpf&>g!$VhZBWfcZ;i& zR(1tRNGhL?5Ry6Hcfber*@y(L_{o*KWO?5)$_bAr<6r_5AKT)1mZoZG@x{G&Rb67V z){O(dfQ#M3hp@V|V>z=d#ozRof`{5q7EE&Q-XuKln^60e&lT6uljt1Zvy!B>;R46-P=Sf2a5n>sDxXUUDEPuvMr68_M zR&e5D$@M-zj;eDrjJ2`M7KrH!@^OD4waCOwIbB@R=+0VcpyfVcHiVV9*-OVet@Gic z)(h(OvRtv~w#-9ajc)xh2s8xgUA7Ll7B|Am;kaYvq;y!TPj0|_O`H9b@&{0dI)>$( zzb_@@!=LXiC$8lzj&OmAv?Y|cqhZR*+3=%a_!b5(+IJ|ua#krkr(cwuOrV@j__98; z%WSOx1jAhbh+1(TEd_MPo1}s0M_i}>Hig(3My88TB%5OPnv)0onbT*qORVcmaP8UZ z5L<^FIc{NAko1Mx>-{K~8=-vNW}g4$3HW?$XABco9n2f)iSD+uP&H z3){jkATAW$?}DSVS7~!1?ys@@akf3fs0AFkE_7qPyq|g)erxn|45fNw+$B^cfnF!q zhH6THwK;D#tNT45`tbeDHwB~Vcnt=#(MorS96gT(FhCu#Ar9qh}9w<7i0~#c4}i?xnb7 z&%X|PX^9)cJ`5Ndx(ly(O6fg~GZLt+j;5ne?;1N5!&3>X&=w1x*1)wC;m8oG9Jg7c zZNd~#Z7_ZF*aZDZn(EWyCu^u4X3mXYWJ%M(faPgGQgU3ILR_5=8_H3-n@lI@zC1b1 zte@9;1{F)=+qf_;dw_}=AwPPNOkDgF%8N>;sO0;g^X2<`L`x%H%X~+JhmQV2Jtnr) zgP=AH^n`=Tr&j2YZG-shm$1jr5OHORX{9`nbC!blLiSYIPJ<`C4;y~@W<7O}t9G>9 zGNT{)xc=J%$D?4t;99h_jOb&DW5TyWC>tZf+CkQFVFXC63*&_91yfw9A!ZcIWf(n@ z7Kbwvr5hQogzt~v9cu4;KS=>&G{q*0w9NXkGG&-HPD3~`BB3+m@&L`4$(Y?q{C$pD z(!6}fHpVx<|7$1h|HgG={r}*)32Uhe%834R-IV@MfsB8JivQN#XW;z*BfI^N^gbKs z|50@_ax^ryHg&c!HFKu_Z)eBBr8WNJHT>_Q{H%=suQfR}aFrz8mJ;oD zB+NddB8WIt5|W}KVNZYHziGf`7I7*$OY^6Qjlo;|DS8bh@O|~-ICjG`QV@Z_kPpEFz@YpE{s~FI z_ajn3i$Se~dG=|qegnC!0=*JRvcIB4+RFP3flSpQfqDr2^)<5eMR37D{@r5q;~3l# z271_Qt3d6KO)r4;D~J%vUw&6EhfV=Uo8&ynj}{m=B@4)xkrC;7XAcNVJPdXb84k28 zAifzY)TU)KiT$q{rjUqag-9SqpP zAxHz*7Xa%H;TrsD8Gn4^#e--~??1YAa4cK@3< zZWD-M@;sGz6&P52C}^u!0kRH!e9vvqz&|jG3@GaSruG^!g-(H-BoG7G<#mZ@rpn4p2h;lxFXqgbQ~rLEoUyp2@OzpRVj(IUs8lSVa9Y zRDoK-O>kf^6w|awg-D$ zfdu!*Mm*(bNUET@QY3%}i-_$Zf|6qS;ZedjgKl{*rUNE21X>*ezm)XHX0##)yFC^k zn}ylG%j^T<)5F9KhXKx8e*o7Q(1r4yN+jqdhjb7K^wWg?j$fN!8Uin_+({J2`>6f0 zLlP#$N0bh1vur&oH0Y?h5tg8!@uE7MPHFN+7y|-f9y3)RZbra zkeeTJ@rXe=^EkG?+4_snN)&yUA*;^EKV<|MX3u~!<5zl#Il3NKd12V!Ihu>#er$7n zo`bC$u@SIShA`voh(NYBQO@dcqxMvJL8XW5$=IIVq`^=l`qbpA`o;okD$64nx% zM7iH{V6$7RCNa&xy6Nv6+SZJP2>gA{UsB9yt1DAZW>4=b5m19!7shPSsy00-j|P<7+zn@&9ABltH7OZ@KpQoA z8vTAP@a`GmxbO1Jq-_L>rb5rs%GHRfM3e^7Qlf`rzbks1Ehc1z?nCL z*cke0xm8j|&l9F9a&X>2=%Mdp92r&naB;O`yF73W;^BnoE5Rs@+_Iq5`XL~~wo??f z*iz>%O)m|r7`+?WiW(9lTiM8j26xq42as~S486s~+E z>xm?BWF<4?U%iDz4<9t^W(?+@Y6#~Bt=5L%U7&EH){mY#sfw8jd*2KP*p3W{f7BD0 zA((7e3v6EL#J#w`ud`JL3*(2C(%)x)g7-w{hk99kcT8~TV_Eh-o9;#Z5pS}HDWIMJ z78FbknIYY{zI`5l+n3m)!{OE^UQ_ILVE7`Tl>0I!w4ZUQ|4c+5Xr6`cqC3MOZukwj zxIh4xbr7U7cei^w6DicKGuG2UJ+i#x!+BmpLno2Lxsx?ZCdbGwR!DmBAZP2lH*#Aa z!Ei-BX}qz~6WzLIpx2xyER^)~{Nyo{m?pgM8-IH>UApw-OAk886B4Kq7kyN+`ICX8 z-Cpak&vE4(AKb(r-bL*diZ)`!Mf%8leQhhq;uiOET$-;bbFIX!ZY`Q0MIf~x&?@`z z4yiy@AJDb!UQ)Dq4WAMII-7*IJWI?Mz_CEN2_9n19PRvL{XsQm{8r2FNTVZ-krZQ{ z)9L_3w^PjIy0R#dlm7?h8RLL|k&DNIGAAL-7v&r3+Lm9fuAC5FOYs2LDx!jk6$9*9 z36e}BjekHte*xOo8l%2Y0gfYgUZt_U0u8&qi_fF~p**!W&Rign@m+*dm)^C6tL8^y z21w7n&f@*gd`a5iL$xfY8Pm1KE6QJs1Hd_9-<>`3Qanlx+NLI*>tsIq5I4d@`p)g) zjtE8zt6PK^GNm}BfnC}RA?*gLbV>vWB{$}=e!5M%!wuD*>v3$1m+ zWbn>ieEtf4Im#!S&U?6$$vsd`i-(2}Ow2wDJj&)9Nxe`XCW}Ueo}K=?3Ze`>yOMl; z93=_Bc4pG1)U0WtAV)uIy8>ra=iIQ(?$hGoZy~_|i%m&A@1ip-frs zUiG)UPD~}050QOjjsh7=+7qMZAI^1nXGvF#ni{&6;M3AbhkWi@En_J`5jM3Iukx;f z3SSQ);;a!@ouso6EE+6S2a<$Q-5W8zU6TuZITZSUH_}yDzxarAnkm7}xEPO?rquPK zwv0EXki zVHlG0xEbPbJ)Z3J*@=*HjP{oqprSywhq7A5!QDbK9DP{zuWGeWMJqrJR=Fc-$v`N&G50UxHGb`X=Q{|p4nUg9h`nivzSy90k6Los4fB6##D)S1Y=SktMGu4YCZ!VB5bv6V( zHl=AEZo>}FSRfC?bd$ek!bH45K>xM@7 
zv|m+k3wPcA;QUZ5e+kYv3ZK4Xm5&%)TefXV>-!3!&Z-+LMOO}>Bn=i!LR|M&TYjKA z31d+6)7{K?SfUvTp$Fk8B3K)s8>Xkld*AHdS*DSpnM0j8Y2D!r&xP{~pRO=(xzcz2 z@zM{TH~1}2624mh}R+=fuFqx)zsw zv&keX9hBp|J`YGyu;kk9W@|6J?~JLD#23h@Cw@4@Ur9bnNfPgfd3_N0siXZC49BgW z;Ov?A+3%aT87U}mzmwiF?E;Hf7KtVU!4t08(naALdee4J^i+LL!I#%Dt#qa*poSc4 z?f0d9s1?BxPjAz!bxz|y*ns%~c3`%=Goq{G zX)BWOwUUps)v&^Pa1NMX=l24{xL(zCovto{B<2`*I0WLr{z9EjnDir$Oab&@i9y zKI&y;QpI^~`c2syrj}r;!uL8NKnf$I#aIEGFKgNx&wO_XBMr{HJ7`&j#cU05W|ge7 ztBPg%CbF^1xD&V(mITAZb9vvPqnG6GWS*OiJ1k@?4QW>U>XtzFJ6fp>VEL4Q+iY$c z+HAR$=TPlM;`SlP;2`&4l{1yyEL;+vIRlomWu5UtQz&+l>A*qpplog4v-!-_1s~3sJNn zS-58mHPdkyEr9s%r^S^WOYcQ%{X7(FUv#PQE!##?xp5f&VhNgr6O8-br&=C)rC5=V zZaVu~$tcD?a3o^~Lo7I%_UUhn%~gCp=C(v#eL-TPk|K)8QR!Z z`Box*WW70uuGnO$_f!DoTuVo(PBcade?3Y{rcyGmHL?)1OLg9VV9qSSJz4gO0H z^Pqm)|$C@pv%#vGYLep6TO^Da+JCCB`hBR{Y9rT zv#ncd!1Fh(C#w<$ys|HBF2TkR_g71|Amf4X4S}e7A^{Y(WO3C_g72C5m@Gre4 zZ7Ydv&Ovy(@h;-?J$3_f@1DXoxvnNi4Ri1T0|-H^C6zUz#Z=HH${4e4_-5TY+c$bVq6m5mHt zQq>An`{fqCgTl_9W)7{B#~8mWz1N?CD6p}VI8pi7TWYqD=lHic$IuN!G588SURvkW z&@Xp;VKP+G1hH|_qY|%FIkqBqS=QS=MIf}!CyT5D zqjl%k7bGJaI_-008%&M8N?#5DVZ0>z&kQYFc+UyfT`+hjF6JgT!-?LY;1Z6opad7n znb4VQmHV9tE50uyn#&;L+PCN8Ce(W}X;~UnIZJFDG~iSau)yZ6z+T@Aicj$aw(VS2 zg*e-3BJRrsFK)SF2IHN))soj$Kci%7pR`=9&0n$_Kg7~Hf9cxo`c|2RA%&6cu)9%l zn{~A=t&s=$Mu9B6jEPF%ZZKwyI9|x3;rKF)IFpdC&}_6s{Ax`wM#<(Ojoi{R5IXvB z${lK*XLRhGVLZLbWtr^hCCgy%Jb7Y5!XAqaC}S&mz0@2PoC>;E$IE;a3H{JhFG=CZ zXY*jaY#*f!b?(Qb=VR~lUYmHFpti_IP!OLUKgFXtruq2c&huWx!!XeVH477ixN%f> z4po*K)Vyvug-^P+wH>No0itthogFYnmOiVGzTMX9o53q0=Hy{yYPYR})T?H9$66%% zB~mwVQ@tI^Hg?SiNYgKG&Qsh(1K})%K_k^tGzkb5CK&LDA+0tD{ly^BDCz^-e$rL% z=e(;gSuYhxuq(B{_c?Y+-Og{L|Z%}2k)y(SLf;BQUYWtb21rGxtVT-*f> zP49T$Pa6xI)*(EZNWXfya`7u+rBbD0^HA({oV1Y@(p#+mmO^~s8PaEF2rIiD$*+rB zn7tV92&Ln8J0vVG45C8LFfxB=q)<PFZvU;^z z74g1L%nK~AVPx|>8M*fAMa%*-(AW6J$U9={c6Z?lBL%u;p+B|N>5&z+G{@9SrofpD z&I16nwQtP3TmbqT8&2k(QwMW``^tj3#)IQR*3x`<;5?G&3#ATquhcQv7xLxVcjGF| zg+!j)BleDIiexv^ul*0bwl1Py_3@O7QqqkD-u4~SaP#XN3s6HtEnh}kKT=V~h*!Jo za+3Y(mFwE@H}NJ_81TEK;Rz4@nJ)en3*YvfiF#jVCYWF|#+tj{c`hdwO{6=Pe@S}U z#VRavK#@CcT_I1#i6)w|@q81RacX$Z_` zY~DuBuvAKG<-`K~#w%a8qG1lfm-1|Do`2&VO*JJo42UTt?tD388Id|sp{SxGo!yFM zP;pp$x`U`8__lIS` z_-FdM((;AidRApw2`NRT|AL@HM1B=pBv`+j@pE#~S8my$3Hk$XshcXH)cdvzWs(!|eu|gl>+g+wLy8GZf0_8%yB3rKpb%XzDGM^IPFU&GPoqeSe$ zTr@LiGY%csX|pO8`*WKBl;V`qP}5LRIUVL<8=Z@GU}~A8cDdX1&p0ks``Qam(HRY4 z5&Km+S$4k1G3iLM_j=7T?aYf01&7+@Lu`>7#mdrc#m;AJ5{ewx6;Na1hwd3e(c+CJt%tBcAZyvgLsp7G#RFuG6w8w5ZH~YVI z==oUFrc8v$!GY#d_6_6~T30#bQ0wSt6=@iuW6i0$N_CSn$`n^Fsj1jGhaJpHzLniA zJ{8=4nv=0xSM7}9n2L$p0Yy6_F9?68n)xY#8)G*Sp?N{TlrGHSvFGTF%8 znOotP8ISE4+-5F?6Q?{$@OtrJy%bGUs^*%;!a0u&3%6lBx#J}J0W2XLAhP&YH7-8s z9BV>RD;^l3?;p=X6PXE|UweC=yJ3g6<@EVex(xg@4Y4o_UxMW_E+KETe zPpL`1TGFah6AbRk0jAk%aJ9r{jN*oT4L{)Z$eMuv4N_$PpUBz&EuSH!qN=8*@gGQ$ z_5V=M`0qrKnStg1LKOeOxc|Q7{l6yn{|8az_&*ax5mOuI{~X-^4+!|5sNjEWciS0R z!Tith3?>H7|G@#7{xL#!_J5D~U$Gz~D+3Gb|J!;70MvfVYl9AV!&S|uYE{g*uwKmu zC;AUtbD~)ET&qLjFk1%Ov*e5K_SRSD4!hH|3E7)Id#!elLOf+9V|WHV9+O#kZBluTbA zKH-HKEKmcezmB(nEuWei5Dqo<%?~CQ#{)1))K>K*5X>ZyCZty!EJ$)|P+@9ga!B!o zcFNBi*lfl;uzzRgW|UtWsK}MTv|7pVGZ473x^&!@wwSW9wtq%YIojybMUT)dj7EDq z*L{7RySqCB&PE#pH-|RV93u!%l@2X%1tCUUBljrWZHJD3K|S7SZGRe48PIu(8l7L{ z67HlfZ&bFRKYKuRGUe#@(Ny2mHryec`vkl~Vmu@TXW-=zTguOdKhkaPGLZjduV4Sy z$cGyt^+!)uw5%-NC2sUpSJfpg(6L%Gp?^XWPh({%HF59i_GL?ceRbqOW>0-beR6gE zSZ1I8U^K8&0Ts|e%lo%-c4T>Da$zNGV`TEx4UDF5sE2h!QCE6vP&VG^_7eCWJ3lnF zdEA-yN|(krc4c%|P-xf8&d#ygv)OTvG=r0q$y{BFqa)~;#OKJqZ1AnP8L%U`eM3Wo z;{ymVAvw_K!eYXao4pIY_^keT&!FAsmp3jRF7UA(ssCryq@bN2qPI@emLQP2IeT30 
z%)f9(G@`$Ne|mB`buXBSp=sJ@^Ap1%_Xv{W_uv{Se0eLrP`gSb4vd@`3hKxAxkYCv(xnP|OV&wDRBib>Jc-vfb9 zv9(cMZJ_=yb`5sA?*#qVKk0l|e!qf#-R{(*;%&2`gXMiS?s`^bRh8KF`2WmUf2U6T z?705K-1}+X{!EFFPc1*w(_Yy<{fJxJn44Za5qxcQx3_g)bq5Y?fUo}8DbvpGoKyfY zH99ta>}X-BAGF{g0$S?6O2gIYl+)3p!bt*Ew1^L^J zuB|RYJU&Zhc6`S-{t*9oZG9_tIxv1gI&_0GyVUtuKl5{P`buA3e%HP7f}=P3^|d7B z@#DQmXKijbKgRaq{CVKNWp8*-ykmE^clu6!!%>a&>thE=`3cMdLa+RH?EjH_Qj6Xq zfbuqeFGZbhFG& z2H%OeeN}JVnfyV^zM{=p$pzWTIDvQk=3e#FjV{k@eAxJj>d^zVB)$`TuJBVJj-r@E z)HUrIj1w4bAymV{XsIN)Kk9<2j0Y3TpnT9&tpig z0HaK2jWd$kC^qh14N@b8H;IM?=aYJn{V$eT`Q`g4bHjrFR=~(y;(d0^$@QlQaYos0 z@ii;F$=UHykAx<|@m*c&lDlxvF`!r`ENOY1L5`M5G|99|x_t(VSi=(=8$Ub)-{<5n;am>2ns zLM9yoMSv|T9$HiWfqctw}EPYU%R7FCqJn_k^%;xw4%7;&y{5-!I_ z6AU;U>C{Zwc}#sBTGCq?s7N)tsMZ|W^DuwuXic)1Ik(y0-IV$&${|cj+F4cKISt*~ z7+oeiq(&PNxFe)^y#u5OjxqWRchCRLIKoDfGV3@r)_O5LOxzo%OAdE{p;#6aiLtB zEJY40tgz5isAzAl;@Z8K>A8KDv2!Erc&8rpLnxNooA8mYBa+8EONy~MC^vNY&=E@x z8-iJ_lpnypX0mF7vetW^)KKzN@I}=Z(TfT1oVcIx<6+=Ktr0PuG820Ui1AU!Awt4V z*XaBD)sQ8nbywi!fS7uxZHW=tGv|Cr`!=F1p=r?sM#|=za|A|>DFBz)9rXygutn!| zz17w`f2VH~i2@crA?kY$l$07obQs|qFYRt6Z$o306)gsbFCC0{NlE~FKqglJ|JJ+b zfq?BtAz0i@?ZKlQ$$fMsKXM)3nOii>=gDuNobOZCDU1O8#wSiu+&Gz$1-Y^p>gMOi z!?zq<3zHzcEyrYGjR$p*4ts-`r(AXh9}T^M!eW%O>dVWzR zYlIiKN(q@MZ@)MLSU@s=r4;d1p~K^CGRU;81J@div6I%7$2ZOY)S!13ky`=#xO|}= zE-jW3z`)B4TaZ#a^C_Mkx%kUj$M)P=xmBBj`<9d{k6mo2mdl(jt&z{amev} zQIpb&F;T2cWG5<{0qnzM!BP|oe)EjiSHw+m!$9x@jcZC;0J@PC7abMmvSjS$pvo+8$;sGoRoDZx+6srfdV4@omcxElc zFVg7KL?%!;t~PDc&Dq@LzbK7KqbT|ev*T7o%P`-3t9lB#>wOUNV@BFHaP-Ywk0f}q z$McP!%cmAZxu$XQ}4y#^wXQ)xr6%$`nvA(_NXq3Eo$8}F=`9u z?LAdpzs_E?zpi3qye-^PCPx>3_NHBAN_!~10{_CA%$gSm8fslKf;;jo+%7pSE@UV`~uGT3a^XxsnuJm&3om!z~e zm=sjFqn6e_4s4G8BLyK`)>dOoxbz}>F}e5Y$nX)Q1F24AgnxTVWxyLpAx)xNEle8T zrR?|ALz3N1Ow8x;4VzQ}!KNVxbECIET+9&n6pt1q(?>cb>xp?fWzK5p@3%}oazwa62ZX3q7 zhZ)kU&7LA}u1MM??t0-+Jc;6;!pq*+?3`MT6US5qiFqIpg5M7!G=o_2A=l!~%)N%` z%J$YdrAB<@LXnCz-)Z5b($EAUjjm*@71}Y6x4&G5nxPkdPQ5KC3-nSKH2F4Xpyj^$ zx2Cnc4Qt0m41t1Xe!E>%)$)Hr_zEbWflXLB{t$Y!Atx98?ROiVxK0T-4h(m=G-oUD zC|is!T?Uo1QCC%Wsw$MOWYeTcMN#$+1EHGYJa+0c_`HYUw094?YuFlcYSI*|rocNt z5O~J>draNdm(Kcv@6~*$?k1ZyDWg>;$@zgK>6vSLBXk zp~)=tr%J)~DLyrlRa}lgD+7=;b|N6q`$#f<&U@J4-#YF^dQzBv-OQFcd22L+$^r84 zPWxeB1wyzN2zwjby7Qjjr5Lgn=6@qnYhtStjPPIj8uuUt5CAysosR92DmXl|P&YQc z{MtcT_WuuK_Y_=N9O(HzwryKGwr$(C(Q(qTZQHhOCmq|ioxXkUnNxG7?%bMrTkB=j zuG+O8R@MLe`Q0EGeG=489AgQc32nyQ}%Jsrj3 z2-B!uerl-b15qf+E*(3Uh&|U3Z)rL^JgA|u8}hHnpe;2l{p37H8dzNrjD&N@6xv?j zL}~lrO`3w2)I|^MLogozIJVc8_r*z!9M5PeU^_<@=U*aO!Ku17C5LCW zE-IJ=s)fItJ0U&N&VH~C#5;HAqVozl>1*7+Os+oDLnm4}Gn4*0ed7w6vg1QJOy=}m zQ=1AdE>rSFnihjTx+HK*0pWOpRsLf-$TNnhE9^a+D2?{jfiE$wP?xldjN7=AMi@eI zjRe{$79XvXY=~CE3off?KD|wtSI`?%>W}1)`z+&+AV@Q#o;q1KGRCAjv=t|mxB~~pfUv4hLIvjJ zri7P;KwH&8M&qX9+%JZSlT%WsH8~Q43%}cCIf90#oF&TH(W9+xEQ!4oJM0KMQtXag zgLE;&8ljBciKe%fs9lS2!6T!-908ZMm+W_Zjyv-kyrjBQc$F~`1Pm!;YENM8KZb~E%F+@9U^A?))vnvYI& z4=V%Ky=Jv-TmxFZpQ3c{$cBwtU}K5KtM-&A4Jx;nitJB4tN_aoUVTid(hN>YY>EJC?eV-WVz$4G)CX-kfu(*_g`k9-W1QlL znq3R#UZDc|0m$Sa;9d%M`}qV_W`n4kqBq&s=XixI7Yl~BTY{Gu(jux!T`MXhgu!x7 z4AeaKvxCsJlj@IHC9;X0UCY)X8R2sXiHBZ5|0~>wJ z>T7X0urs1voJGr43)0M)_?Re*EDJZD%s)R$VK%+050PE>1IW<790@=hu_NK7Bf7D2B8@XKz9fJfc~D^YPO0fRsgt5)@l^j0NLN z$r?wf%@W!`7bzYvKJ{!gGZ464n9n~3q&^K+Vge>X8_+-Xow}Fpc%FG4;3GcX*>nR9 zxjcB%fCX7t4olJ-fhkUe)2JqRjocq>_gSxnL|4rQAn5i|hs0YCg@a4f+_K_8QDwT!MVNT2vE3%U!-n)s3$NZTf63aH%0JV3eXt`nWW*3?AEr) z^Q!24>}EXTs4rIy0By95SS4K9t2heS2DU*C*IvYU}%x z>p8*+wA6X0gTUzXT&)e z<5J=?#80)F&lvGjcB>bIhl~CMJRKBi9pf@%aX~w3Qq;!H?yn$~o@>i*x77v=b~39M zgUFlfG>P5v%{t6CW@f541c(g4k)(7Di2If7USZopZ0spm9P^VXu<&*?cQMZM2gBuvBSYB%;DV 
zBkqMVHmfn9)^LK`Q}+Y8em=kV2t%^JSd9~~^i2uvIH$gu$zV!t_#%I>|C`~B2zYRf zEGtFG=}cvcsp?R$IVMdgp7{Ex@>_u;Nc*O+ls6V(d*Vu!7X15^Ep3(T3Mc-nAHn7c zk2Ui(a2SSfv>PtAHO&51*I@!l_eR^5JE*Ysx2U_+%Zv+a38+JN1ChBOrDc73BO~fd zJH^;AHL+i^6ZD*a3Z-BG8oL$!IvIY^T&4Wn3J$AJ4pM(60Iswt6y|NQ%KF7`+QQkT?-(yL@L#M$>kv{|YYmUp3165);u$Q4cJ1uPZ zFh?F16D_v-iA~JvRhP)Xgxiku=^&JnreSgVY^z zAlPEmWV@z~pLL)v6Xm*G+P*;!MVS22NU2r5DLy)qDA^bH@8JVjp<^p~FP5Ljp~GhV zUD721xhb=V(rS8fAeq4l!8ROn@`fg_D+>6D?>uir{ST2|qp{#cATOEr`fb|D{Hp^e z)v0V(+A`{E4g|;(1v9JmlbI8|7wX4deo*FvsTziH4kg0nq=04DJ1sW*k(QJ-Lx`oW+5vWe`C?G!2F6D?Svl=WN|5yFv2S| zZ+?z<4Mk}#-X`i|vN$uXyg@x<9H)s&VhJ*xHg15uOI_|YNAMLo1(?;1*5N*8@3x^FG}FyJtZ^x){sGkD zSxx-*WYQwi@x`b|k_WL<6ys2!?JI~vL8uJDKn-h3CI(T!w+QbgW^D4p_2-P3>v2+p zchU_9N*?W4$V`!WSTI<{huIT4v+6LI!XA?}1#(B7R*%qX*4nns!ej9ME3cMy133Sf z;hY|J_8AJgEB*4E$N~2ktx;7dE*V8V_rO_2t7%60lS;=}5YmT2;_{N)wI5~=@LAan zl!pOCy)DY)X(Tg*qg@gk9;xySA`&&0Obg8VdE_6UYHV@D-UrE)w)q9(MfJne%y~1; z&7m^dG*$AdJ`iNmQUK)nq#MTx4(O<7OhgT|bb|79XdEzK>@B#$BD-r1d|#E!7uL&4 zPZ?L>27_B{+s;DCUqVzNc*BhkV&C4}@!Lz`4J^%N3K4XytXrY`u>zFs!Ythj7vNXm zU;Ei7Ek=8Q>Vg%U75f(DDXmr+()z8M`IMt&Xd=mz5%Ji{;dD!6!M}Ruy1|g!J5Cvg zG*Y-qUnPG+(scaBXeGBUvtX?J$Nsb6>fQ+a`Ee277*GG{Ob86i))4x%9_GmzWDu)Ww_h z8nA4b2kxQ<#hTw4(IQ0AxAE{G@OvD4#Ed75~wqg&Jq^A?WI@X*0jBs2itt6IvLw7cgM*&AD|EXe5l;>o6RKWmZU3)mu%> zG8a`PuDAH^TWJX9MAaPw7zb_fs|C0lN(v*R=rWLu1X*Lmuj}-S`saa-A!F7=YwQ`1 zV)$HE%~GA(F)YD4bF{q>$6b47`A{WQwQI_XD6FW*4&pnu;XY5TJ6rLPz`+#`)r-0r zKjvhTY076%f~@0J2I86p0pFv%8jHeQ)(-#}rcCbjpb6(jA(^vhsedZtQ7l zqq$}n)%E6nZ{6;kuEo`eHblV%MQt&L`UZtXWsKWcD=FnHTALcqhxnr(pdPlai44jh zrHdw1*6$&j^RxADTC5*vo)cSgG%d4FX5X}qB8*vmD{?1kD4qmEvUsnWBarn+rtPI+ zB8!(#)|$xM^n-Ej+Dxp1UC*T$+nLdszo0gAAWH` zW|B=;{oDtRjomJn3{+!?76=#2hQ;yN)25ZZ(BB0zVM?CW**Js%Ibt>!>h)>C5r&a9 zA&${YVhI!Eq2iJ0l^BnpgJDpUPs|6mb&#Lefd=fL(8|4WP->}=NvCuj6h}7o|6P`&>7-#XU+ZP z%*mum>hf!ld!Q8Mnt6k(E^7Xo>0ZUFbrf#S($gw9LxB$VTr(|5FfM&+iCj59CzmH2 zM!xd3g7(Zt+3W^x!V==*Dst14?fH94^~N%p-5rvLb@EfN3_LTIEVVkM9xAhfgP)tR z(wwG-ZY9-N)6+iq`w7QrqetdEM}IeXs()((F$x@wHs^~9bD(MHkUXZ=tOUqFJfFap zs}L%CcGWx+B@ejKmnQ^r$bF$s(0@?3W(cXt#>zKm^N1^a| zJ(YLHQBmKBg_aoVk_Odg4IZsXqXQjG<4X|P7yC&V`y%=rTQV^kX&WQ1djiBix0|&+ zCgH%r#%DInv=4DOyPueWLsZ!~P>boY??J&+*lMHEqmB6h3v z*5iSBK@M$G8N_T$v3N9M(3A@kcB@zlQE8AA^aia8vU$N$V~ov<_wUo6Rh*3Qz8_Ao| zwZBm-3x>NyKiOsT!M+XK@p(Opb)(VT)aY{8^APaBtlKU^EY5BOOe3|EH#N5sef!>< zHQO40L#LN@IdPaoOHMU(r=|=$(Gqnw3urygE(6FIt^72cyP%n0U0P1_?PxSCzmc_~ zrP-{{1KeYnr7>nx0F<3~!abg=iWQkI05qR`6X8OYIB`dfQWM#lI^OPt1RYvC^Xz*Q zIe4wq=);biG*SMJZGRYW(3JF2P5f3aPWQEsWvh_)ahOAPh8->SR3%m2eRWkK)|gsz z{*>Gsm;K>evifE8W~r8_b-=26L0!>x(NeI4Xpw`h=eOg1&tv8-y>sFXIQi_p!Hfpy zK^cl32DmI@F5jvKED}&eq^XeSGjKbWN+Z$D zL?W6_;wMvDoPATp0F2>S6z6P8Ar*D8S_J2=K)W3sY34o}PS*zE9%JOeiSVTY1Nb&cbPX zQ~}zo3GzIX(*8)pkPmUr7aY|Z4h%rFn#oy!`+f?M<)3w& zTz9kBSaaDar;?Qg9#`A>qh+UNsPd&#%%~DCF&u5Z;=6&E2Y_d}=e970eLXWH&$3z6 zPkg_kHK}vQeT9c@1i7~@?BGG9YWHwL;RN+V(fJO3_U9Yjy(5(exm|pU%@{4U}lg_{l>ldaWJrtvQG><)rfMjWi(=)E*hr{~{XQ2m86x)oxH!Y)8 zdHinKY#1JCda+W{Rp4(VOCktaizgz_=-ccGr;?qY=0lrW?8tcT1#bPb3oCfiaRYAv zW`QSL90pU3@qT3cL0@CPzJsY}oq5E_{fa{ycGaIjQ~+nJ%Xa1}U|N!XvNO7ozJaec zEr^t}*(*t{qxUUP1hPHZw5>WvX(L*aW{H^Y=xjpDbDijico|NzJoCkL;-iV+b39LuuyPf~vMDR# z2zrYW6UlYEpTM-GG0Ayg;)>(*rxyeEkEALbEXiLjAiJwzO*T48*t{Gg4d0TiL_*I*!$CWa`rFW0=GR}uJp|R;HOl!U z^XCf8_DpB4kBjoDapFyL+xi+E_?qh#3=a!26!s*xGci4jZJT{Q0K)*k-u#<>9_ybP zTmP$m9>0=+po$o!B)@{%f4BXIp8fYK9|nAOCZ^vDe*V7v=hLI(@2h`4FaG8v^7-R` zv-OAR^U;6P$kSz}`-gVj@9x|`wCmXZ^r`;Rpu=aSr~gYE4*#qMXovs1CFx)& zWTbbRkx>vG@(#caqZ6ZZtZl5f>fwBBZhd3C$C#XFZA$ZbWW08>)&A-##(92! 
zZ{>Qvd1Zd~_=<4jrE;Z3j>{W_AU1gR5^a3F|`{J^kLrJi3 z&%)a@N`H2OsG2wZ`bdozDIhwmwWO<#w-PU{(i7oCP5c;GSlxIXa>D@PK!sV4=a`ep za$TD^lp6vj0o73cxA=qgx<&!9*NmbsU4(uZs-DknP9Xd4B&AMe3z~PdG)QYs%r*1j0<)FBsK}g zo){=OnEcp@^4w?&jbxq}-<|cMcHxK(s3{_pIkkRQ+Kx>w45WG&bPwoOh*gY_evj&* z8{>hWbOqBwU5HAKno&S(_UtIJ4|E?GJF(s=Rw~6@sXz$j)NwknM`0;^4u4>uHh@Sb zhalX^9Q%>noSHu~QW#d3#E3B5Niw@#$t38J3L6NDuTi3In-I_OWQ@X>Z@?sPE>+66 zQ90V?m`ZSL1Z}_ATB7KHC7Hi&34yjl&qa&Rz`Uv=;+>GP_xoml;SJ`1H0@GD7sJd^ z5xHKAJZMgjIkZCMaz?sdlNz&Ug`8YR&uS&;#hlVFQR$mphA-0LO^JKcA%5wmxc8qw z1IMbxJM(+f`FigkxeU*u%e(aXq;d&9^eRgdfpILCb9WK9mx5`8QvnZ$gz>x^kaw+WRyw%2k>L?O@g|Aq zUt#~Vnaf8PB~ zuAQlkgS8z#Bl{oVeez^y?;vQRYlqMNX)`3L`_~J4hEIf5Obr}N?C}|Yzi8y4XceuM ztW19c_xXDGXKH`{HfudIC@!wQwbk)i*jfHxQ;SFB3q`OD8Ovs&o@iFDA}Yy>C?*RL z{Qh!>g;cK0{Y5HkbFA$e+Wz6xk#kz9u@UKRXgQtRIk}@Q{_Nobd3}9f)78|1lS^HF z*^G;uw`Q?}ve8NBVVPvJ?(S-9D7}G8i-NP(iv;6EuF=>J2y7Y@u2+y-#v;!!tLPwVT*ZoipyRGr_Q>v+M-T`JyW~5?yTHeuGNqCvf%Sv z8pJpyM|O?PBQ=%_o=;}E;2V}dnf*?)d^?Vl75pC0#3MAD+)?Yf?p6I#HM2H-Z^wZ{ zZ9s=xp>78)iP39dpKKJ_g489@u z1>ey}s72$883TZ5au7gS?v%1`cQX*3pqr%TBTSE8wZ22F7!VF5dZ{syj)}u`I9R(fc~JS1>tBTF&$dyW~tRc#oE&xzOcYs#q)i92h%pgZ-0p)!s{ z^Vy#ZrH`-_Xo>0FH6GDj1h5HwisvNAOhikQ6{nI8viw`cOW`JcC##%*7S~R@xy=z? zXBZv}m98Wv{j@pYypD+;3um{I7rH+y^_>wgUa7)Ylt`?dN27VPkRKLjxvMi|w4Rsu zkkpjlkyO7pnp?k|&1J?2^L`K5J?LMRKzIYUJ@@bXS4{srYx19A$}cORDk|}bsnlQA z^8F*FOusk$eWLewN%YA$hp1MvDiCp>&0ONr>{*XLhfzhUDGpcIc|IGYWT}bMe*76?iOacI_HN zxpKl)<;@AA2lAKRAt{{X$!l2-E%q*aEhL=)V34xVC16N^0*GuqI{;{aG@l4yKG;}y zK0q=Jsz7jgaM6NM&|DH=9|u#2984%n6!Z*A0LFsBxxh($DlkqFv{a(0Sf5xo5uhLu zw04MI(=kIK^a(P68@des7J6N_;t4=k^obmifnJ+bkQ}StW_7$OXBNDw*4M_t1_v(UehKw;U((*gG4~XtCE$6}z{$hq<`;Rmp)T$;pMK zknWbj&HX8)KNgHWg_J_Oe*8*j_(#J^wmD;M2dU}JbX}I8=h9p26L%?G`&RsU^D?~D zZ$?>rH|#pnjT#25%*Lpn44)Nd>t!Ul>?SO8O-{=>yL!n}v>d9BODq=a9K8{G8)8r) zhEIwxX}&&a;+*N~TtDA1rSz7qJMyR19;at=!M+tMJbsbXeqOnidg;EfPFPO8dvaOB zD$(ImrBSnVYRJ8NZ(~Pid7Icu{Pph2B-v7fucV}sK5MIx>NV??g1_%E{2Ba$6;*ts zOPKUKvC1P!?Av<;QhWz1g`rNOhg#E9?=mPUrE3hVu4Bvd>*vRW4oBLDLIpSV8zvI` zN;4&x8~k0vxu!(~bl5{t@oo7Q?VL+P2jiPQxho7+DI6#lQMs>i0Q6Rf(^eO~?&4wX zPj%=_dC|pq$v%1>bYri!F1CkVnB5(`_aSm?^DW3j{s1;;K=^wiBpor0Bcuu7MMj-N zexs;et|9F2pqlA3bPjfe1z8tjCP&>GpIy}l`t{a2pwe*@>wY&QQho4uiI3l={oh_nPELJ2dt zA=t0BoLBtOPO{|?i^v!7M>dPZpfnGBMDNAubZkGw2@vG$2m86@s zd2P3HJf-{Wogcb76n|>29}~(AYo`ltu*}@Umoe?263^up(dw_z zg^~cycdpwcQuLw}h>?}oM*y0K^#(BzSpd>Pp!s}V;RlL>)XzMfNP*_4pHZD9)bce- zMG*0l4we8w3xTy0?_1C|D3dLQBB+DDA{;s;75!mYVf#Rwp2fC}!n zMN}i7QX`OzlAND>ZzV+IUD3qXxn6M!*pHCc_9gK#U}u4`1%MYLWEZW|YtBe^o8x{j zR)iO=q0N^h+Z|ZFo~#C{z-}<>DA%8fDftq7kpZvjuvc~1n-23!chDJls}Ao{$eWJ- zy}#5Fywm3s%6ot2GjQ|IXT#oNOz#C7n(jb9q+hXgHM(?K z$8%2@V7!F3Z+n-46^c#QE&1*>;ng0u3^Ie*JlYewy=}x}z5f;1e_yxypY&H;QAt@@ z^*6AeyPE%5e*c=C{+-v~+nxU_kNrEX|0=%!Y+1|t8|Z(^Vp;!>i0^9IC#?c{EwT9g z1ixwB{butI;;Z&w;yeGZ;`_=_#B#^x?xsiUsZufCb@5ZF!z_KfJ5I&2o_Ep6z*A=C zpxu-k^riZR^~P8p=lRLN-RfR&R+_EEdc3+Z!P+R5x@MC20QHsSt@L<>lmwUkutB!Y zaREnX3t5tuaLFN|-h8E{lT26r2r9t9Q2{nN_N5Z;o~YX8%@%V^cmARUUuyYwUOFr2 zz4zE1qG;07^0~;<_#r!RIrI6$O#sJuW!20bBu(Fdnze;Zat=5ac$~ih#HXpg{~@(1LY#B z3Oosc)`f81<*VOQIcaFzh{{nIQ%RZ{qVGXB@#^AhdohSH&`Ccc<|72knDZHmo5_4eYB zf5Y>iCGbDvSyEb2;Jeswozo#_$$r~e<>;dsmRRw7s38h z{p!yL*#3qy(_f6@@&A+7(7)!!C<3)N1hz{J6!T_PF)3C=qOyqpE;nA6RGhyU`y)5L zJ#VHQCwgn<26c8;Q_qSn!pMp&m1Lf{mpQ99AC1IV@@$z{kmBZ2{nbJae>f6Rekr!% zF&9~^gXi>NmqhglB*<&4i|7)Fky~O3!sD+Q4dy}PG^=Orem&0MY;{iX1L`%cx z*#@Tdh9|o2zZRM4r0zB~1Sp+eV$)P9vUe#JrzuoNrt~oKyQmvLC4R2?D}H znr~rjW3%Pqu-Qm1-8G+`EeI4N&)g9q*+#$*l;Y8TYBXCv+ea|3UDzt7OONXpf(d~x zf9_!rRYbJsfSay6m?hVk(p@%q27hJ##T=UyW{E$pksaIT3uI$4-!#j`PDX}#S}E?> 
z=O;`?=9QBj$)R0FWj}q6{ISTCHI2s~G`i00m}k|!rjg=2a~-En#z%Q%Ih@@TAZ3$s zf9Ar_*`yVpNZ-58lupJ^d_MZjtD&=t6JEFqdk)DHd1Q{y+>9@hFTIXk(y!vQvfF>H zIo=C9OjfubUG>dJHg^ma-v=mN>}LJC?gY+HCVYhcf2zCka45U?PegXvLSz(0nC6*% z2qBdyvQx-Tl)X@P*`ozn%9brz(<0eIDcMu<${w;C%h+Om&$N6SGw*eM-}kTI^&PG& zb9bJz-REA;=iK*slHK!S|3;q4G2eeC&uS+P49_Tm@~ortH``+r721UW^Fse4(Ez!H z{w4fm`{SQ`Yci202h4woG!FLfM7l?ovtrVqIFx4m6-rS|8_G(TX+?)yq&qb?R~TLY zl_|_xz-&Q!ci;2fCFc7`Loiki4Qq8=idNR>}=`&_s_zjkC zfrS^SzI|2Tjc@tf&ern=5;AM)awb*Zp7)5$7q8+sJR@F);;qf3?^5=Ov61t9n%dqVt=xEg#lJnNRgcczLWV z?BVEvp6tl1@;YRQ_Koqcr?bHT5I<{QxK8WA*mnY|JODNT3!0j zT>je!XgsTF2WaX0GHBCME_g*U7Bdu!*3hlj(i&` z{??|iF2g-Q>zOLPnD7O`ktCj`RBZIY-#fh}iJ^0cf`g#ezWmZ`+9kn6)J2%V1E-|y zx~h~51%)|ZHlE*yhO`-{N9&3i(j0myc}a3~p8?O^U?+*uJ5Dv)#(gXTV(J;r$lKAz z&txoy&J_UJUy4tDWg6AmdmMkuu=7lQpgrdyEMvLDJ5&E!W0xpdW4EZvBwVajT2g`X zDoeCvznjLY%JiSc|B$hE-wMSUysTd2W ztR8+qkEuhPY_whQZo0Ixd4m6K@igyXlTm=*yCnXvy7!9AuSd)ly^-cMdntHNCiO0z z<|LK@^WH~zqDNi0^U0UH8jdfV)bUC4QjN9+C!2<#RofVk8+*|$lt=2%#aI>~v1PYs z=_|f|HgI1xX_nRUJs&d|C4?`DcQuZ7fnI6`Od8l426Tv{0*4;5_iM#%%z6;2G2O(f zrp;wNLiO`*VxKi}U3OjZ?G!QXF)^rRlVkt7Vr(~2)QT9of4v&;iqEcSJ^@~ih}wKH zELmQ;yVonJ#mqnc*R1)i9OURhx{n`pJ^O* zJvB9z6C{oEGdN}Io&ZG!$N^hUs$n1|@Ow}2b4O3^L{P?;V93#j|9OprxvIe7lw*ER z?W+P?&hyxX_Qxj@R_M5&jHF_PLpN6Rat~=Y(&sl+bzn`X+FYPh=-3hSDe{7zblm9@IDIGI5vH`gHSsjkG>kRtY&0B@IX*U; zCEfRD5BsD1*=X7Qvl(*pKi1NuF~rj{`cLcxKJKkc3mFvLbNFQ35OZaghD%BQ%`+|` z$Jkgz^M_<0Ho(2CzGH!*+qa(uwCMczfY-_t&|;lkqqi&X3RMk#W_%%$hNEU(I=$g* zidUO7En8qSsB2g}J|WYN&4Ic{(EL7%Ew}4!zJ1UOL7xvuVZxHGgNOXcO!c%o5d4YR zBXjSoZJuhUT+{)P7a3(@*HnPiYjp7)^QLbhMqJk)+;Z=Hd~qlD*o7OwNQmBEO;{)w zE+VErBie#jBgpd=uFXW9u#M+agk@)YlI7ILrgSm7kB+>JX3iR`DVB{+UWdxYQ4K8h z*Yugcur9?hTxDC(m^lSVo!SrN$FYXJtJi}>>I>-zcQo=hjtPjD38b9SR!Hu$yj-_@ zy!J@Q#iAXyEzIB5Kh;BWhOV(m>0a|g**skN>*IjqYK4%eaRX3hG&8ByZ#t2dJyK<5 z@!scbL|3fY83hMD3%)COUM#kwad$$O$&)}oA&0D2ui~y97!Yr$%6LC};Zwjo-BniG zuVFDyV?Ve?xw$w*kIP=dGNi6yT0PY~RzmWo{EAA6mG3r}tqIjdkBODdo6FGk{;qDK zk`0kDZbQVK;0Nm_rZ;VJh^{LlyNNg6l_Ezj9xESle}r!p=4hYp{?_s#)|AJ);9FMB zZXgOzNE0S7PGu;1GZoE3t9%_Jh_qTh;~LZ&2O!_)|1*_y+*k<=kolo#%p;`UswOk{o+reG7(X+;8_@ z(M6Vh9sfwnEHjqLFvh2}-#o+;sGj3okSe`vK9$2FfM>mMy(2@U{KH73?@JRdx@3|5 zn4Ge!{APsLu9BnG9Yc6zC3jux>+cPn4fgzIuQx=RIvR&pojoJZi&~W!hbvTN2HF#5 zdrWv1Vi+p24p)j~v@I4S=slku#urZyJrOpuEQxgm-`Lum7iLN9O1fMzvO1DD~V*y>P+J!22P=Ji%j9I`MHeK5p&Ly%9|@b%dvO%_=7${X|? 
zEX>a=(imvur?&Z@rE?zmD#Q{DWshXP__PyFyT^aoNIB-6{6d=1HbMGiP{Zt+$hSXo zV*vDSP_?q4qf>Ot=mUcrYAxGdc53qBrwQ-IUca7kiyZ~L6DL#yx6j>98j812a8-3t zTM4mHSY?pT8mYXg^jM7f_`dO6>z=*)TRsX}HnKfBnsT09+?7WpTvhn>!2||;pj+C@ z_^Is@`MeFe4^R!CA0?`&G30e0jIAC>n3r#e?VmSw{@@V3D{M8Nr%Yf%ij5#Rn)oHz zLO)5v!caAhs28(iw61LFP4dQNvWoY)o(fYme#9mV3M_ck{!Q zPQP9i)X!bnywIeLfBNWE_q-Qw?n$(zbLp3^^s35ErBm;H-qpviKjSHqtj@UhDX(67 zW=P7}IWrCeD|%_SorwK(GAKxD^>mGTw2$iD{*U=|Ba8(8mbW`a93*o__{WAm34QSR z;>@bBJZ;b|qgDUQ#oev4eD z{U+BoKJyUi+``AtP-`4G`MKUd(>U5EjLs-){m?knxF?Dl_ayDT{-_+%x8Fz!^|2#m zI39-F2m6hbp#I&qiadxG497c2iIimg3Zk8RJRM+n7(8}t^%b1)t+-@ceqs4?_8#2T zQkAPS>T>FSGvxy_P>1&m@6f}gPvZ)*dk4cS2^U{H-S<)x)1CdWfswHfIf6ge&9o%{ z_H#@*o)a6|Tiw%NI*C71IoZl^@jXGM7yToQz+|se z-V^zNDA5G7+~U=vGg=ruzri{}4V04 z8qM+QdsdO78AfDkGd`*3uauWFb^#2-ue_VVP#@v@n`6E29SDEng5yo+DjT&k`E)(H#Oc?%C5jqtpOsd*6i$XtLT;r;TGKN@0ovq7EsJrk zJZF~^OuHD>#LfvhXp~`3SbUDsv8%uC%ETtWIN=>Ji~*x+=9fhkIL;*kmSfr-IYWZWeCCXG*GSoaSc&#%tWN(z+NnwL1_toZPld@*{y|=0e;IGb-&yf){x2 zXQk=4_X-RMnC8=7;pN&-uaWRUp5}Qmq_Pb$6wG+Z<(OAZLvU-Frp9;t4b*`Y=T>7+ zMf>rM%(&${rk;x9ZR={s`!r<*BO&Fo7g>cmUh7%IXbtI!uW=*ulLV?h0;YlPpz}lOdf4xP^%qv zy5z>cu4%Obh7_zd5M)MiM_T+bgp|)LZ_I`c$rIY;*%rScrr=;(c z@zv)!O3Asu>>#`xtz%#sbBO18En5tNpr#tU>vi-ugkuj4K5#E%=MpG4Ja%({i@W$C zt9U##oq1Pith39_qqVO~-ULSmiG?&{uOB-naks5arKm6mYswuYp2{ERcX{KwoPm_Y za2p7MDoIOnu$EUa%v`N*MAMZ#d(MQumQ98#eqZO@|FyMOMIISG;H@9zWBKv?+tH1)U8<&6R6{alLfTEG!)?^7~GE4)EE=GG5bIVO^`Q7wI>2 z8)90{zrK7YzE9KGOK}|hi@h?{{p&`i!`ElKe^CK(YdJ9-lT5D1rjw&3aQN_+JYOh!@L=k`=Am zGd}Umv!%M;sacVe$%990m}n3zqmv$j@ncn2g1ACq>__;0JS>ac${pl#Yem>s#%JmE z52?=W@((J6Q;Rm7TzLMOY*W04cPlo5rb0tWm)((>ZSZ}%zs>e97~>Bb`a_b*&nvOy zhpGR=qW!*m^PEUSd7?{ARo8I4c~y@MwBeE~tGaKuZ*I@%j|g4$uczXk9cY8mFR*Z+ zQxk+IvL_B%ecqG1UHNeywBbE-^~gID1|BEH(DoE`b2{7Y;O71>qcB41+d0vM>czO} z$Y)S0)@Jgfa=rNto3hA9ofOgS(eRL0*aPKd&QjQrbkB7s@Uu?+BanE0Y~< zHCYj&@xj}Dx1pR%{rOREd4OCcn;N|~gk71{P90N~icI7BG<1q+NFRhzX4}(n&PA3E zJxB^NE9@Q9KGFTco`pP%f7O_s^nSx?F(jRybB+@eWm#L}BO$A zBx;~gxi;9J$*r7mfR8V8m-n%Ib2Tz85*#ZDa{`Ou;fhryqxIY~Ml7=jWGYji%=>nL ze+hdlj=x=3YXD{as-pe;O$LRv3}JlYa$<{3w9wL^dH&{O zvlq17pn1+(PBijL6`1z1iCRunt8CR6^|5iCMd)~@W_?+g<^?VHqJsLaE=*mxKR1mH zUsKm4Qo7Y=mO-4|T*x1`Hu1)E%)RTf)^$apxextJ`KI9e4(`_$VyHzM`1dY3#U+;% zqD=)mqPM_~BoOkqX#euw$wd37$tSbgWKZ)) zWmlgCy0|OHZ}U38nv!9Wa_Bu=v4Z4}yeR6R+QBpQfbPaemANtq-qfvA+4Q`xQ?pV| z)uiAe+W)pRUG&qQ_N8j6v%xFoG2CN^-FG~?dBDI-!v$&bPV4S`pjZ|%$o*yWj+2`l z4-8cG%yf?P*DZw=&uZUsA6ZW8+&HoJ{Xk2Xj!=ETbSqvb(fEAjr^1)8a_~Z0Uap++ z%XtfVbJraN`U6@CXXokc>pG(TW*w35o&K4LpiHi$SV&Z85{i6lNBZ)69Z`9I*q?y* zpLGOKJEvOaw$^T7il&nXnDzxm%c$F1xk1kUcn|{oXxoI8A^9r_1WSFuuVU_Q?&xI8 zeCS6_kRMHKF-zp|nwG1Rm4_vmfhlpyUR~GP5aMiQ1O6ieK#?#g90}%js(_i8qzp3& zko^0rz0;+mV7?|q;;1Yf21mj$FaUtTF>tsP40`|uI{=nD);7#A z2o}W*`_Bi0MxnslZEYaGXmA9{gC~6;m;OV8qd;bDrGam)2f0H|15f~t#KJ8!Gy;YL zH|$$zC^(M9s;x8>hLj$#m4?K^soKC{5g=2ymcycPAlJ6i&=@3DUr;a%B@JvApzI$S zh9ku`Z>bNB!jgbjavBVVz@RC{0n*SUcw=ihI10=%-a-S=DDa1phJ<0M#*4s$+Z~E$ z;Q$y_y@dw0i^Eck0|rCFNOpqs`NK;j*h4BBoVw3QI7xUYo<&oR18f6L%8#Td2QrD0 zhC)-XD>Q(n8V6V%)tsXLW^ABPQMG{qIYQYk=5Ol5V3CyN04xqpIWGXXOey;ZM*}3s zb<23+Xe5k+Z!iP`oM?)22n3pPJ;RV7k+LsH1R6;>rzjZMFhzYR7#2ZEL!-f^Kv52j zM&Kyhz#tJ+^u+o4dOIu$KU+xIni>TwGQ281w%D D+NPzp literal 0 HcmV?d00001 From a45d0f25d32f498c14d22f4f37c722153acae34a Mon Sep 17 00:00:00 2001 From: 3pointer Date: Wed, 27 Nov 2024 17:57:08 +0800 Subject: [PATCH 06/17] test: fix the data race in restore test (#57762) close pingcap/tidb#57760 --- br/pkg/restore/restorer_test.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/br/pkg/restore/restorer_test.go b/br/pkg/restore/restorer_test.go index 
8e26245dabf06..c662db6bcb803 100644 --- a/br/pkg/restore/restorer_test.go +++ b/br/pkg/restore/restorer_test.go @@ -3,6 +3,7 @@ package restore_test import ( "context" + "sync" "testing" "github.com/pingcap/errors" @@ -65,8 +66,11 @@ func TestSimpleRestorerImportAndProgress(t *testing.T) { {SSTFiles: files}, } progressCount = int64(0) + var mu sync.Mutex err = restorer.GoRestore(func(progress int64) { + mu.Lock() progressCount += progress + mu.Unlock() }, batchFileSet) require.NoError(t, err) err = restorer.WaitUntilFinish() @@ -145,7 +149,12 @@ func TestMultiTablesRestorerRestoreSuccess(t *testing.T) { var progress int64 fileSets := createSampleBatchFileSets() - restorer.GoRestore(func(p int64) { progress += p }, fileSets) + var mu sync.Mutex + restorer.GoRestore(func(p int64) { + mu.Lock() + progress += p + mu.Unlock() + }, fileSets) err := restorer.WaitUntilFinish() require.NoError(t, err) From 3a460983815fd65e4b49d3a6a62261d7798e4455 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B1=B1=E5=B2=9A?= <36239017+YuJuncen@users.noreply.github.com> Date: Thu, 28 Nov 2024 20:17:32 +0800 Subject: [PATCH 07/17] br/stream: added `--delete-compactions` to log truncate (#56761) close pingcap/tidb#56758 --- br/pkg/glue/BUILD.bazel | 2 + br/pkg/glue/console_glue.go | 34 +++- br/pkg/glue/progressing.go | 19 ++- br/pkg/storage/local.go | 2 +- br/pkg/stream/BUILD.bazel | 3 +- br/pkg/stream/stream_metas.go | 40 +++-- br/pkg/stream/stream_metas_test.go | 240 ++++++++++++----------------- br/pkg/task/stream.go | 55 +++++-- 8 files changed, 221 insertions(+), 174 deletions(-) diff --git a/br/pkg/glue/BUILD.bazel b/br/pkg/glue/BUILD.bazel index 1ab377fc602dd..819414ff70eac 100644 --- a/br/pkg/glue/BUILD.bazel +++ b/br/pkg/glue/BUILD.bazel @@ -18,10 +18,12 @@ go_library( "//pkg/parser/model", "//pkg/sessionctx", "@com_github_fatih_color//:color", + "@com_github_pingcap_log//:log", "@com_github_tikv_pd_client//:client", "@com_github_vbauerster_mpb_v7//:mpb", "@com_github_vbauerster_mpb_v7//decor", "@org_golang_x_term//:term", + "@org_uber_go_zap//:zap", ], ) diff --git a/br/pkg/glue/console_glue.go b/br/pkg/glue/console_glue.go index a780d67dd267e..fcd1abd9445b1 100644 --- a/br/pkg/glue/console_glue.go +++ b/br/pkg/glue/console_glue.go @@ -11,6 +11,8 @@ import ( "time" "github.com/fatih/color" + "github.com/pingcap/log" + "go.uber.org/zap" "golang.org/x/term" ) @@ -32,8 +34,13 @@ type ExtraField func() [2]string // WithTimeCost adds the task information of time costing for `ShowTask`. func WithTimeCost() ExtraField { start := time.Now() + var cached time.Duration + return func() [2]string { - return [2]string{"take", time.Since(start).Round(time.Millisecond).String()} + if cached == 0 { + cached = time.Since(start).Round(time.Millisecond) + } + return [2]string{"take", cached.String()} } } @@ -65,14 +72,10 @@ func printFinalMessage(extraFields []ExtraField) func() string { // ShowTask prints a task start information, and mark as finished when the returned function called. // This is for TUI presenting. func (ops ConsoleOperations) ShowTask(message string, extraFields ...ExtraField) func() { - ops.Print(message) + bar := ops.StartProgressBar(message, OnlyOneTask, extraFields...) 
return func() { - fields := make([]string, 0, len(extraFields)) - for _, fieldFunc := range extraFields { - field := fieldFunc() - fields = append(fields, fmt.Sprintf("%s = %s", field[0], color.New(color.Bold).Sprint(field[1]))) - } - ops.Printf("%s { %s }\n", color.HiGreenString("DONE"), strings.Join(fields, ", ")) + bar.Inc() + bar.Close() } } @@ -84,6 +87,21 @@ func (ops ConsoleOperations) RootFrame() Frame { } } +func PrintList[T any](ops ConsoleOperations, title string, items []T, maxItemsDisplay int) { + log.Info("Print list: all items.", zap.String("title", title), zap.Any("items", items)) + ops.Println(title) + toPrint := items + if maxItemsDisplay > 0 { + toPrint = items[:min(len(items), maxItemsDisplay)] + } + for _, item := range toPrint { + ops.Printf("- %v\n", item) + } + if len(items) > len(toPrint) { + ops.Printf("... and %d more ...", len(items)-len(toPrint)) + } +} + // PromptBool prompts a boolean from the user. func (ops ConsoleOperations) PromptBool(p string) bool { if !ops.IsInteractive() { diff --git a/br/pkg/glue/progressing.go b/br/pkg/glue/progressing.go index 3182e46ba53df..fd1616f035e4c 100644 --- a/br/pkg/glue/progressing.go +++ b/br/pkg/glue/progressing.go @@ -18,7 +18,15 @@ import ( const OnlyOneTask int = -1 -var spinnerText []string = []string{".", "..", "..."} +func coloredSpinner(s []string) []string { + c := color.New(color.Bold, color.FgGreen) + for i := range s { + s[i] = c.Sprint(s[i]) + } + return s +} + +var spinnerText []string = coloredSpinner([]string{"/", "-", "\\", "|"}) type pbProgress struct { bar *mpb.Bar @@ -44,6 +52,13 @@ func (p pbProgress) GetCurrent() int64 { // Close marks the progress as 100% complete and that Inc() can no longer be // called. func (p pbProgress) Close() { + // This wait shouldn't block. + // We are just waiting the progress bar refresh to the finished state. + defer func() { + p.bar.Wait() + p.progress.Wait() + }() + if p.bar.Completed() || p.bar.Aborted() { return } @@ -162,7 +177,7 @@ func buildProgressBar(pb *mpb.Progress, title string, total int, extraFields ... } var ( - spinnerDoneText = fmt.Sprintf("... %s", color.GreenString("DONE")) + spinnerDoneText = fmt.Sprintf(":: %s", color.GreenString("DONE")) ) func buildOneTaskBar(pb *mpb.Progress, title string, total int) *mpb.Bar { diff --git a/br/pkg/storage/local.go b/br/pkg/storage/local.go index b825c79e90381..24f530109053d 100644 --- a/br/pkg/storage/local.go +++ b/br/pkg/storage/local.go @@ -52,7 +52,7 @@ func (l *LocalStorage) DeleteFile(_ context.Context, name string) error { os.IsNotExist(err) { return nil } - return err + return errors.Annotatef(err, "failed to delete file %v", name) } // DeleteFiles deletes the files. 
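// A minimal usage sketch for the console helpers touched above: ShowTask now
// drives a one-task progress bar and PrintList caps how many items it prints.
// The caller below is hypothetical and only assumes the glue APIs shown in
// this patch series.
package consoledemo

import "github.com/pingcap/tidb/br/pkg/glue"

func reportWarnings(console glue.ConsoleOperations, warnings []error) {
	// ShowTask returns a closure; calling it marks the bar complete and closes it.
	done := console.ShowTask("Reading log backup metadata... ", glue.WithTimeCost())
	// ... read the metadata here ...
	done()

	// Print at most 10 warnings, plus an "... and N more ..." suffix if truncated.
	if len(warnings) > 0 {
		glue.PrintList(console, "the following warnings happened", warnings, 10)
	}
}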
diff --git a/br/pkg/stream/BUILD.bazel b/br/pkg/stream/BUILD.bazel index e72ef472d26f6..768748325e776 100644 --- a/br/pkg/stream/BUILD.bazel +++ b/br/pkg/stream/BUILD.bazel @@ -64,7 +64,7 @@ go_test( ], embed = [":stream"], flaky = True, - shard_count = 44, + shard_count = 48, deps = [ "//br/pkg/storage", "//br/pkg/streamhelper", @@ -87,6 +87,7 @@ go_test( "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//oracle", "@org_golang_x_exp//maps", + "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", ], ) diff --git a/br/pkg/stream/stream_metas.go b/br/pkg/stream/stream_metas.go index 14c65da097472..f738caaee85fd 100644 --- a/br/pkg/stream/stream_metas.go +++ b/br/pkg/stream/stream_metas.go @@ -761,8 +761,8 @@ func (m MigrationExt) MergeAndMigrateTo( err = m.writeBase(ctx, newBase) if err != nil { result.Warnings = append( - result.MigratedTo.Warnings, - errors.Annotatef(err, "failed to save the merged new base, nothing will happen"), + result.Warnings, + errors.Annotate(err, "failed to save the merged new base"), ) // Put the new BASE here anyway. The caller may want this. result.NewBase = newBase @@ -784,9 +784,9 @@ func (m MigrationExt) MergeAndMigrateTo( result.MigratedTo = m.MigrateTo(ctx, newBase, MTMaybeSkipTruncateLog(!config.alwaysRunTruncate && canSkipTruncate)) // Put the final BASE. - err = m.writeBase(ctx, result.MigratedTo.NewBase) + err = m.writeBase(ctx, result.NewBase) if err != nil { - result.Warnings = append(result.MigratedTo.Warnings, errors.Annotatef(err, "failed to save the new base")) + result.Warnings = append(result.Warnings, errors.Annotatef(err, "failed to save the new base")) } return } @@ -820,8 +820,6 @@ func (m MigrationExt) MigrateTo(ctx context.Context, mig *pb.Migration, opts ... result := MigratedTo{ NewBase: new(pb.Migration), } - // Fills: EditMeta for new Base. - m.doMetaEdits(ctx, mig, &result) // Fills: TruncatedTo, Compactions, DesctructPrefix. if !opt.skipTruncateLog { m.doTruncating(ctx, mig, &result) @@ -831,6 +829,10 @@ func (m MigrationExt) MigrateTo(ctx context.Context, mig *pb.Migration, opts ... result.NewBase.TruncatedTo = mig.TruncatedTo } + // We do skip truncate log first, so metas removed by truncating can be removed in this execution. + // Fills: EditMeta for new Base. + m.doMetaEdits(ctx, mig, &result) + return result } @@ -847,6 +849,7 @@ func (m MigrationExt) writeBase(ctx context.Context, mig *pb.Migration) error { } // doMetaEdits applies the modification to the meta files in the storage. +// This will delete data files firstly. Make sure the new BASE was persisted before calling this. func (m MigrationExt) doMetaEdits(ctx context.Context, mig *pb.Migration, out *MigratedTo) { m.Hooks.StartHandlingMetaEdits(mig.EditMeta) @@ -854,14 +857,26 @@ func (m MigrationExt) doMetaEdits(ctx context.Context, mig *pb.Migration, out *M if isEmptyEdition(medit) { return } + + // Sometimes, the meta file will be deleted by truncating. + // We clean up those meta edits. + // NOTE: can we unify the deletion of truncating and meta editing? + // Say, add a "normalize" phase that load all files to be deleted to the migration. + // The problem here is a huge migration may be created in memory then leading to OOM. + exists, errChkExist := m.s.FileExists(ctx, medit.Path) + if errChkExist == nil && !exists { + log.Warn("The meta file doesn't exist, skipping the edit", zap.String("path", medit.Path)) + return + } + + // Firstly delete data so they won't leak when BR crashes. 
+ m.cleanUpFor(ctx, medit, out) err := m.applyMetaEdit(ctx, medit) if err != nil { out.NewBase.EditMeta = append(out.NewBase.EditMeta, medit) out.Warnings = append(out.Warnings, errors.Annotatef(err, "failed to apply meta edit %s to meta file", medit.Path)) return } - - m.cleanUpFor(ctx, medit, out) } defer m.Hooks.HandingMetaEditDone() @@ -903,6 +918,13 @@ func (m MigrationExt) cleanUpFor(ctx context.Context, medit *pb.MetaEdit, out *M } } + if len(out.Warnings) > 0 { + log.Warn( + "Failed to clean up for meta edit.", + zap.String("meta-edit", medit.Path), + zap.Errors("warnings", out.Warnings), + ) + } if !isEmptyEdition(newMetaEdit) { out.NewBase.EditMeta = append(out.NewBase.EditMeta, newMetaEdit) } @@ -941,7 +963,6 @@ func (m MigrationExt) applyMetaEditTo(ctx context.Context, medit *pb.MetaEdit, m }) metadata.FileGroups = slices.DeleteFunc(metadata.FileGroups, func(dfg *pb.DataFileGroup) bool { del := slices.Contains(medit.DeletePhysicalFiles, dfg.Path) - fmt.Println(medit.Path, medit.DeletePhysicalFiles, dfg.Path, del) return del }) for _, group := range metadata.FileGroups { @@ -1110,6 +1131,7 @@ func (m MigrationExt) doTruncateLogs( // We have already written `truncated-to` to the storage hence // we don't need to worry that the user access files already deleted. aOut := new(MigratedTo) + aOut.NewBase = new(pb.Migration) m.cleanUpFor(ctx, me, aOut) updateResult(func(r *MigratedTo) { r.Warnings = append(r.Warnings, aOut.Warnings...) diff --git a/br/pkg/stream/stream_metas_test.go b/br/pkg/stream/stream_metas_test.go index c6055459a26c0..bcea9cb585953 100644 --- a/br/pkg/stream/stream_metas_test.go +++ b/br/pkg/stream/stream_metas_test.go @@ -3,6 +3,7 @@ package stream import ( + "bytes" "context" "fmt" "math" @@ -23,6 +24,7 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/pkg/util/intest" "github.com/stretchr/testify/require" + "go.uber.org/multierr" "go.uber.org/zap" "golang.org/x/exp/maps" ) @@ -347,8 +349,6 @@ func TestTruncateSafepoint(t *testing.T) { } func TestTruncateSafepointForGCS(t *testing.T) { - t.SkipNow() - require.True(t, intest.InTest) ctx := context.Background() opts := fakestorage.Options{ @@ -389,144 +389,6 @@ func TestTruncateSafepointForGCS(t *testing.T) { } } -func fakeMetaDatas(t *testing.T, helper *MetadataHelper, cf string) []*backuppb.Metadata { - ms := []*backuppb.Metadata{ - { - StoreId: 1, - MinTs: 1500, - MaxTs: 2000, - Files: []*backuppb.DataFileInfo{ - { - MinTs: 1500, - MaxTs: 2000, - Cf: cf, - MinBeginTsInDefaultCf: 800, - }, - }, - }, - { - StoreId: 2, - MinTs: 3000, - MaxTs: 4000, - Files: []*backuppb.DataFileInfo{ - { - MinTs: 3000, - MaxTs: 4000, - Cf: cf, - MinBeginTsInDefaultCf: 2000, - }, - }, - }, - { - StoreId: 3, - MinTs: 5100, - MaxTs: 6100, - Files: []*backuppb.DataFileInfo{ - { - MinTs: 5100, - MaxTs: 6100, - Cf: cf, - MinBeginTsInDefaultCf: 1800, - }, - }, - }, - } - - m2s := make([]*backuppb.Metadata, 0, len(ms)) - for _, m := range ms { - raw, err := m.Marshal() - require.NoError(t, err) - m2, err := helper.ParseToMetadata(raw) - require.NoError(t, err) - m2s = append(m2s, m2) - } - return m2s -} - -func fakeMetaDataV2s(t *testing.T, helper *MetadataHelper, cf string) []*backuppb.Metadata { - ms := []*backuppb.Metadata{ - { - StoreId: 1, - MinTs: 1500, - MaxTs: 6100, - FileGroups: []*backuppb.DataFileGroup{ - { - MinTs: 1500, - MaxTs: 6100, - DataFilesInfo: []*backuppb.DataFileInfo{ - { - MinTs: 1500, - MaxTs: 2000, - Cf: cf, - MinBeginTsInDefaultCf: 800, - }, - { - MinTs: 3000, - MaxTs: 4000, - Cf: cf, - 
MinBeginTsInDefaultCf: 2000, - }, - { - MinTs: 5200, - MaxTs: 6100, - Cf: cf, - MinBeginTsInDefaultCf: 1700, - }, - }, - }, - { - MinTs: 1000, - MaxTs: 5100, - DataFilesInfo: []*backuppb.DataFileInfo{ - { - MinTs: 9000, - MaxTs: 10000, - Cf: cf, - MinBeginTsInDefaultCf: 0, - }, - { - MinTs: 3000, - MaxTs: 4000, - Cf: cf, - MinBeginTsInDefaultCf: 2000, - }, - }, - }, - }, - MetaVersion: backuppb.MetaVersion_V2, - }, - { - StoreId: 2, - MinTs: 4100, - MaxTs: 5100, - FileGroups: []*backuppb.DataFileGroup{ - { - MinTs: 4100, - MaxTs: 5100, - DataFilesInfo: []*backuppb.DataFileInfo{ - { - MinTs: 4100, - MaxTs: 5100, - Cf: cf, - MinBeginTsInDefaultCf: 1800, - }, - }, - }, - }, - MetaVersion: backuppb.MetaVersion_V2, - }, - } - m2s := make([]*backuppb.Metadata, 0, len(ms)) - for _, m := range ms { - raw, err := m.Marshal() - require.NoError(t, err) - m2, err := helper.ParseToMetadata(raw) - require.NoError(t, err) - m2s = append(m2s, m2) - } - return m2s -} - func ff(minTS, maxTS uint64) *backuppb.DataFileGroup { return f(0, minTS, maxTS, DefaultCF, 0) } @@ -707,12 +569,26 @@ func pmt(s storage.ExternalStorage, path string, mt *backuppb.Metadata) { } } +func pmlt(s storage.ExternalStorage, path string, mt *backuppb.Metadata, logPath func(i int) string) { + for i, g := range mt.FileGroups { + g.Path = logPath(i) + maxLen := uint64(0) + for _, sg := range g.DataFilesInfo { + if sg.RangeOffset+sg.Length > maxLen { + maxLen = sg.RangeOffset + sg.Length + } + } + os.WriteFile(g.Path, bytes.Repeat([]byte("0"), int(maxLen)), 0o644) + } + pmt(s, path, mt) +} + func pmig(s storage.ExternalStorage, num uint64, mt *backuppb.Migration) string { numS := fmt.Sprintf("%08d", num) + name := fmt.Sprintf("%s_%08X.mgrt", numS, hashMigration(mt)) if num == baseMigrationSN { - numS = baseMigrationName + name = baseMigrationName } - name := fmt.Sprintf("%s_%08X.mgrt", numS, hashMigration(mt)) p := path.Join(migrationPrefix, name) data, err := mt.Marshal() @@ -2830,3 +2706,83 @@ func TestWithSimpleTruncate(t *testing.T) { } } } + +func TestAppendingMigs(t *testing.T) { + s := tmp(t) + ctx := context.Background() + mN := func(n uint64) string { return fmt.Sprintf("v1/backupmeta/%05d.meta", n) } + lN := func(mn int) func(n int) string { + return func(n int) string { return fmt.Sprintf("v1/%05d_%05d.log", mn, n) } + } + placeholder := func(pfx string) string { + path := path.Join(pfx, "monolith") + require.NoError(t, s.WriteFile(ctx, path, []byte("🪨"))) + return path + } + // asp appends a span to the data file info. 
+ asp := func(b *backuppb.DataFileInfo, span *backuppb.Span) *backuppb.DataFileInfo { + b.RangeOffset = span.Offset + b.RangeLength = span.Length + return b + } + + pmlt(s, mN(1), mf(1, [][]*backuppb.DataFileInfo{ + { + asp(fi(10, 20, DefaultCF, 0), sp(0, 10)), + asp(fi(15, 30, WriteCF, 8), sp(10, 15)), + asp(fi(25, 35, WriteCF, 11), sp(25, 10)), + asp(fi(42, 65, WriteCF, 20), sp(35, 10)), + }, + }), lN(1)) + pmlt(s, mN(2), mf(2, [][]*backuppb.DataFileInfo{ + { + asp(fi(45, 64, WriteCF, 32), sp(0, 19)), + asp(fi(65, 70, WriteCF, 55), sp(19, 5)), + asp(fi(50, 60, DefaultCF, 0), sp(24, 10)), + asp(fi(80, 85, WriteCF, 72), sp(34, 5)), + }, + }), lN(2)) + est := MigerationExtension(s) + + cDir := func(n uint64) string { return fmt.Sprintf("%05d/output", n) } + aDir := func(n uint64) string { return fmt.Sprintf("%05d/metas", n) } + compaction := mCompaction(placeholder(cDir(1)), placeholder(aDir(1)), 15, 66) + del11 := mLogDel(mN(1), spans(lN(1)(0), 45, sp(0, 10), sp(10, 15))) + del12 := mLogDel(mN(1), spans(lN(1)(0), 45, sp(35, 10), sp(25, 10))) + del2 := mLogDel(mN(2), spans(lN(2)(0), 39, sp(24, 10))) + m := mig(compaction, del11, del2) + pmig(s, 1, m) + pmig(s, 2, mig(del12)) + + res := est.MergeAndMigrateTo(ctx, math.MaxInt, MMOptAlwaysRunTruncate(), MMOptAppendPhantomMigration(*mig(mTruncatedTo(65)))) + require.NoError(t, multierr.Combine(res.Warnings...)) + requireMigrationsEqual(t, res.NewBase, mig(mTruncatedTo(65), compaction, del2)) + require.FileExists(t, filepath.Join(s.Base(), cDir(1), "monolith")) + + res = est.MergeAndMigrateTo(ctx, math.MaxInt, MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { + return true + }), MMOptAlwaysRunTruncate(), MMOptAppendPhantomMigration(*mig(mTruncatedTo(100)))) + require.NoError(t, multierr.Combine(res.Warnings...)) + requireMigrationsEqual(t, res.NewBase, mig(mTruncatedTo(100))) + require.NoFileExists(t, filepath.Join(s.Base(), cDir(1), "monolith")) + require.NoFileExists(t, filepath.Join(s.Base(), mN(1))) + require.NoFileExists(t, filepath.Join(s.Base(), lN(1)(0))) +} + +func TestUserAbort(t *testing.T) { + s := tmp(t) + ctx := context.Background() + + pmig(s, 0, mig(mTruncatedTo(42))) + pmig(s, 1, mig(mTruncatedTo(96))) + est := MigerationExtension(s) + var res MergeAndMigratedTo + effs := est.DryRun(func(me MigrationExt) { + res = me.MergeAndMigrateTo(ctx, 1, MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { + return false + })) + }) + require.Len(t, res.Warnings, 1) + require.ErrorContains(t, res.Warnings[0], "aborted") + require.Empty(t, effs) +} diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go index b69c857fa9aaa..6f1f5dce7bbd2 100644 --- a/br/pkg/task/stream.go +++ b/br/pkg/task/stream.go @@ -64,13 +64,14 @@ import ( ) const ( - flagYes = "yes" - flagUntil = "until" - flagStreamJSONOutput = "json" - flagStreamTaskName = "task-name" - flagStreamStartTS = "start-ts" - flagStreamEndTS = "end-ts" - flagGCSafePointTTS = "gc-ttl" + flagYes = "yes" + flagCleanUpCompactions = "clean-up-compactions" + flagUntil = "until" + flagStreamJSONOutput = "json" + flagStreamTaskName = "task-name" + flagStreamStartTS = "start-ts" + flagStreamEndTS = "end-ts" + flagGCSafePointTTS = "gc-ttl" truncateLockPath = "truncating.lock" hintOnTruncateLock = "There might be another truncate task running, or a truncate task that didn't exit properly. 
" + @@ -125,9 +126,10 @@ type StreamConfig struct { SafePointTTL int64 `json:"safe-point-ttl" toml:"safe-point-ttl"` // Spec for the command `truncate`, we should truncate the until when? - Until uint64 `json:"until" toml:"until"` - DryRun bool `json:"dry-run" toml:"dry-run"` - SkipPrompt bool `json:"skip-prompt" toml:"skip-prompt"` + Until uint64 `json:"until" toml:"until"` + DryRun bool `json:"dry-run" toml:"dry-run"` + SkipPrompt bool `json:"skip-prompt" toml:"skip-prompt"` + CleanUpCompactions bool `json:"clean-up-compactions" toml:"clean-up-compactions"` // Spec for the command `status`. JSONOutput bool `json:"json-output" toml:"json-output"` @@ -190,6 +192,7 @@ func DefineStreamTruncateLogFlags(flags *pflag.FlagSet) { "(support TSO or datetime, e.g. '400036290571534337' or '2018-05-11 01:42:23+0800'.)") flags.Bool(flagDryRun, false, "Run the command but don't really delete the files.") flags.BoolP(flagYes, "y", false, "Skip all prompts and always execute the command.") + flags.Bool(flagCleanUpCompactions, false, "Clean up compaction files. Including the compacted log files and expired SST files.") } func (cfg *StreamConfig) ParseStreamStatusFromFlags(flags *pflag.FlagSet) error { @@ -220,6 +223,9 @@ func (cfg *StreamConfig) ParseStreamTruncateFromFlags(flags *pflag.FlagSet) erro if cfg.DryRun, err = flags.GetBool(flagDryRun); err != nil { return errors.Trace(err) } + if cfg.CleanUpCompactions, err = flags.GetBool(flagCleanUpCompactions); err != nil { + return errors.Trace(err) + } return nil } @@ -1045,7 +1051,34 @@ func RunStreamTruncate(c context.Context, g glue.Glue, cmdName string, cfg *Stre } } - readMetaDone := console.ShowTask("Reading log backup metadata... ", glue.WithTimeCost()) + if cfg.CleanUpCompactions { + est := stream.MigerationExtension(extStorage) + est.Hooks = stream.NewProgressBarHooks(console) + newSN := math.MaxInt + optPrompt := stream.MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { + console.Println("We are going to do the following: ") + tbl := console.CreateTable() + stream.AddMigrationToTable(m, tbl) + tbl.Print() + return console.PromptBool("Continue? ") + }) + optAppend := stream.MMOptAppendPhantomMigration(backuppb.Migration{TruncatedTo: cfg.Until}) + opts := []stream.MergeAndMigrateToOpt{optPrompt, optAppend, stream.MMOptAlwaysRunTruncate()} + var res stream.MergeAndMigratedTo + if cfg.DryRun { + est.DryRun(func(me stream.MigrationExt) { + res = me.MergeAndMigrateTo(ctx, newSN, opts...) + }) + } else { + res = est.MergeAndMigrateTo(ctx, newSN, opts...) + } + if len(res.Warnings) > 0 { + glue.PrintList(console, "the following errors happened", res.Warnings, 10) + } + return nil + } + + readMetaDone := console.ShowTask("Reading Metadata... 
", glue.WithTimeCost()) metas := stream.StreamMetadataSet{ MetadataDownloadBatchSize: cfg.MetadataDownloadBatchSize, Helper: stream.NewMetadataHelper(), From 628bab451dee31f28ac09209c7951c26ed34cc7d Mon Sep 17 00:00:00 2001 From: 3pointer Date: Mon, 2 Dec 2024 18:03:53 +0800 Subject: [PATCH 08/17] BR: support restore compacted logs with checkpoint (#57532) close pingcap/tidb#57535 --- br/pkg/checkpoint/BUILD.bazel | 2 +- br/pkg/checkpoint/checkpoint.go | 9 +- br/pkg/checkpoint/checkpoint_test.go | 112 ++++++++++++++++++------- br/pkg/checkpoint/external_storage.go | 9 -- br/pkg/checkpoint/log_restore.go | 13 +-- br/pkg/checkpoint/restore.go | 71 ++++++++++++---- br/pkg/checkpoint/storage.go | 12 +-- br/pkg/restore/import_mode_switcher.go | 68 ++++++++++----- br/pkg/restore/log_client/client.go | 15 ++-- br/pkg/restore/restorer.go | 18 +++- br/pkg/restore/snap_client/client.go | 6 +- br/pkg/storage/BUILD.bazel | 1 + br/pkg/storage/helper.go | 42 ++++++---- br/pkg/task/operator/migrate_to.go | 6 +- br/pkg/task/restore.go | 10 ++- 15 files changed, 259 insertions(+), 135 deletions(-) diff --git a/br/pkg/checkpoint/BUILD.bazel b/br/pkg/checkpoint/BUILD.bazel index c8679787db4a8..dc3471726625b 100644 --- a/br/pkg/checkpoint/BUILD.bazel +++ b/br/pkg/checkpoint/BUILD.bazel @@ -45,7 +45,7 @@ go_test( srcs = ["checkpoint_test.go"], flaky = True, race = "on", - shard_count = 8, + shard_count = 9, deps = [ ":checkpoint", "//br/pkg/gluetidb", diff --git a/br/pkg/checkpoint/checkpoint.go b/br/pkg/checkpoint/checkpoint.go index 765ede725fb98..78f9a7587d255 100644 --- a/br/pkg/checkpoint/checkpoint.go +++ b/br/pkg/checkpoint/checkpoint.go @@ -67,13 +67,7 @@ type RangeType struct { *rtree.Range } -func (r RangeType) IdentKey() []byte { - return r.StartKey -} - -type ValueType interface { - IdentKey() []byte -} +type ValueType any type CheckpointMessage[K KeyType, V ValueType] struct { // start-key of the origin range @@ -261,7 +255,6 @@ func (r *CheckpointRunner[K, V]) WaitForFinish(ctx context.Context, flush bool) // wait the range flusher exit r.wg.Wait() // remove the checkpoint lock - r.checkpointStorage.deleteLock(ctx) r.checkpointStorage.close() } diff --git a/br/pkg/checkpoint/checkpoint_test.go b/br/pkg/checkpoint/checkpoint_test.go index c6756f8058c5c..b02402005aa43 100644 --- a/br/pkg/checkpoint/checkpoint_test.go +++ b/br/pkg/checkpoint/checkpoint_test.go @@ -75,7 +75,7 @@ func TestCheckpointMetaForRestore(t *testing.T) { }, }, } - err = checkpoint.SaveCheckpointMetadataForSnapshotRestore(ctx, se, checkpointMetaForSnapshotRestore) + err = checkpoint.SaveCheckpointMetadataForSstRestore(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName, checkpointMetaForSnapshotRestore) require.NoError(t, err) checkpointMetaForSnapshotRestore2, err := checkpoint.LoadCheckpointMetadataForSnapshotRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor()) require.NoError(t, err) @@ -278,9 +278,9 @@ func TestCheckpointRestoreRunner(t *testing.T) { se, err := g.CreateSession(s.Mock.Storage) require.NoError(t, err) - err = checkpoint.SaveCheckpointMetadataForSnapshotRestore(ctx, se, &checkpoint.CheckpointMetadataForSnapshotRestore{}) + err = checkpoint.SaveCheckpointMetadataForSstRestore(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName, &checkpoint.CheckpointMetadataForSnapshotRestore{}) require.NoError(t, err) - checkpointRunner, err := checkpoint.StartCheckpointRestoreRunnerForTest(ctx, se, 5*time.Second, 3*time.Second) + checkpointRunner, err := checkpoint.StartCheckpointRestoreRunnerForTest(ctx, 
se, checkpoint.SnapshotRestoreCheckpointDatabaseName, 5*time.Second, 3*time.Second) require.NoError(t, err) data := map[string]struct { @@ -310,7 +310,7 @@ func TestCheckpointRestoreRunner(t *testing.T) { } for _, d := range data { - err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, 1, d.RangeKey) + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointRangeKeyItem(1, d.RangeKey)) require.NoError(t, err) } @@ -320,7 +320,7 @@ func TestCheckpointRestoreRunner(t *testing.T) { checkpointRunner.FlushChecksum(ctx, 4, 4, 4, 4) for _, d := range data2 { - err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, 2, d.RangeKey) + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointRangeKeyItem(2, d.RangeKey)) require.NoError(t, err) } @@ -343,7 +343,7 @@ func TestCheckpointRestoreRunner(t *testing.T) { respCount += 1 } - _, err = checkpoint.LoadCheckpointDataForSnapshotRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), checker) + _, err = checkpoint.LoadCheckpointDataForSstRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), checkpoint.SnapshotRestoreCheckpointDatabaseName, checker) require.NoError(t, err) require.Equal(t, 4, respCount) @@ -355,10 +355,10 @@ func TestCheckpointRestoreRunner(t *testing.T) { require.Equal(t, checksum[i].Crc64xor, uint64(i)) } - err = checkpoint.RemoveCheckpointDataForSnapshotRestore(ctx, s.Mock.Domain, se) + err = checkpoint.RemoveCheckpointDataForSstRestore(ctx, s.Mock.Domain, se, checkpoint.SnapshotRestoreCheckpointDatabaseName) require.NoError(t, err) - exists := checkpoint.ExistsSnapshotRestoreCheckpoint(ctx, s.Mock.Domain) + exists := checkpoint.ExistsSstRestoreCheckpoint(ctx, s.Mock.Domain, checkpoint.SnapshotRestoreCheckpointDatabaseName) require.False(t, exists) exists = s.Mock.Domain.InfoSchema().SchemaExists(pmodel.NewCIStr(checkpoint.SnapshotRestoreCheckpointDatabaseName)) require.False(t, exists) @@ -371,9 +371,9 @@ func TestCheckpointRunnerRetry(t *testing.T) { se, err := g.CreateSession(s.Mock.Storage) require.NoError(t, err) - err = checkpoint.SaveCheckpointMetadataForSnapshotRestore(ctx, se, &checkpoint.CheckpointMetadataForSnapshotRestore{}) + err = checkpoint.SaveCheckpointMetadataForSstRestore(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName, &checkpoint.CheckpointMetadataForSnapshotRestore{}) require.NoError(t, err) - checkpointRunner, err := checkpoint.StartCheckpointRestoreRunnerForTest(ctx, se, 100*time.Millisecond, 300*time.Millisecond) + checkpointRunner, err := checkpoint.StartCheckpointRestoreRunnerForTest(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName, 100*time.Millisecond, 300*time.Millisecond) require.NoError(t, err) err = failpoint.Enable("github.com/pingcap/tidb/br/pkg/checkpoint/failed-after-checkpoint-flushes", "return(true)") @@ -382,9 +382,9 @@ func TestCheckpointRunnerRetry(t *testing.T) { err = failpoint.Disable("github.com/pingcap/tidb/br/pkg/checkpoint/failed-after-checkpoint-flushes") require.NoError(t, err) }() - err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, 1, "123") + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointRangeKeyItem(1, "123")) require.NoError(t, err) - err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, 2, "456") + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointRangeKeyItem(2, "456")) require.NoError(t, err) err = checkpointRunner.FlushChecksum(ctx, 1, 1, 1, 1) require.NoError(t, err) 
@@ -392,7 +392,7 @@ func TestCheckpointRunnerRetry(t *testing.T) { time.Sleep(time.Second) err = failpoint.Disable("github.com/pingcap/tidb/br/pkg/checkpoint/failed-after-checkpoint-flushes") require.NoError(t, err) - err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, 3, "789") + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointRangeKeyItem(3, "789")) require.NoError(t, err) err = checkpointRunner.FlushChecksum(ctx, 3, 3, 3, 3) require.NoError(t, err) @@ -400,14 +400,15 @@ func TestCheckpointRunnerRetry(t *testing.T) { se, err = g.CreateSession(s.Mock.Storage) require.NoError(t, err) recordSet := make(map[string]int) - _, err = checkpoint.LoadCheckpointDataForSnapshotRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), - func(tableID int64, rangeKey checkpoint.RestoreValueType) { - recordSet[fmt.Sprintf("%d_%s", tableID, rangeKey)] += 1 + _, err = checkpoint.LoadCheckpointDataForSstRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), + checkpoint.SnapshotRestoreCheckpointDatabaseName, + func(tableID int64, v checkpoint.RestoreValueType) { + recordSet[fmt.Sprintf("%d_%s", tableID, v.RangeKey)] += 1 }) require.NoError(t, err) - require.LessOrEqual(t, 1, recordSet["1_{123}"]) - require.LessOrEqual(t, 1, recordSet["2_{456}"]) - require.LessOrEqual(t, 1, recordSet["3_{789}"]) + require.LessOrEqual(t, 1, recordSet["1_123"]) + require.LessOrEqual(t, 1, recordSet["2_456"]) + require.LessOrEqual(t, 1, recordSet["3_789"]) items, _, err := checkpoint.LoadCheckpointChecksumForRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor()) require.NoError(t, err) require.Equal(t, fmt.Sprintf("%d_%d_%d", items[1].Crc64xor, items[1].TotalBytes, items[1].TotalKvs), "1_1_1") @@ -422,14 +423,14 @@ func TestCheckpointRunnerNoRetry(t *testing.T) { se, err := g.CreateSession(s.Mock.Storage) require.NoError(t, err) - err = checkpoint.SaveCheckpointMetadataForSnapshotRestore(ctx, se, &checkpoint.CheckpointMetadataForSnapshotRestore{}) + err = checkpoint.SaveCheckpointMetadataForSstRestore(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName, &checkpoint.CheckpointMetadataForSnapshotRestore{}) require.NoError(t, err) - checkpointRunner, err := checkpoint.StartCheckpointRestoreRunnerForTest(ctx, se, 100*time.Millisecond, 300*time.Millisecond) + checkpointRunner, err := checkpoint.StartCheckpointRestoreRunnerForTest(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName, 100*time.Millisecond, 300*time.Millisecond) require.NoError(t, err) - err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, 1, "123") + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointRangeKeyItem(1, "123")) require.NoError(t, err) - err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, 2, "456") + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointRangeKeyItem(2, "456")) require.NoError(t, err) err = checkpointRunner.FlushChecksum(ctx, 1, 1, 1, 1) require.NoError(t, err) @@ -440,13 +441,14 @@ func TestCheckpointRunnerNoRetry(t *testing.T) { se, err = g.CreateSession(s.Mock.Storage) require.NoError(t, err) recordSet := make(map[string]int) - _, err = checkpoint.LoadCheckpointDataForSnapshotRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), - func(tableID int64, rangeKey checkpoint.RestoreValueType) { - recordSet[fmt.Sprintf("%d_%s", tableID, rangeKey)] += 1 + _, err = checkpoint.LoadCheckpointDataForSstRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), + 
checkpoint.SnapshotRestoreCheckpointDatabaseName, + func(tableID int64, v checkpoint.RestoreValueType) { + recordSet[fmt.Sprintf("%d_%s", tableID, v.RangeKey)] += 1 }) require.NoError(t, err) - require.Equal(t, 1, recordSet["1_{123}"]) - require.Equal(t, 1, recordSet["2_{456}"]) + require.Equal(t, 1, recordSet["1_123"]) + require.Equal(t, 1, recordSet["2_456"]) items, _, err := checkpoint.LoadCheckpointChecksumForRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor()) require.NoError(t, err) require.Equal(t, fmt.Sprintf("%d_%d_%d", items[1].Crc64xor, items[1].TotalBytes, items[1].TotalKvs), "1_1_1") @@ -584,3 +586,57 @@ func TestCheckpointRunnerLock(t *testing.T) { runner.WaitForFinish(ctx, true) } + +func TestCheckpointCompactedRestoreRunner(t *testing.T) { + ctx := context.Background() + s := utiltest.CreateRestoreSchemaSuite(t) + g := gluetidb.New() + se, err := g.CreateSession(s.Mock.Storage) + require.NoError(t, err) + + err = checkpoint.SaveCheckpointMetadataForSstRestore(ctx, se, checkpoint.CustomSSTRestoreCheckpointDatabaseName, nil) + require.NoError(t, err) + checkpointRunner, err := checkpoint.StartCheckpointRestoreRunnerForTest(ctx, se, checkpoint.CustomSSTRestoreCheckpointDatabaseName, 500*time.Millisecond, time.Second) + require.NoError(t, err) + + data := map[string]struct { + Name string + }{ + "a": {Name: "a"}, + "A": {Name: "A"}, + "1": {Name: "1"}, + } + + for _, d := range data { + err = checkpoint.AppendRangesForRestore(ctx, checkpointRunner, checkpoint.NewCheckpointFileItem(1, d.Name)) + require.NoError(t, err) + } + + checkpointRunner.FlushChecksum(ctx, 1, 1, 1, 1) + checkpointRunner.FlushChecksum(ctx, 2, 2, 2, 2) + + checkpointRunner.WaitForFinish(ctx, true) + + se, err = g.CreateSession(s.Mock.Storage) + require.NoError(t, err) + respCount := 0 + checker := func(tableID int64, resp checkpoint.RestoreValueType) { + require.NotNil(t, resp) + d, ok := data[resp.Name] + require.True(t, ok) + require.Equal(t, d.Name, resp.Name) + respCount++ + } + + _, err = checkpoint.LoadCheckpointDataForSstRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), checkpoint.CustomSSTRestoreCheckpointDatabaseName, checker) + require.NoError(t, err) + require.Equal(t, 3, respCount) + + err = checkpoint.RemoveCheckpointDataForSstRestore(ctx, s.Mock.Domain, se, checkpoint.CustomSSTRestoreCheckpointDatabaseName) + require.NoError(t, err) + + exists := checkpoint.ExistsSstRestoreCheckpoint(ctx, s.Mock.Domain, checkpoint.CustomSSTRestoreCheckpointDatabaseName) + require.False(t, exists) + exists = s.Mock.Domain.InfoSchema().SchemaExists(pmodel.NewCIStr(checkpoint.CustomSSTRestoreCheckpointDatabaseName)) + require.False(t, exists) +} diff --git a/br/pkg/checkpoint/external_storage.go b/br/pkg/checkpoint/external_storage.go index 078f2f1294e91..1d7c999aa8722 100644 --- a/br/pkg/checkpoint/external_storage.go +++ b/br/pkg/checkpoint/external_storage.go @@ -187,12 +187,3 @@ func (s *externalCheckpointStorage) updateLock(ctx context.Context) error { return nil } - -func (s *externalCheckpointStorage) deleteLock(ctx context.Context) { - if s.lockId > 0 { - err := s.storage.DeleteFile(ctx, s.CheckpointLockPath) - if err != nil { - log.Warn("failed to remove the checkpoint lock", zap.Error(err)) - } - } -} diff --git a/br/pkg/checkpoint/log_restore.go b/br/pkg/checkpoint/log_restore.go index b2ae3c398a3c8..0fd046b67ad7c 100644 --- a/br/pkg/checkpoint/log_restore.go +++ b/br/pkg/checkpoint/log_restore.go @@ -17,11 +17,9 @@ package checkpoint import ( "context" "encoding/json" - "fmt" "time" 
"github.com/pingcap/errors" - "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/meta/model" @@ -39,10 +37,6 @@ type LogRestoreValueType struct { Foff int } -func (l LogRestoreValueType) IdentKey() []byte { - return []byte(fmt.Sprint(l.Goff, '.', l.Foff, '.', l.TableID)) -} - type LogRestoreValueMarshaled struct { // group index in the metadata Goff int `json:"goff"` @@ -50,11 +44,6 @@ type LogRestoreValueMarshaled struct { Foffs map[int64][]int `json:"foffs"` } -func (l LogRestoreValueMarshaled) IdentKey() []byte { - log.Fatal("unimplement!") - return nil -} - // valueMarshalerForLogRestore convert the checkpoint data‘s format to an smaller space-used format // input format : // @@ -299,7 +288,7 @@ func TryToGetCheckpointTaskInfo( return nil, errors.Trace(err) } } - hasSnapshotMetadata := ExistsSnapshotRestoreCheckpoint(ctx, dom) + hasSnapshotMetadata := ExistsSstRestoreCheckpoint(ctx, dom, SnapshotRestoreCheckpointDatabaseName) return &CheckpointTaskInfoForLogRestore{ Metadata: metadata, diff --git a/br/pkg/checkpoint/restore.go b/br/pkg/checkpoint/restore.go index 88ff6f8f204de..c05ec3df8c2f2 100644 --- a/br/pkg/checkpoint/restore.go +++ b/br/pkg/checkpoint/restore.go @@ -30,11 +30,31 @@ import ( type RestoreKeyType = int64 type RestoreValueType struct { // the file key of a range - RangeKey string + RangeKey string `json:"range-key,omitempty"` + // the file name, used for compacted restore + Name string `json:"name,omitempty"` } -func (rv RestoreValueType) IdentKey() []byte { - return []byte(rv.RangeKey) +type CheckpointItem struct { + tableID RestoreKeyType + // used for table full backup restore + rangeKey string + // used for table raw/txn/compacted SST restore + name string +} + +func NewCheckpointRangeKeyItem(tableID RestoreKeyType, rangeKey string) *CheckpointItem { + return &CheckpointItem{ + tableID: tableID, + rangeKey: rangeKey, + } +} + +func NewCheckpointFileItem(tableID RestoreKeyType, fileName string) *CheckpointItem { + return &CheckpointItem{ + tableID: tableID, + name: fileName, + } } func valueMarshalerForRestore(group *RangeGroup[RestoreKeyType, RestoreValueType]) ([]byte, error) { @@ -45,11 +65,12 @@ func valueMarshalerForRestore(group *RangeGroup[RestoreKeyType, RestoreValueType func StartCheckpointRestoreRunnerForTest( ctx context.Context, se glue.Session, + dbName string, tick time.Duration, retryDuration time.Duration, ) (*CheckpointRunner[RestoreKeyType, RestoreValueType], error) { runner := newCheckpointRunner[RestoreKeyType, RestoreValueType]( - newTableCheckpointStorage(se, SnapshotRestoreCheckpointDatabaseName), + newTableCheckpointStorage(se, dbName), nil, valueMarshalerForRestore) runner.startCheckpointMainLoop(ctx, tick, tick, 0, retryDuration) @@ -60,9 +81,10 @@ func StartCheckpointRestoreRunnerForTest( func StartCheckpointRunnerForRestore( ctx context.Context, se glue.Session, + dbName string, ) (*CheckpointRunner[RestoreKeyType, RestoreValueType], error) { runner := newCheckpointRunner[RestoreKeyType, RestoreValueType]( - newTableCheckpointStorage(se, SnapshotRestoreCheckpointDatabaseName), + newTableCheckpointStorage(se, dbName), nil, valueMarshalerForRestore) // for restore, no need to set lock @@ -75,25 +97,33 @@ func StartCheckpointRunnerForRestore( func AppendRangesForRestore( ctx context.Context, r *CheckpointRunner[RestoreKeyType, RestoreValueType], - tableID RestoreKeyType, - rangeKey string, + c *CheckpointItem, ) error { + var group RestoreValueType + if 
len(c.rangeKey) != 0 { + group.RangeKey = c.rangeKey + } else if len(c.name) != 0 { + group.Name = c.name + } else { + return errors.New("either rangekey or name should be used in checkpoint append") + } return r.Append(ctx, &CheckpointMessage[RestoreKeyType, RestoreValueType]{ - GroupKey: tableID, + GroupKey: c.tableID, Group: []RestoreValueType{ - {RangeKey: rangeKey}, + group, }, }) } // load the whole checkpoint range data and retrieve the metadata of restored ranges // and return the total time cost in the past executions -func LoadCheckpointDataForSnapshotRestore[K KeyType, V ValueType]( +func LoadCheckpointDataForSstRestore[K KeyType, V ValueType]( ctx context.Context, execCtx sqlexec.RestrictedSQLExecutor, + dbName string, fn func(K, V), ) (time.Duration, error) { - return selectCheckpointData(ctx, execCtx, SnapshotRestoreCheckpointDatabaseName, fn) + return selectCheckpointData(ctx, execCtx, dbName, fn) } func LoadCheckpointChecksumForRestore( @@ -118,28 +148,33 @@ func LoadCheckpointMetadataForSnapshotRestore( return m, err } -func SaveCheckpointMetadataForSnapshotRestore( +func SaveCheckpointMetadataForSstRestore( ctx context.Context, se glue.Session, + dbName string, meta *CheckpointMetadataForSnapshotRestore, ) error { - err := initCheckpointTable(ctx, se, SnapshotRestoreCheckpointDatabaseName, + err := initCheckpointTable(ctx, se, dbName, []string{checkpointDataTableName, checkpointChecksumTableName}) if err != nil { return errors.Trace(err) } - return insertCheckpointMeta(ctx, se, SnapshotRestoreCheckpointDatabaseName, checkpointMetaTableName, meta) + if meta != nil { + return insertCheckpointMeta(ctx, se, dbName, checkpointMetaTableName, meta) + } + return nil } -func ExistsSnapshotRestoreCheckpoint( +func ExistsSstRestoreCheckpoint( ctx context.Context, dom *domain.Domain, + dbName string, ) bool { return dom.InfoSchema(). - TableExists(pmodel.NewCIStr(SnapshotRestoreCheckpointDatabaseName), pmodel.NewCIStr(checkpointMetaTableName)) + TableExists(pmodel.NewCIStr(dbName), pmodel.NewCIStr(checkpointMetaTableName)) } -func RemoveCheckpointDataForSnapshotRestore(ctx context.Context, dom *domain.Domain, se glue.Session) error { - return dropCheckpointTables(ctx, dom, se, SnapshotRestoreCheckpointDatabaseName, +func RemoveCheckpointDataForSstRestore(ctx context.Context, dom *domain.Domain, se glue.Session, dbName string) error { + return dropCheckpointTables(ctx, dom, se, dbName, []string{checkpointDataTableName, checkpointChecksumTableName, checkpointMetaTableName}) } diff --git a/br/pkg/checkpoint/storage.go b/br/pkg/checkpoint/storage.go index 465924f8300f4..4a37a14b0da12 100644 --- a/br/pkg/checkpoint/storage.go +++ b/br/pkg/checkpoint/storage.go @@ -38,7 +38,6 @@ type checkpointStorage interface { initialLock(ctx context.Context) error updateLock(ctx context.Context) error - deleteLock(ctx context.Context) close() } @@ -48,8 +47,9 @@ type checkpointStorage interface { // 2. BR regards the metadata table as a file so that it is not empty if the table exists. // 3. BR regards the checkpoint table as a directory which is managed by metadata table. 
const ( - LogRestoreCheckpointDatabaseName string = "__TiDB_BR_Temporary_Log_Restore_Checkpoint" - SnapshotRestoreCheckpointDatabaseName string = "__TiDB_BR_Temporary_Snapshot_Restore_Checkpoint" + LogRestoreCheckpointDatabaseName string = "__TiDB_BR_Temporary_Log_Restore_Checkpoint" + SnapshotRestoreCheckpointDatabaseName string = "__TiDB_BR_Temporary_Snapshot_Restore_Checkpoint" + CustomSSTRestoreCheckpointDatabaseName string = "__TiDB_BR_Temporary_Custom_SST_Restore_Checkpoint" // directory level table checkpointDataTableName string = "cpt_data" @@ -90,7 +90,9 @@ const ( // IsCheckpointDB checks whether the dbname is checkpoint database. func IsCheckpointDB(dbname pmodel.CIStr) bool { - return dbname.O == LogRestoreCheckpointDatabaseName || dbname.O == SnapshotRestoreCheckpointDatabaseName + return dbname.O == LogRestoreCheckpointDatabaseName || + dbname.O == SnapshotRestoreCheckpointDatabaseName || + dbname.O == CustomSSTRestoreCheckpointDatabaseName } const CheckpointIdMapBlockSize int = 524288 @@ -142,8 +144,6 @@ func (s *tableCheckpointStorage) updateLock(ctx context.Context) error { return nil } -func (s *tableCheckpointStorage) deleteLock(ctx context.Context) {} - func (s *tableCheckpointStorage) flushCheckpointData(ctx context.Context, data []byte) error { sqls, argss := chunkInsertCheckpointSQLs(s.checkpointDBName, checkpointDataTableName, data) for i, sql := range sqls { diff --git a/br/pkg/restore/import_mode_switcher.go b/br/pkg/restore/import_mode_switcher.go index be01389c19e5f..0bec6a4d1e384 100644 --- a/br/pkg/restore/import_mode_switcher.go +++ b/br/pkg/restore/import_mode_switcher.go @@ -31,7 +31,9 @@ type ImportModeSwitcher struct { switchModeInterval time.Duration tlsConf *tls.Config - switchCh chan struct{} + mu sync.Mutex + cancel context.CancelFunc // Manages goroutine lifecycle + wg sync.WaitGroup } func NewImportModeSwitcher( @@ -43,15 +45,23 @@ func NewImportModeSwitcher( pdClient: pdClient, switchModeInterval: switchModeInterval, tlsConf: tlsConf, - switchCh: make(chan struct{}), } } -var closeOnce sync.Once - // switchToNormalMode switch tikv cluster to normal mode. func (switcher *ImportModeSwitcher) SwitchToNormalMode(ctx context.Context) error { - closeOnce.Do(func() { close(switcher.switchCh) }) + switcher.mu.Lock() + defer switcher.mu.Unlock() + + if switcher.cancel == nil { + log.Info("TiKV is already in normal mode") + return nil + } + log.Info("Stopping the import mode goroutine") + switcher.cancel() + switcher.cancel = nil + // wait for switch goroutine exits + switcher.wg.Wait() return switcher.switchTiKVMode(ctx, import_sstpb.SwitchMode_Normal) } @@ -116,26 +126,43 @@ func (switcher *ImportModeSwitcher) switchTiKVMode( return nil } -// SwitchToImportMode switch tikv cluster to import mode. -func (switcher *ImportModeSwitcher) SwitchToImportMode( +// GoSwitchToImportMode switch tikv cluster to import mode. +func (switcher *ImportModeSwitcher) GoSwitchToImportMode( ctx context.Context, -) { +) error { + switcher.mu.Lock() + defer switcher.mu.Unlock() + + if switcher.cancel != nil { + log.Info("TiKV is already in import mode") + return nil + } + + // Create a new context for the goroutine + ctx, cancel := context.WithCancel(context.Background()) + switcher.cancel = cancel + + // [important!] 
switch tikv mode into import at the beginning + log.Info("switch to import mode at beginning") + err := switcher.switchTiKVMode(ctx, import_sstpb.SwitchMode_Import) + if err != nil { + log.Warn("switch to import mode failed", zap.Error(err)) + return errors.Trace(err) + } + switcher.wg.Add(1) // tikv automatically switch to normal mode in every 10 minutes // so we need ping tikv in less than 10 minute go func() { tick := time.NewTicker(switcher.switchModeInterval) - defer tick.Stop() - - // [important!] switch tikv mode into import at the beginning - log.Info("switch to import mode at beginning") - err := switcher.switchTiKVMode(ctx, import_sstpb.SwitchMode_Import) - if err != nil { - log.Warn("switch to import mode failed", zap.Error(err)) - } + defer func() { + switcher.wg.Done() + tick.Stop() + }() for { select { case <-ctx.Done(): + log.Info("stop automatic switch to import mode when context done") return case <-tick.C: log.Info("switch to import mode") @@ -143,12 +170,10 @@ func (switcher *ImportModeSwitcher) SwitchToImportMode( if err != nil { log.Warn("switch to import mode failed", zap.Error(err)) } - case <-switcher.switchCh: - log.Info("stop automatic switch to import mode") - return } } }() + return nil } // RestorePreWork executes some prepare work before restore. @@ -166,7 +191,10 @@ func RestorePreWork( if switchToImport { // Switch TiKV cluster to import mode (adjust rocksdb configuration). - switcher.SwitchToImportMode(ctx) + err := switcher.GoSwitchToImportMode(ctx) + if err != nil { + return pdutil.Nop, nil, err + } } return mgr.RemoveSchedulersWithConfig(ctx) diff --git a/br/pkg/restore/log_client/client.go b/br/pkg/restore/log_client/client.go index 4faf59a316657..ec3e3539324ad 100644 --- a/br/pkg/restore/log_client/client.go +++ b/br/pkg/restore/log_client/client.go @@ -166,7 +166,7 @@ func NewSstRestoreManager( return nil, errors.Trace(err) } if se != nil { - checkpointRunner, err := checkpoint.StartCheckpointRunnerForRestore(ctx, se) + checkpointRunner, err := checkpoint.StartCheckpointRunnerForRestore(ctx, se, checkpoint.CustomSSTRestoreCheckpointDatabaseName) if err != nil { return nil, errors.Trace(err) } @@ -284,7 +284,10 @@ func (rc *LogClient) RestoreCompactedSstFiles( log.Info("[Compacted SST Restore] No SST files found for restoration.") return nil } - importModeSwitcher.SwitchToImportMode(ctx) + err := importModeSwitcher.GoSwitchToImportMode(ctx) + if err != nil { + return errors.Trace(err) + } defer func() { switchErr := importModeSwitcher.SwitchToNormalMode(ctx) if switchErr != nil { @@ -298,11 +301,9 @@ func (rc *LogClient) RestoreCompactedSstFiles( // where batch processing may lead to increased complexity and potential inefficiencies. // TODO: Future enhancements may explore the feasibility of reintroducing batch restoration // while maintaining optimal performance and resource utilization. 
- for _, i := range backupFileSets { - err := rc.sstRestoreManager.restorer.GoRestore(onProgress, []restore.BackupFileSet{i}) - if err != nil { - return errors.Trace(err) - } + err = rc.sstRestoreManager.restorer.GoRestore(onProgress, backupFileSets) + if err != nil { + return errors.Trace(err) } return rc.sstRestoreManager.restorer.WaitUntilFinish() } diff --git a/br/pkg/restore/restorer.go b/br/pkg/restore/restorer.go index 75a21b583eb1f..4e36086eee1c2 100644 --- a/br/pkg/restore/restorer.go +++ b/br/pkg/restore/restorer.go @@ -203,7 +203,16 @@ func (s *SimpleRestorer) GoRestore(onProgress func(int64), batchFileSets ...Batc if err != nil { return errors.Trace(err) } - // TODO handle checkpoint + if s.checkpointRunner != nil { + // The checkpoint shows this ranges of files has been restored into + // the table corresponding to the table-id. + for _, f := range set.SSTFiles { + if err := checkpoint.AppendRangesForRestore(s.ectx, s.checkpointRunner, + checkpoint.NewCheckpointFileItem(set.TableID, f.GetName())); err != nil { + return errors.Trace(err) + } + } + } return nil }) } @@ -302,7 +311,8 @@ func (m *MultiTablesRestorer) GoRestore(onProgress func(int64), batchFileSets .. for rangeKey := range rangeKeySet { // The checkpoint range shows this ranges of kvs has been restored into // the table corresponding to the table-id. - if err := checkpoint.AppendRangesForRestore(m.ectx, m.checkpointRunner, filesGroup.TableID, rangeKey); err != nil { + if err := checkpoint.AppendRangesForRestore(m.ectx, m.checkpointRunner, + checkpoint.NewCheckpointRangeKeyItem(filesGroup.TableID, rangeKey)); err != nil { return errors.Trace(err) } } @@ -317,9 +327,11 @@ func (m *MultiTablesRestorer) GoRestore(onProgress func(int64), batchFileSets .. return m.ectx.Err() } +// GetFileRangeKey is used to reduce the checkpoint number, because we combine the write cf/default cf into one restore file group. +// during full restore, so we can reduce the checkpoint number with the common prefix of the file. func GetFileRangeKey(f string) string { // the backup date file pattern is `{store_id}_{region_id}_{epoch_version}_{key}_{ts}_{cf}.sst` - // so we need to compare with out the `_{cf}.sst` suffix + // so we need to compare without the `_{cf}.sst` suffix idx := strings.LastIndex(f, "_") if idx < 0 { panic(fmt.Sprintf("invalid backup data file name: '%s'", f)) diff --git a/br/pkg/restore/snap_client/client.go b/br/pkg/restore/snap_client/client.go index a7e0ecab3d230..7dcd37189a246 100644 --- a/br/pkg/restore/snap_client/client.go +++ b/br/pkg/restore/snap_client/client.go @@ -346,7 +346,7 @@ func (rc *SnapClient) InitCheckpoint( } // t1 is the latest time the checkpoint ranges persisted to the external storage. 
- t1, err := checkpoint.LoadCheckpointDataForSnapshotRestore(ctx, execCtx, func(tableID int64, v checkpoint.RestoreValueType) { + t1, err := checkpoint.LoadCheckpointDataForSstRestore(ctx, execCtx, checkpoint.SnapshotRestoreCheckpointDatabaseName, func(tableID int64, v checkpoint.RestoreValueType) { checkpointSet, exists := checkpointSetWithTableID[tableID] if !exists { checkpointSet = make(map[string]struct{}) @@ -379,7 +379,7 @@ func (rc *SnapClient) InitCheckpoint( if config != nil { meta.SchedulersConfig = &pdutil.ClusterConfig{Schedulers: config.Schedulers, ScheduleCfg: config.ScheduleCfg} } - if err := checkpoint.SaveCheckpointMetadataForSnapshotRestore(ctx, rc.db.Session(), meta); err != nil { + if err := checkpoint.SaveCheckpointMetadataForSstRestore(ctx, rc.db.Session(), checkpoint.SnapshotRestoreCheckpointDatabaseName, meta); err != nil { return checkpointSetWithTableID, nil, errors.Trace(err) } } @@ -388,7 +388,7 @@ func (rc *SnapClient) InitCheckpoint( if err != nil { return checkpointSetWithTableID, nil, errors.Trace(err) } - rc.checkpointRunner, err = checkpoint.StartCheckpointRunnerForRestore(ctx, se) + rc.checkpointRunner, err = checkpoint.StartCheckpointRunnerForRestore(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName) return checkpointSetWithTableID, checkpointClusterConfig, errors.Trace(err) } diff --git a/br/pkg/storage/BUILD.bazel b/br/pkg/storage/BUILD.bazel index 9084fc41db3cd..60c587893af9f 100644 --- a/br/pkg/storage/BUILD.bazel +++ b/br/pkg/storage/BUILD.bazel @@ -77,6 +77,7 @@ go_library( "@org_golang_google_api//transport/http", "@org_golang_x_net//http2", "@org_golang_x_oauth2//google", + "@org_golang_x_sync//errgroup", "@org_uber_go_atomic//:atomic", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", diff --git a/br/pkg/storage/helper.go b/br/pkg/storage/helper.go index c0c5c63ba0747..d9c864cf7fa66 100644 --- a/br/pkg/storage/helper.go +++ b/br/pkg/storage/helper.go @@ -7,8 +7,12 @@ import ( "sync/atomic" "github.com/pingcap/errors" + "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/utils/iter" "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/util" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" ) func init() { @@ -48,22 +52,32 @@ func UnmarshalDir[T any](ctx context.Context, walkOpt *WalkOption, s ExternalSto errCh := make(chan error, 1) reader := func() { defer close(ch) - err := s.WalkDir(ctx, walkOpt, func(path string, size int64) error { - metaBytes, err := s.ReadFile(ctx, path) - if err != nil { - return errors.Annotatef(err, "failed during reading file %s", path) - } - var meta T - if err := unmarshal(&meta, path, metaBytes); err != nil { - return errors.Annotatef(err, "failed to parse subcompaction meta of file %s", path) - } - select { - case ch <- &meta: - case <-ctx.Done(): - return ctx.Err() - } + pool := util.NewWorkerPool(128, "metadata") + eg, ectx := errgroup.WithContext(ctx) + err := s.WalkDir(ectx, walkOpt, func(path string, size int64) error { + pool.ApplyOnErrorGroup(eg, func() error { + metaBytes, err := s.ReadFile(ectx, path) + if err != nil { + log.Error("failed to read file", zap.String("file", path)) + return errors.Annotatef(err, "during reading meta file %s from storage", path) + } + + var meta T + if err := unmarshal(&meta, path, metaBytes); err != nil { + return errors.Annotatef(err, "failed to unmarshal file %s", path) + } + select { + case ch <- &meta: + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) return nil }) + if err == nil { + err = eg.Wait() + } 
if err != nil { select { case errCh <- err: diff --git a/br/pkg/task/operator/migrate_to.go b/br/pkg/task/operator/migrate_to.go index 20f76b0f86967..282e82784ecb9 100644 --- a/br/pkg/task/operator/migrate_to.go +++ b/br/pkg/task/operator/migrate_to.go @@ -5,7 +5,7 @@ import ( "github.com/fatih/color" "github.com/pingcap/errors" - backup "github.com/pingcap/kvproto/pkg/brpb" + backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/stream" @@ -39,7 +39,7 @@ func (cx migrateToCtx) printErr(errs []error, msg string) { } } -func (cx migrateToCtx) askForContinue(targetMig *backup.Migration) bool { +func (cx migrateToCtx) askForContinue(targetMig *backuppb.Migration) bool { tbl := cx.console.CreateTable() stream.AddMigrationToTable(targetMig, tbl) cx.console.Println("The migration going to be executed will be like: ") @@ -124,7 +124,7 @@ func RunMigrateTo(ctx context.Context, cfg MigrateToConfig) error { } return run(func(est stream.MigrationExt) stream.MergeAndMigratedTo { - return est.MergeAndMigrateTo(ctx, targetVersion, stream.MMOptInteractiveCheck(func(ctx context.Context, m *backup.Migration) bool { + return est.MergeAndMigrateTo(ctx, targetVersion, stream.MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { return cfg.Yes || cx.askForContinue(m) })) }) diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index 7e433fd640cb5..65b391f678edf 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -725,12 +725,16 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if err != nil { log.Warn("failed to remove checkpoint data for log restore", zap.Error(err)) } - err = checkpoint.RemoveCheckpointDataForSnapshotRestore(c, mgr.GetDomain(), se) + err = checkpoint.RemoveCheckpointDataForSstRestore(c, mgr.GetDomain(), se, checkpoint.CustomSSTRestoreCheckpointDatabaseName) + if err != nil { + log.Warn("failed to remove checkpoint data for compacted restore", zap.Error(err)) + } + err = checkpoint.RemoveCheckpointDataForSstRestore(c, mgr.GetDomain(), se, checkpoint.SnapshotRestoreCheckpointDatabaseName) if err != nil { log.Warn("failed to remove checkpoint data for snapshot restore", zap.Error(err)) } } else { - err = checkpoint.RemoveCheckpointDataForSnapshotRestore(c, mgr.GetDomain(), se) + err = checkpoint.RemoveCheckpointDataForSstRestore(c, mgr.GetDomain(), se, checkpoint.SnapshotRestoreCheckpointDatabaseName) if err != nil { log.Warn("failed to remove checkpoint data for snapshot restore", zap.Error(err)) } @@ -875,7 +879,7 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s if cfg.UseCheckpoint { // if the checkpoint metadata exists in the checkpoint storage, the restore is not // for the first time. 
- existsCheckpointMetadata := checkpoint.ExistsSnapshotRestoreCheckpoint(ctx, mgr.GetDomain()) + existsCheckpointMetadata := checkpoint.ExistsSstRestoreCheckpoint(ctx, mgr.GetDomain(), checkpoint.SnapshotRestoreCheckpointDatabaseName) checkpointFirstRun = !existsCheckpointMetadata } From 9e8d35a4e2ef49db22f6921a3c3703c915ac6ab2 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Tue, 10 Dec 2024 11:41:18 +0800 Subject: [PATCH 09/17] compacted restore: fix the wrong initial configrations (#58050) ref pingcap/tidb#56522 --- br/pkg/checkpoint/checkpoint_test.go | 5 ++- br/pkg/checkpoint/restore.go | 4 +- br/pkg/restore/log_client/client.go | 51 ++++++++++++++++------- br/pkg/restore/restorer.go | 1 - br/pkg/restore/snap_client/BUILD.bazel | 2 +- br/pkg/restore/snap_client/import.go | 3 ++ br/pkg/restore/snap_client/import_test.go | 7 ++++ br/tests/br_pitr/run.sh | 1 + 8 files changed, 55 insertions(+), 19 deletions(-) diff --git a/br/pkg/checkpoint/checkpoint_test.go b/br/pkg/checkpoint/checkpoint_test.go index b02402005aa43..b70348aaa5fd9 100644 --- a/br/pkg/checkpoint/checkpoint_test.go +++ b/br/pkg/checkpoint/checkpoint_test.go @@ -628,6 +628,9 @@ func TestCheckpointCompactedRestoreRunner(t *testing.T) { respCount++ } + exists := checkpoint.ExistsSstRestoreCheckpoint(ctx, s.Mock.Domain, checkpoint.CustomSSTRestoreCheckpointDatabaseName) + require.True(t, exists) + _, err = checkpoint.LoadCheckpointDataForSstRestore(ctx, se.GetSessionCtx().GetRestrictedSQLExecutor(), checkpoint.CustomSSTRestoreCheckpointDatabaseName, checker) require.NoError(t, err) require.Equal(t, 3, respCount) @@ -635,7 +638,7 @@ func TestCheckpointCompactedRestoreRunner(t *testing.T) { err = checkpoint.RemoveCheckpointDataForSstRestore(ctx, s.Mock.Domain, se, checkpoint.CustomSSTRestoreCheckpointDatabaseName) require.NoError(t, err) - exists := checkpoint.ExistsSstRestoreCheckpoint(ctx, s.Mock.Domain, checkpoint.CustomSSTRestoreCheckpointDatabaseName) + exists = checkpoint.ExistsSstRestoreCheckpoint(ctx, s.Mock.Domain, checkpoint.CustomSSTRestoreCheckpointDatabaseName) require.False(t, exists) exists = s.Mock.Domain.InfoSchema().SchemaExists(pmodel.NewCIStr(checkpoint.CustomSSTRestoreCheckpointDatabaseName)) require.False(t, exists) diff --git a/br/pkg/checkpoint/restore.go b/br/pkg/checkpoint/restore.go index c05ec3df8c2f2..2e55cc3eb81c2 100644 --- a/br/pkg/checkpoint/restore.go +++ b/br/pkg/checkpoint/restore.go @@ -170,8 +170,10 @@ func ExistsSstRestoreCheckpoint( dom *domain.Domain, dbName string, ) bool { + // we only check the existence of the checkpoint data table + // because the checkpoint metadata is not used for restore return dom.InfoSchema(). - TableExists(pmodel.NewCIStr(dbName), pmodel.NewCIStr(checkpointMetaTableName)) + TableExists(pmodel.NewCIStr(dbName), pmodel.NewCIStr(checkpointDataTableName)) } func RemoveCheckpointDataForSstRestore(ctx context.Context, dom *domain.Domain, se glue.Session, dbName string) error { diff --git a/br/pkg/restore/log_client/client.go b/br/pkg/restore/log_client/client.go index ec3e3539324ad..bfd977afe1fa6 100644 --- a/br/pkg/restore/log_client/client.go +++ b/br/pkg/restore/log_client/client.go @@ -152,6 +152,7 @@ func NewSstRestoreManager( storeCount uint, createCheckpointSessionFn func() (glue.Session, error), ) (*SstRestoreManager, error) { + var checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType] // This poolSize is similar to full restore, as both workflows are comparable. 
// The poolSize should be greater than concurrencyPerStore multiplied by the number of stores. poolSize := concurrencyPerStore * 32 * storeCount @@ -166,14 +167,12 @@ func NewSstRestoreManager( return nil, errors.Trace(err) } if se != nil { - checkpointRunner, err := checkpoint.StartCheckpointRunnerForRestore(ctx, se, checkpoint.CustomSSTRestoreCheckpointDatabaseName) + checkpointRunner, err = checkpoint.StartCheckpointRunnerForRestore(ctx, se, checkpoint.CustomSSTRestoreCheckpointDatabaseName) if err != nil { return nil, errors.Trace(err) } - s.checkpointRunner = checkpointRunner } - // TODO implement checkpoint - s.restorer = restore.NewSimpleSstRestorer(ctx, snapFileImporter, sstWorkerPool, nil) + s.restorer = restore.NewSimpleSstRestorer(ctx, snapFileImporter, sstWorkerPool, checkpointRunner) return s, nil } @@ -182,14 +181,13 @@ type LogClient struct { logRestoreManager *LogRestoreManager sstRestoreManager *SstRestoreManager - cipher *backuppb.CipherInfo - pdClient pd.Client - pdHTTPClient pdhttp.Client - clusterID uint64 - dom *domain.Domain - tlsConf *tls.Config - keepaliveConf keepalive.ClientParameters - concurrencyPerStore uint + cipher *backuppb.CipherInfo + pdClient pd.Client + pdHTTPClient pdhttp.Client + clusterID uint64 + dom *domain.Domain + tlsConf *tls.Config + keepaliveConf keepalive.ClientParameters rawKVClient *rawkv.RawKVBatchClient storage storage.ExternalStorage @@ -263,6 +261,7 @@ func (rc *LogClient) RestoreCompactedSstFiles( // Collect all items from the iterator in advance to avoid blocking during restoration. // This approach ensures that we have all necessary data ready for processing, // preventing any potential delays caused by waiting for the iterator to yield more items. + start := time.Now() for r := compactionsIter.TryNext(ctx); !r.Finished; r = compactionsIter.TryNext(ctx) { if r.Err != nil { return r.Err @@ -295,6 +294,13 @@ func (rc *LogClient) RestoreCompactedSstFiles( } }() + log.Info("[Compacted SST Restore] Start to restore SST files", + zap.Int("sst-file-count", len(backupFileSets)), zap.Duration("iterate-take", time.Since(start))) + start = time.Now() + defer func() { + log.Info("[Compacted SST Restore] Restore SST files finished", zap.Duration("restore-take", time.Since(start))) + }() + // To optimize performance and minimize cross-region downloads, // we are currently opting for a single restore approach instead of batch restoration. 
// This decision is similar to the handling of raw and txn restores, @@ -422,7 +428,7 @@ func (rc *LogClient) InitClients( opt := snapclient.NewSnapFileImporterOptions( rc.cipher, metaClient, importCli, backend, - snapclient.RewriteModeKeyspace, stores, rc.concurrencyPerStore, createCallBacks, closeCallBacks, + snapclient.RewriteModeKeyspace, stores, concurrencyPerStore, createCallBacks, closeCallBacks, ) snapFileImporter, err := snapclient.NewSnapFileImporter( ctx, rc.dom.Store().GetCodec().GetAPIVersion(), snapclient.TiDBCompcated, opt) @@ -442,9 +448,24 @@ func (rc *LogClient) InitClients( func (rc *LogClient) InitCheckpointMetadataForCompactedSstRestore( ctx context.Context, ) (map[string]struct{}, error) { - // get sst checkpoint to skip repeated files sstCheckpointSets := make(map[string]struct{}) - // TODO initial checkpoint + + if checkpoint.ExistsSstRestoreCheckpoint(ctx, rc.dom, checkpoint.CustomSSTRestoreCheckpointDatabaseName) { + // we need to load the checkpoint data for the following restore + execCtx := rc.unsafeSession.GetSessionCtx().GetRestrictedSQLExecutor() + _, err := checkpoint.LoadCheckpointDataForSstRestore(ctx, execCtx, checkpoint.CustomSSTRestoreCheckpointDatabaseName, func(tableID int64, v checkpoint.RestoreValueType) { + sstCheckpointSets[v.Name] = struct{}{} + }) + if err != nil { + return nil, errors.Trace(err) + } + } else { + // initialize the checkpoint metadata since it is the first time to restore. + err := checkpoint.SaveCheckpointMetadataForSstRestore(ctx, rc.unsafeSession, checkpoint.CustomSSTRestoreCheckpointDatabaseName, nil) + if err != nil { + return nil, errors.Trace(err) + } + } return sstCheckpointSets, nil } diff --git a/br/pkg/restore/restorer.go b/br/pkg/restore/restorer.go index 4e36086eee1c2..9d999af9c09fc 100644 --- a/br/pkg/restore/restorer.go +++ b/br/pkg/restore/restorer.go @@ -359,7 +359,6 @@ func (p *PipelineRestorerWrapper[T]) WithSplit(ctx context.Context, i iter.TryNe // Check if the accumulated items meet the criteria for splitting. if strategy.ShouldSplit() { - log.Info("Trying to start region split with accumulations") startTime := time.Now() // Execute the split operation on the accumulated items. 
diff --git a/br/pkg/restore/snap_client/BUILD.bazel b/br/pkg/restore/snap_client/BUILD.bazel index b9abcd2e99f7d..5df612e4750e6 100644 --- a/br/pkg/restore/snap_client/BUILD.bazel +++ b/br/pkg/restore/snap_client/BUILD.bazel @@ -82,7 +82,7 @@ go_test( ], embed = [":snap_client"], flaky = True, - shard_count = 18, + shard_count = 19, deps = [ "//br/pkg/errors", "//br/pkg/glue", diff --git a/br/pkg/restore/snap_client/import.go b/br/pkg/restore/snap_client/import.go index 4e71c6fbe0cc4..c5289f27ac99a 100644 --- a/br/pkg/restore/snap_client/import.go +++ b/br/pkg/restore/snap_client/import.go @@ -213,6 +213,9 @@ func NewSnapFileImporter( kvMode KvMode, options *SnapFileImporterOptions, ) (*SnapFileImporter, error) { + if options.concurrencyPerStore == 0 { + return nil, errors.New("concurrencyPerStore must be greater than 0") + } fileImporter := &SnapFileImporter{ apiVersion: apiVersion, kvMode: kvMode, diff --git a/br/pkg/restore/snap_client/import_test.go b/br/pkg/restore/snap_client/import_test.go index 9d9c79fe1a6f6..23b7fd8fc81a0 100644 --- a/br/pkg/restore/snap_client/import_test.go +++ b/br/pkg/restore/snap_client/import_test.go @@ -155,6 +155,13 @@ func (client *fakeImporterClient) MultiIngest( return &import_sstpb.IngestResponse{}, nil } +func TestUnproperConfigSnapImporter(t *testing.T) { + ctx := context.Background() + opt := snapclient.NewSnapFileImporterOptionsForTest(nil, nil, nil, snapclient.RewriteModeKeyspace, 0) + _, err := snapclient.NewSnapFileImporter(ctx, kvrpcpb.APIVersion_V1, snapclient.TiDBFull, opt) + require.Error(t, err) +} + func TestSnapImporter(t *testing.T) { ctx := context.Background() splitClient := split.NewFakeSplitClient() diff --git a/br/tests/br_pitr/run.sh b/br/tests/br_pitr/run.sh index b9068db6f558f..09c0fe99bf4de 100644 --- a/br/tests/br_pitr/run.sh +++ b/br/tests/br_pitr/run.sh @@ -112,6 +112,7 @@ fi # PITR restore echo "run pitr" run_sql "DROP DATABASE __TiDB_BR_Temporary_Log_Restore_Checkpoint;" +run_sql "DROP DATABASE __TiDB_BR_Temporary_Custom_SST_Restore_Checkpoint;" run_br --pd $PD_ADDR restore point -s "local://$TEST_DIR/$PREFIX/log" --full-backup-storage "local://$TEST_DIR/$PREFIX/full" > $res_file 2>&1 check_result From 3186b6af9cc2542316cb4d8e5ea5b940fcf93cab Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 19 Dec 2024 10:34:00 +0800 Subject: [PATCH 10/17] compact restore: use closure to initial snapshot restore checkpoint (#58146) close pingcap/tidb#58237 --- br/pkg/restore/snap_client/client.go | 23 ++++++++++++++++++----- br/pkg/restore/snap_client/tikv_sender.go | 5 +++-- br/pkg/task/restore_raw.go | 4 ++-- br/pkg/task/restore_txn.go | 4 ++-- br/tests/br_restore_checkpoint/run.sh | 10 +++++++--- 5 files changed, 32 insertions(+), 14 deletions(-) diff --git a/br/pkg/restore/snap_client/client.go b/br/pkg/restore/snap_client/client.go index 7dcd37189a246..5883ce0e2e13f 100644 --- a/br/pkg/restore/snap_client/client.go +++ b/br/pkg/restore/snap_client/client.go @@ -76,6 +76,8 @@ const minBatchDdlSize = 1 type SnapClient struct { restorer restore.SstRestorer + // Use a closure to lazy load checkpoint runner + getRestorerFn func(*checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType]) restore.SstRestorer // Tool clients used by SnapClient pdClient pd.Client pdHTTPClient pdhttp.Client @@ -148,7 +150,8 @@ type SnapClient struct { rewriteMode RewriteMode // checkpoint information for snapshot restore - checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType] + 
checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType] + checkpointChecksum map[int64]*checkpoint.ChecksumItem } @@ -168,7 +171,10 @@ func NewRestoreClient( } } -func (rc *SnapClient) GetRestorer() restore.SstRestorer { +func (rc *SnapClient) GetRestorer(checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType]) restore.SstRestorer { + if rc.restorer == nil { + rc.restorer = rc.getRestorerFn(checkpointRunner) + } return rc.restorer } @@ -389,7 +395,10 @@ func (rc *SnapClient) InitCheckpoint( return checkpointSetWithTableID, nil, errors.Trace(err) } rc.checkpointRunner, err = checkpoint.StartCheckpointRunnerForRestore(ctx, se, checkpoint.SnapshotRestoreCheckpointDatabaseName) - return checkpointSetWithTableID, checkpointClusterConfig, errors.Trace(err) + if err != nil { + return checkpointSetWithTableID, nil, errors.Trace(err) + } + return checkpointSetWithTableID, checkpointClusterConfig, nil } func (rc *SnapClient) WaitForFinishCheckpoint(ctx context.Context, flush bool) { @@ -539,7 +548,9 @@ func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.Storage return errors.Trace(err) } // Raw/Txn restore are not support checkpoint for now - rc.restorer = restore.NewSimpleSstRestorer(ctx, fileImporter, rc.workerPool, nil) + rc.getRestorerFn = func(checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType]) restore.SstRestorer { + return restore.NewSimpleSstRestorer(ctx, fileImporter, rc.workerPool, nil) + } } else { // or create a fileImporter with the cluster API version fileImporter, err = NewSnapFileImporter( @@ -547,7 +558,9 @@ func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.Storage if err != nil { return errors.Trace(err) } - rc.restorer = restore.NewMultiTablesRestorer(ctx, fileImporter, rc.workerPool, rc.checkpointRunner) + rc.getRestorerFn = func(checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType]) restore.SstRestorer { + return restore.NewMultiTablesRestorer(ctx, fileImporter, rc.workerPool, checkpointRunner) + } } return nil } diff --git a/br/pkg/restore/snap_client/tikv_sender.go b/br/pkg/restore/snap_client/tikv_sender.go index 57f73835beda7..66909d8ac5a7b 100644 --- a/br/pkg/restore/snap_client/tikv_sender.go +++ b/br/pkg/restore/snap_client/tikv_sender.go @@ -385,9 +385,10 @@ func (rc *SnapClient) RestoreSSTFiles( } }) - retErr = rc.restorer.GoRestore(onProgress, tableIDWithFilesGroup...) + r := rc.GetRestorer(rc.checkpointRunner) + retErr = r.GoRestore(onProgress, tableIDWithFilesGroup...) 
if retErr != nil { return retErr } - return rc.restorer.WaitUntilFinish() + return r.WaitUntilFinish() } diff --git a/br/pkg/task/restore_raw.go b/br/pkg/task/restore_raw.go index acb2e48041e64..1680e60472f7b 100644 --- a/br/pkg/task/restore_raw.go +++ b/br/pkg/task/restore_raw.go @@ -163,11 +163,11 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR defer restore.RestorePostWork(ctx, importModeSwitcher, restoreSchedulers, cfg.Online) start := time.Now() - err = client.GetRestorer().GoRestore(onProgress, restore.CreateUniqueFileSets(files)) + err = client.GetRestorer(nil).GoRestore(onProgress, restore.CreateUniqueFileSets(files)) if err != nil { return errors.Trace(err) } - err = client.GetRestorer().WaitUntilFinish() + err = client.GetRestorer(nil).WaitUntilFinish() if err != nil { return errors.Trace(err) } diff --git a/br/pkg/task/restore_txn.go b/br/pkg/task/restore_txn.go index 2af64a59602cc..16d8e099f659d 100644 --- a/br/pkg/task/restore_txn.go +++ b/br/pkg/task/restore_txn.go @@ -102,11 +102,11 @@ func RunRestoreTxn(c context.Context, g glue.Glue, cmdName string, cfg *Config) } defer restore.RestorePostWork(ctx, importModeSwitcher, restoreSchedulers, false) - err = client.GetRestorer().GoRestore(onProgress, restore.CreateUniqueFileSets(files)) + err = client.GetRestorer(nil).GoRestore(onProgress, restore.CreateUniqueFileSets(files)) if err != nil { return errors.Trace(err) } - err = client.GetRestorer().WaitUntilFinish() + err = client.GetRestorer(nil).WaitUntilFinish() if err != nil { return errors.Trace(err) } diff --git a/br/tests/br_restore_checkpoint/run.sh b/br/tests/br_restore_checkpoint/run.sh index da45692cdcb62..7fe1a0e678773 100644 --- a/br/tests/br_restore_checkpoint/run.sh +++ b/br/tests/br_restore_checkpoint/run.sh @@ -69,16 +69,20 @@ if [ $restore_fail -ne 1 ]; then fi # PITR with checkpoint but failed in the log restore metakv stage -export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/restore/snap_client/corrupt-files=return(\"only-last-table-files\");\ -github.com/pingcap/tidb/br/pkg/restore/log_client/failed-after-id-maps-saved=return(true)" +export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/restore/snap_client/corrupt-files=return(\"only-last-table-files\")" +export GO_FAILPOINTS=$GO_FAILPOINTS";github.com/pingcap/tidb/br/pkg/restore/log_client/failed-after-id-maps-saved=return(true)" restore_fail=0 run_br --pd $PD_ADDR restore point --full-backup-storage "local://$TEST_DIR/$PREFIX/full" -s "local://$TEST_DIR/$PREFIX/log" || restore_fail=1 export GO_FAILPOINTS="" if [ $restore_fail -ne 1 ]; then - echo 'PITR success' + echo 'PITR success, but should fail' exit 1 fi +# check the snapshot restore has checkpoint data +run_sql 'select count(*) from '"__TiDB_BR_Temporary_Snapshot_Restore_Checkpoint"'.`cpt_data`;' +check_contains "count(*): 1" + # PITR with checkpoint but failed in the log restore datakv stage # skip the snapshot restore stage export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/task/corrupt-files=return(\"corrupt-last-table-files\")" From 2ce950bc177611c67cc78a6b02a66569a8c55f36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B1=B1=E5=B2=9A?= <36239017+YuJuncen@users.noreply.github.com> Date: Tue, 14 Jan 2025 18:02:46 +0800 Subject: [PATCH 11/17] br/cmd: added operator checksum-as|force-flush (#58801) close pingcap/tidb#58798 --- DEPS.bzl | 12 +- br/cmd/br/operator.go | 42 ++++ br/pkg/task/operator/BUILD.bazel | 17 ++ br/pkg/task/operator/checksum_table.go | 269 +++++++++++++++++++++++++ br/pkg/task/operator/config.go | 
93 +++++++-- br/pkg/task/operator/force_flush.go | 101 ++++++++++ br/tests/br_test_utils.sh | 2 + go.mod | 2 +- go.sum | 4 +- 9 files changed, 512 insertions(+), 30 deletions(-) create mode 100644 br/pkg/task/operator/checksum_table.go create mode 100644 br/pkg/task/operator/force_flush.go diff --git a/DEPS.bzl b/DEPS.bzl index fa2894de582c3..b223666edae78 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -5776,13 +5776,13 @@ def go_deps(): name = "com_github_pingcap_kvproto", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/kvproto", - sha256 = "6f2a7d747d05ae61a8f4a3c066058fa69f724480f8dc4a427d66fd066ce730c7", - strip_prefix = "github.com/pingcap/kvproto@v0.0.0-20240924080114-4a3e17f5e62d", + sha256 = "db08607b0c90f3909b66577e9c568d0cbd6b2825d287d7b5caab86ea6e4b60ad", + strip_prefix = "github.com/pingcap/kvproto@v0.0.0-20250108041715-3b77f2c65c63", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20240924080114-4a3e17f5e62d.zip", - "http://ats.apps.svc/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20240924080114-4a3e17f5e62d.zip", - "https://cache.hawkingrei.com/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20240924080114-4a3e17f5e62d.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20240924080114-4a3e17f5e62d.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", + "http://ats.apps.svc/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", + "https://cache.hawkingrei.com/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", ], ) go_repository( diff --git a/br/cmd/br/operator.go b/br/cmd/br/operator.go index 4e41adeab329f..abd0156a5457b 100644 --- a/br/cmd/br/operator.go +++ b/br/cmd/br/operator.go @@ -35,6 +35,8 @@ func newOperatorCommand() *cobra.Command { cmd.AddCommand(newBase64ifyCommand()) cmd.AddCommand(newListMigrationsCommand()) cmd.AddCommand(newMigrateToCommand()) + cmd.AddCommand(newForceFlushCommand()) + cmd.AddCommand(newChecksumCommand()) return cmd } @@ -109,3 +111,43 @@ func newMigrateToCommand() *cobra.Command { operator.DefineFlagsForMigrateToConfig(cmd.Flags()) return cmd } + +func newChecksumCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "checksum-as", + Short: "calculate the checksum with rewrite rules", + Long: "Calculate the checksum of the current cluster (specified by `-u`) " + + "with applying the rewrite rules generated from a backup (specified by `-s`). 
" + + "This can be used when you have the checksum of upstream elsewhere.", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := operator.ChecksumWithRewriteRulesConfig{} + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err + } + ctx := GetDefaultContext() + return operator.RunChecksumTable(ctx, tidbGlue, cfg) + }, + } + task.DefineFilterFlags(cmd, []string{"!*.*"}, false) + operator.DefineFlagsForChecksumTableConfig(cmd.Flags()) + return cmd +} + +func newForceFlushCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "force-flush", + Short: "force a log backup task to flush", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := operator.ForceFlushConfig{} + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err + } + ctx := GetDefaultContext() + return operator.RunForceFlush(ctx, &cfg) + }, + } + operator.DefineFlagsForForceFlushConfig(cmd.Flags()) + return cmd +} diff --git a/br/pkg/task/operator/BUILD.bazel b/br/pkg/task/operator/BUILD.bazel index 14760027a49b8..6d232d6c36bf0 100644 --- a/br/pkg/task/operator/BUILD.bazel +++ b/br/pkg/task/operator/BUILD.bazel @@ -4,7 +4,9 @@ go_library( name = "operator", srcs = [ "base64ify.go", + "checksum_table.go", "config.go", + "force_flush.go", "list_migration.go", "migrate_to.go", "prepare_snap.go", @@ -12,22 +14,37 @@ go_library( importpath = "github.com/pingcap/tidb/br/pkg/task/operator", visibility = ["//visibility:public"], deps = [ + "//br/pkg/backup", "//br/pkg/backup/prepare_snap", + "//br/pkg/checksum", + "//br/pkg/conn", "//br/pkg/errors", "//br/pkg/glue", "//br/pkg/logutil", + "//br/pkg/metautil", "//br/pkg/pdutil", "//br/pkg/storage", "//br/pkg/stream", "//br/pkg/task", "//br/pkg/utils", + "//pkg/domain", + "//pkg/meta/model", + "//pkg/util", + "//pkg/util/engine", "@com_github_fatih_color//:color", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/brpb", + "@com_github_pingcap_kvproto//pkg/logbackuppb", + "@com_github_pingcap_kvproto//pkg/metapb", "@com_github_pingcap_log//:log", "@com_github_spf13_pflag//:pflag", + "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//tikv", + "@com_github_tikv_client_go_v2//util", + "@com_github_tikv_pd_client//:client", + "@com_github_tikv_pd_client//opt", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//keepalive", "@org_golang_x_sync//errgroup", "@org_uber_go_multierr//:multierr", diff --git a/br/pkg/task/operator/checksum_table.go b/br/pkg/task/operator/checksum_table.go new file mode 100644 index 0000000000000..59c52f6eb4cea --- /dev/null +++ b/br/pkg/task/operator/checksum_table.go @@ -0,0 +1,269 @@ +package operator + +import ( + "context" + "encoding/json" + "os" + "sync" + "sync/atomic" + + "github.com/pingcap/errors" + backup "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/br/pkg/checksum" + "github.com/pingcap/tidb/br/pkg/conn" + "github.com/pingcap/tidb/br/pkg/glue" + "github.com/pingcap/tidb/br/pkg/metautil" + "github.com/pingcap/tidb/br/pkg/task" + "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/util" + "github.com/tikv/client-go/v2/oracle" + kvutil "github.com/tikv/client-go/v2/util" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +type checksumTableCtx struct { + cfg ChecksumWithRewriteRulesConfig + + mgr *conn.Mgr + dom *domain.Domain +} + +type tableInDB struct { + 
info *model.TableInfo + dbName string +} + +func RunChecksumTable(ctx context.Context, g glue.Glue, cfg ChecksumWithRewriteRulesConfig) error { + c := &checksumTableCtx{cfg: cfg} + + if err := c.init(ctx, g); err != nil { + return errors.Trace(err) + } + + curr, err := c.getTables(ctx) + if err != nil { + return errors.Trace(err) + } + + old, err := c.loadOldTableIDs(ctx) + if err != nil { + return errors.Trace(err) + } + + reqs, err := c.genRequests(ctx, old, curr) + if err != nil { + return errors.Trace(err) + } + + results, err := c.runChecksum(ctx, reqs) + if err != nil { + return errors.Trace(err) + } + + for _, result := range results { + log.Info("Checksum result", zap.String("db", result.DBName), zap.String("table", result.TableName), zap.Uint64("checksum", result.Checksum), + zap.Uint64("total_bytes", result.TotalBytes), zap.Uint64("total_kvs", result.TotalKVs)) + } + + return json.NewEncoder(os.Stdout).Encode(results) +} + +func (c *checksumTableCtx) init(ctx context.Context, g glue.Glue) error { + cfg := c.cfg + var err error + c.mgr, err = task.NewMgr(ctx, g, cfg.PD, cfg.TLS, task.GetKeepalive(&cfg.Config), cfg.CheckRequirements, true, conn.NormalVersionChecker) + if err != nil { + return err + } + + c.dom, err = g.GetDomain(c.mgr.GetStorage()) + if err != nil { + return err + } + return nil +} + +func (c *checksumTableCtx) getTables(ctx context.Context) (res []tableInDB, err error) { + sch := c.dom.InfoSchema() + dbs := sch.AllSchemas() + for _, db := range dbs { + if !c.cfg.TableFilter.MatchSchema(db.Name.L) { + continue + } + + tbls, err := sch.SchemaTableInfos(ctx, db.Name) + if err != nil { + return nil, errors.Annotatef(err, "failed to load data for db %s", db.Name) + } + for _, tbl := range tbls { + if !c.cfg.TableFilter.MatchTable(db.Name.L, tbl.Name.L) { + continue + } + log.Info("Added table from cluster.", zap.String("db", db.Name.L), zap.String("table", tbl.Name.L)) + res = append(res, tableInDB{ + info: tbl, + dbName: db.Name.L, + }) + } + } + + return +} + +func (c *checksumTableCtx) loadOldTableIDs(ctx context.Context) (res []*metautil.Table, err error) { + _, strg, err := task.GetStorage(ctx, c.cfg.Storage, &c.cfg.Config) + if err != nil { + return nil, errors.Annotate(err, "failed to create storage") + } + + mPath := metautil.MetaFile + metaContent, err := strg.ReadFile(ctx, mPath) + if err != nil { + return nil, errors.Annotatef(err, "failed to open metafile %s", mPath) + } + + var backupMeta backup.BackupMeta + if err := backupMeta.Unmarshal(metaContent); err != nil { + return nil, errors.Annotate(err, "failed to parse backupmeta") + } + + metaReader := metautil.NewMetaReader(&backupMeta, strg, &c.cfg.CipherInfo) + + tblCh := make(chan *metautil.Table, 1024) + errCh := make(chan error, 1) + go func() { + if err := metaReader.ReadSchemasFiles(ctx, tblCh, metautil.SkipFiles, metautil.SkipStats); err != nil { + errCh <- errors.Annotate(err, "failed to read schema files") + } + close(tblCh) + }() + + for { + select { + case err := <-errCh: + return nil, err + case tbl, ok := <-tblCh: + if !ok { + return + } + if !c.cfg.TableFilter.MatchTable(tbl.DB.Name.L, tbl.Info.Name.L) { + continue + } + log.Info("Added table from backup data.", zap.String("db", tbl.DB.Name.L), zap.String("table", tbl.Info.Name.L)) + res = append(res, tbl) + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type request struct { + copReq *checksum.Executor + tableName string + dbName string +} + +func (c *checksumTableCtx) genRequests(ctx context.Context, bkup []*metautil.Table, curr 
[]tableInDB) (reqs []request, err error) { + phy, logi, err := c.mgr.GetPDClient().GetTS(ctx) + if err != nil { + return nil, errors.Annotate(err, "failed to get TSO for checksumming") + } + tso := oracle.ComposeTS(phy, logi) + + bkupTbls := map[string]map[string]*metautil.Table{} + for _, t := range bkup { + m, ok := bkupTbls[t.DB.Name.L] + if !ok { + m = make(map[string]*metautil.Table) + bkupTbls[t.DB.Name.L] = m + } + + m[t.Info.Name.L] = t + } + + for _, t := range curr { + rb := checksum.NewExecutorBuilder(t.info, tso) + rb.SetConcurrency(c.cfg.ChecksumConcurrency) + oldDB, ok := bkupTbls[t.dbName] + if !ok { + log.Warn("db not found, will skip", zap.String("db", t.dbName)) + continue + } + oldTable, ok := oldDB[t.info.Name.L] + if !ok { + log.Warn("table not found, will skip", zap.String("db", t.dbName), zap.String("table", t.info.Name.L)) + continue + } + + rb.SetOldTable(oldTable) + rb.SetExplicitRequestSourceType(kvutil.ExplicitTypeBR) + req, err := rb.Build() + if err != nil { + return nil, errors.Annotatef(err, "failed to build checksum builder for table %s.%s", t.dbName, t.info.Name.L) + } + reqs = append(reqs, request{ + copReq: req, + dbName: t.dbName, + tableName: t.info.Name.L, + }) + } + + return +} + +type ChecksumResult struct { + DBName string `json:"db_name"` + TableName string `json:"table_name"` + + Checksum uint64 `json:"checksum"` + TotalBytes uint64 `json:"total_bytes"` + TotalKVs uint64 `json:"total_kvs"` +} + +func (c *checksumTableCtx) runChecksum(ctx context.Context, reqs []request) ([]ChecksumResult, error) { + wkPool := util.NewWorkerPool(c.cfg.TableConcurrency, "checksum") + eg, ectx := errgroup.WithContext(ctx) + results := make([]ChecksumResult, 0, len(reqs)) + resultsMu := new(sync.Mutex) + + for _, req := range reqs { + wkPool.ApplyOnErrorGroup(eg, func() error { + total := req.copReq.Len() + finished := new(atomic.Int64) + resp, err := req.copReq.Execute(ectx, c.mgr.GetStorage().GetClient(), func() { + finished.Add(1) + log.Info( + "Finish one request of a table.", + zap.String("db", req.dbName), + zap.String("table", req.tableName), + zap.Int64("finished", finished.Load()), + zap.Int64("total", int64(total)), + ) + }) + if err != nil { + return err + } + res := ChecksumResult{ + DBName: req.dbName, + TableName: req.tableName, + + Checksum: resp.Checksum, + TotalBytes: resp.TotalBytes, + TotalKVs: resp.TotalKvs, + } + resultsMu.Lock() + results = append(results, res) + resultsMu.Unlock() + return nil + }) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + return results, nil +} diff --git a/br/pkg/task/operator/config.go b/br/pkg/task/operator/config.go index c42382abe504d..8ccf1ef6266b5 100644 --- a/br/pkg/task/operator/config.go +++ b/br/pkg/task/operator/config.go @@ -3,15 +3,32 @@ package operator import ( + "regexp" "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/br/pkg/backup" berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/task" "github.com/spf13/pflag" ) +const ( + flagTableConcurrency = "table-concurrency" + flagStorePatterns = "stores" + flagTTL = "ttl" + flagSafePoint = "safepoint" + flagStorage = "storage" + flagLoadCreds = "load-creds" + flagJSON = "json" + flagRecent = "recent" + flagTo = "to" + flagBase = "base" + flagYes = "yes" + flagDryRun = "dry-run" +) + type PauseGcConfig struct { task.Config @@ -23,8 +40,8 @@ type PauseGcConfig struct { } func DefineFlagsForPrepareSnapBackup(f *pflag.FlagSet) { - _ = f.DurationP("ttl", "i", 
2*time.Minute, "The time-to-live of the safepoint.") - _ = f.Uint64P("safepoint", "t", 0, "The GC safepoint to be kept.") + _ = f.DurationP(flagTTL, "i", 2*time.Minute, "The time-to-live of the safepoint.") + _ = f.Uint64P(flagSafePoint, "t", 0, "The GC safepoint to be kept.") } // ParseFromFlags fills the config via the flags. @@ -34,11 +51,11 @@ func (cfg *PauseGcConfig) ParseFromFlags(flags *pflag.FlagSet) error { } var err error - cfg.SafePoint, err = flags.GetUint64("safepoint") + cfg.SafePoint, err = flags.GetUint64(flagSafePoint) if err != nil { return err } - cfg.TTL, err = flags.GetDuration("ttl") + cfg.TTL, err = flags.GetDuration(flagTTL) if err != nil { return err } @@ -54,8 +71,8 @@ type Base64ifyConfig struct { func DefineFlagsForBase64ifyConfig(flags *pflag.FlagSet) { storage.DefineFlags(flags) - flags.StringP("storage", "s", "", "The external storage input.") - flags.Bool("load-creds", false, "whether loading the credientials from current environment and marshal them to the base64 string. [!]") + flags.StringP(flagStorage, "s", "", "The external storage input.") + flags.Bool(flagLoadCreds, false, "whether loading the credientials from current environment and marshal them to the base64 string. [!]") } func (cfg *Base64ifyConfig) ParseFromFlags(flags *pflag.FlagSet) error { @@ -64,11 +81,11 @@ func (cfg *Base64ifyConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return err } - cfg.StorageURI, err = flags.GetString("storage") + cfg.StorageURI, err = flags.GetString(flagStorage) if err != nil { return err } - cfg.LoadCerd, err = flags.GetBool("load-creds") + cfg.LoadCerd, err = flags.GetBool(flagLoadCreds) if err != nil { return err } @@ -83,8 +100,8 @@ type ListMigrationConfig struct { func DefineFlagsForListMigrationConfig(flags *pflag.FlagSet) { storage.DefineFlags(flags) - flags.StringP("storage", "s", "", "the external storage input.") - flags.Bool("json", false, "output the result in json format.") + flags.StringP(flagStorage, "s", "", "the external storage input.") + flags.Bool(flagJSON, false, "output the result in json format.") } func (cfg *ListMigrationConfig) ParseFromFlags(flags *pflag.FlagSet) error { @@ -93,11 +110,11 @@ func (cfg *ListMigrationConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return err } - cfg.StorageURI, err = flags.GetString("storage") + cfg.StorageURI, err = flags.GetString(flagStorage) if err != nil { return err } - cfg.JSONOutput, err = flags.GetBool("json") + cfg.JSONOutput, err = flags.GetBool(flagJSON) if err != nil { return err } @@ -115,15 +132,6 @@ type MigrateToConfig struct { DryRun bool } -const ( - flagStorage = "storage" - flagRecent = "recent" - flagTo = "to" - flagBase = "base" - flagYes = "yes" - flagDryRun = "dry-run" -) - func DefineFlagsForMigrateToConfig(flags *pflag.FlagSet) { storage.DefineFlags(flags) flags.StringP(flagStorage, "s", "", "the external storage input.") @@ -180,3 +188,46 @@ func (cfg *MigrateToConfig) Verify() error { } return nil } + +type ForceFlushConfig struct { + task.Config + + // StoresPattern matches the address of TiKV. + // The address usually looks like ":20160". + // You may list the store by `pd-ctl stores`. 
+ StoresPattern *regexp.Regexp +} + +func DefineFlagsForForceFlushConfig(f *pflag.FlagSet) { + f.String(flagStorePatterns, ".*", "The regexp to match the store peer address to be force flushed.") +} + +func (cfg *ForceFlushConfig) ParseFromFlags(flags *pflag.FlagSet) (err error) { + storePat, err := flags.GetString(flagStorePatterns) + if err != nil { + return err + } + cfg.StoresPattern, err = regexp.Compile(storePat) + if err != nil { + return errors.Annotatef(err, "invalid expression in --%s", flagStorePatterns) + } + + return cfg.Config.ParseFromFlags(flags) +} + +type ChecksumWithRewriteRulesConfig struct { + task.Config +} + +func DefineFlagsForChecksumTableConfig(f *pflag.FlagSet) { + f.Uint(flagTableConcurrency, backup.DefaultSchemaConcurrency, "The size of a BR thread pool used for backup table metas, "+ + "including tableInfo/checksum and stats.") +} + +func (cfg *ChecksumWithRewriteRulesConfig) ParseFromFlags(flags *pflag.FlagSet) (err error) { + cfg.TableConcurrency, err = flags.GetUint(flagTableConcurrency) + if err != nil { + return + } + return cfg.Config.ParseFromFlags(flags) +} diff --git a/br/pkg/task/operator/force_flush.go b/br/pkg/task/operator/force_flush.go new file mode 100644 index 0000000000000..6bdc3a0bae288 --- /dev/null +++ b/br/pkg/task/operator/force_flush.go @@ -0,0 +1,101 @@ +package operator + +import ( + "context" + "crypto/tls" + "slices" + + "github.com/pingcap/errors" + logbackup "github.com/pingcap/kvproto/pkg/logbackuppb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/br/pkg/task" + "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/pkg/util/engine" + pd "github.com/tikv/pd/client" + "github.com/tikv/pd/client/opt" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +func getAllTiKVs(ctx context.Context, p pd.Client) ([]*metapb.Store, error) { + stores, err := p.GetAllStores(ctx, opt.WithExcludeTombstone()) + if err != nil { + return nil, err + } + withoutTiFlash := slices.DeleteFunc(stores, engine.IsTiFlash) + return withoutTiFlash, err +} + +func createStoreManager(pd pd.Client, cfg *task.Config) (*utils.StoreManager, error) { + var ( + tconf *tls.Config + err error + ) + + if cfg.TLS.IsEnabled() { + tconf, err = cfg.TLS.ToTLSConfig() + if err != nil { + return nil, errors.Annotate(err, "invalid tls config") + } + } + kvMgr := utils.NewStoreManager(pd, keepalive.ClientParameters{ + Time: cfg.GRPCKeepaliveTime, + Timeout: cfg.GRPCKeepaliveTimeout, + }, tconf) + return kvMgr, nil +} + +func RunForceFlush(ctx context.Context, cfg *ForceFlushConfig) error { + pdMgr, err := dialPD(ctx, &cfg.Config) + if err != nil { + return err + } + defer pdMgr.Close() + + stores, err := createStoreManager(pdMgr.GetPDClient(), &cfg.Config) + if err != nil { + return err + } + defer stores.Close() + + tikvs, err := getAllTiKVs(ctx, pdMgr.GetPDClient()) + if err != nil { + return err + } + eg, ectx := errgroup.WithContext(ctx) + log.Info("About to start force flushing.", zap.Stringer("stores-pattern", cfg.StoresPattern)) + for _, s := range tikvs { + if !cfg.StoresPattern.MatchString(s.Address) || engine.IsTiFlash(s) { + log.Info("Skipping TiFlash or not matched TiKV.", + zap.Uint64("store", s.GetId()), zap.String("addr", s.Address), zap.Bool("tiflash?", engine.IsTiFlash(s))) + continue + } + + log.Info("Starting force flush TiKV.", zap.Uint64("store", s.GetId()), zap.String("addr", s.Address)) + eg.Go(func() error { + var logBackupCli 
logbackup.LogBackupClient + err := stores.WithConn(ectx, s.GetId(), func(cc *grpc.ClientConn) { + logBackupCli = logbackup.NewLogBackupClient(cc) + }) + if err != nil { + return err + } + + resp, err := logBackupCli.FlushNow(ectx, &logbackup.FlushNowRequest{}) + if err != nil { + return errors.Annotatef(err, "failed to flush store %d", s.GetId()) + } + for _, res := range resp.Results { + if !res.Success { + return errors.Errorf("failed to flush task %s at store %d: %s", res.TaskName, s.GetId(), res.ErrorMessage) + } + log.Info("Force flushed task of TiKV store.", zap.Uint64("store", s.Id), zap.String("task", res.TaskName)) + } + return nil + }) + } + return eg.Wait() +} diff --git a/br/tests/br_test_utils.sh b/br/tests/br_test_utils.sh index 9102415a77e14..9d2c79fe5a452 100644 --- a/br/tests/br_test_utils.sh +++ b/br/tests/br_test_utils.sh @@ -22,6 +22,8 @@ wait_log_checkpoint_advance() { sleep 10 local current_ts=$(python3 -c "import time; print(int(time.time() * 1000) << 18)") echo "current ts: $current_ts" + + run_br --skip-goleak --pd $PD_ADDR operator force-flush || echo "failed to run force flush, the case may be slower." i=0 while true; do # extract the checkpoint ts of the log backup task. If there is some error, the checkpoint ts should be empty diff --git a/go.mod b/go.mod index 717f361b6b62a..cb41fd518de71 100644 --- a/go.mod +++ b/go.mod @@ -86,8 +86,8 @@ require ( github.com/pingcap/errors v0.11.5-0.20240318064555-6bd07397691f github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 github.com/pingcap/fn v1.0.0 - github.com/pingcap/kvproto v0.0.0-20240924080114-4a3e17f5e62d github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d + github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63 github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5 github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e github.com/pingcap/tipb v0.0.0-20241022082558-0607513e7fa4 diff --git a/go.sum b/go.sum index 8e44cdbd52ad3..36c4cf5c83bf1 100644 --- a/go.sum +++ b/go.sum @@ -670,8 +670,8 @@ github.com/pingcap/fn v1.0.0/go.mod h1:u9WZ1ZiOD1RpNhcI42RucFh/lBuzTu6rw88a+oF2Z github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20240924080114-4a3e17f5e62d h1:vSdKTrF6kpcd56G5BLP0Bz88Nho2tDo7IR1+oSsBAfc= -github.com/pingcap/kvproto v0.0.0-20240924080114-4a3e17f5e62d/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63 h1:ThJ7ddLJVk96Iai2HDeyJGuuhrcBtc3HwYKJfuKPLsI= +github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d h1:y3EueKVfVykdpTyfUnQGqft0ud+xVFuCdp1XkVL0X1E= From e5113d2aeed098752ff26974f87214c77d0c385e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B1=B1=E5=B2=9A?= <36239017+YuJuncen@users.noreply.github.com> Date: Thu, 16 Jan 2025 12:31:28 +0800 Subject: [PATCH 12/17] br: copy full backup to pitr storage (#57716) close pingcap/tidb#58685 --- 
br/pkg/checkpoint/restore.go | 3 + br/pkg/errors/errors.go | 20 +- br/pkg/gluetidb/glue.go | 4 + br/pkg/logutil/logging.go | 24 + br/pkg/logutil/rate.go | 12 +- br/pkg/restore/import_mode_switcher.go | 2 +- br/pkg/restore/log_client/BUILD.bazel | 6 +- br/pkg/restore/log_client/client.go | 120 +++- br/pkg/restore/log_client/client_test.go | 36 +- .../log_client/compacted_file_strategy.go | 100 ++- br/pkg/restore/log_client/export_test.go | 16 + br/pkg/restore/log_client/log_file_manager.go | 70 +- br/pkg/restore/log_client/migration.go | 25 + br/pkg/restore/log_client/migration_test.go | 170 +++++ br/pkg/restore/log_client/ssts.go | 124 ++++ br/pkg/restore/restorer.go | 7 +- br/pkg/restore/snap_client/BUILD.bazel | 13 +- br/pkg/restore/snap_client/client.go | 53 +- br/pkg/restore/snap_client/import.go | 50 +- br/pkg/restore/snap_client/pitr_collector.go | 509 ++++++++++++++ .../snap_client/pitr_collector_test.go | 271 ++++++++ br/pkg/restore/utils/BUILD.bazel | 1 + br/pkg/restore/utils/rewrite_rule.go | 63 +- br/pkg/storage/BUILD.bazel | 1 + br/pkg/storage/ks3.go | 46 ++ br/pkg/storage/local.go | 15 + br/pkg/storage/locking.go | 52 ++ br/pkg/storage/locking_test.go | 12 + br/pkg/storage/s3.go | 19 + br/pkg/storage/storage.go | 10 + br/pkg/stream/BUILD.bazel | 3 + br/pkg/stream/stream_metas.go | 461 +++++++++++-- br/pkg/stream/stream_metas_test.go | 244 ++++++- br/pkg/summary/summary.go | 11 + br/pkg/task/BUILD.bazel | 4 + br/pkg/task/common.go | 4 + br/pkg/task/operator/force_flush.go | 23 - br/pkg/task/operator/list_migration.go | 8 +- br/pkg/task/operator/migrate_to.go | 20 +- br/pkg/task/operator/prepare_snap.go | 20 + br/pkg/task/restore.go | 36 +- br/pkg/task/stream.go | 109 ++- br/pkg/utils/iter/iter.go | 21 + errors.toml | 10 + pkg/executor/brie.go | 7 +- pkg/metrics/BUILD.bazel | 1 + pkg/metrics/br.go | 70 ++ pkg/metrics/metrics.go | 4 + pkg/util/BUILD.bazel | 3 + pkg/util/util.go | 7 + pkg/util/util_test.go | 16 + tests/realtikvtest/brietest/BUILD.bazel | 14 + tests/realtikvtest/brietest/main_test.go | 3 +- tests/realtikvtest/brietest/pitr_test.go | 636 ++++++++++++++++++ 54 files changed, 3327 insertions(+), 262 deletions(-) create mode 100644 br/pkg/restore/log_client/ssts.go create mode 100644 br/pkg/restore/snap_client/pitr_collector.go create mode 100644 br/pkg/restore/snap_client/pitr_collector_test.go create mode 100644 pkg/metrics/br.go create mode 100644 tests/realtikvtest/brietest/pitr_test.go diff --git a/br/pkg/checkpoint/restore.go b/br/pkg/checkpoint/restore.go index 2e55cc3eb81c2..62a1c7e360e54 100644 --- a/br/pkg/checkpoint/restore.go +++ b/br/pkg/checkpoint/restore.go @@ -19,6 +19,7 @@ import ( "encoding/json" "time" + "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/pdutil" @@ -137,6 +138,8 @@ type CheckpointMetadataForSnapshotRestore struct { UpstreamClusterID uint64 `json:"upstream-cluster-id"` RestoredTS uint64 `json:"restored-ts"` SchedulersConfig *pdutil.ClusterConfig `json:"schedulers-config"` + + RestoreUUID uuid.UUID `json:"restore-uuid"` } func LoadCheckpointMetadataForSnapshotRestore( diff --git a/br/pkg/errors/errors.go b/br/pkg/errors/errors.go index 3bd2ab776ccb3..e96855edc6a2d 100644 --- a/br/pkg/errors/errors.go +++ b/br/pkg/errors/errors.go @@ -31,15 +31,17 @@ func IsContextCanceled(err error) bool { // BR errors. 
var ( - ErrUnknown = errors.Normalize("internal error", errors.RFCCodeText("BR:Common:ErrUnknown")) - ErrInvalidArgument = errors.Normalize("invalid argument", errors.RFCCodeText("BR:Common:ErrInvalidArgument")) - ErrUndefinedRestoreDbOrTable = errors.Normalize("undefined restore databases or tables", errors.RFCCodeText("BR:Common:ErrUndefinedDbOrTable")) - ErrVersionMismatch = errors.Normalize("version mismatch", errors.RFCCodeText("BR:Common:ErrVersionMismatch")) - ErrFailedToConnect = errors.Normalize("failed to make gRPC channels", errors.RFCCodeText("BR:Common:ErrFailedToConnect")) - ErrInvalidMetaFile = errors.Normalize("invalid metafile: %s", errors.RFCCodeText("BR:Common:ErrInvalidMetaFile")) - ErrEnvNotSpecified = errors.Normalize("environment variable not found", errors.RFCCodeText("BR:Common:ErrEnvNotSpecified")) - ErrUnsupportedOperation = errors.Normalize("the operation is not supported", errors.RFCCodeText("BR:Common:ErrUnsupportedOperation")) - ErrInvalidRange = errors.Normalize("invalid restore range", errors.RFCCodeText("BR:Common:ErrInvalidRange")) + ErrUnknown = errors.Normalize("internal error", errors.RFCCodeText("BR:Common:ErrUnknown")) + ErrInvalidArgument = errors.Normalize("invalid argument", errors.RFCCodeText("BR:Common:ErrInvalidArgument")) + ErrUndefinedRestoreDbOrTable = errors.Normalize("undefined restore databases or tables", errors.RFCCodeText("BR:Common:ErrUndefinedDbOrTable")) + ErrVersionMismatch = errors.Normalize("version mismatch", errors.RFCCodeText("BR:Common:ErrVersionMismatch")) + ErrFailedToConnect = errors.Normalize("failed to make gRPC channels", errors.RFCCodeText("BR:Common:ErrFailedToConnect")) + ErrInvalidMetaFile = errors.Normalize("invalid metafile: %s", errors.RFCCodeText("BR:Common:ErrInvalidMetaFile")) + ErrEnvNotSpecified = errors.Normalize("environment variable not found", errors.RFCCodeText("BR:Common:ErrEnvNotSpecified")) + ErrUnsupportedOperation = errors.Normalize("the operation is not supported", errors.RFCCodeText("BR:Common:ErrUnsupportedOperation")) + ErrInvalidRange = errors.Normalize("invalid restore range", errors.RFCCodeText("BR:Common:ErrInvalidRange")) + ErrMigrationNotFound = errors.Normalize("no migration found", errors.RFCCodeText("BR:Common:ErrMigrationNotFound")) + ErrMigrationVersionNotSupported = errors.Normalize("the migration version isn't supported", errors.RFCCodeText("BR:Common:ErrMigrationVersionNotSupported")) ErrPDUpdateFailed = errors.Normalize("failed to update PD", errors.RFCCodeText("BR:PD:ErrPDUpdateFailed")) ErrPDLeaderNotFound = errors.Normalize("PD leader not found", errors.RFCCodeText("BR:PD:ErrPDLeaderNotFound")) diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go index 9514ae4f5f7a1..622e82d9a52ac 100644 --- a/br/pkg/gluetidb/glue.go +++ b/br/pkg/gluetidb/glue.go @@ -55,6 +55,10 @@ type Glue struct { startDomainMu *sync.Mutex } +func WrapSession(se sessiontypes.Session) glue.Session { + return &tidbSession{se: se} +} + type tidbSession struct { se sessiontypes.Session } diff --git a/br/pkg/logutil/logging.go b/br/pkg/logutil/logging.go index 353ca6622e896..22480e7b12d48 100644 --- a/br/pkg/logutil/logging.go +++ b/br/pkg/logutil/logging.go @@ -15,7 +15,9 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/lightning/metric" "github.com/pingcap/tidb/pkg/util/redact" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -356,3 +358,25 @@ func (b 
HexBytes) String() string { func (b HexBytes) MarshalJSON() ([]byte, error) { return json.Marshal(hex.EncodeToString(b)) } + +func MarshalHistogram(m prometheus.Histogram) zapcore.ObjectMarshaler { + return zapcore.ObjectMarshalerFunc(func(mal zapcore.ObjectEncoder) error { + if m == nil { + return nil + } + + met := metric.ReadHistogram(m) + if met == nil || met.Histogram == nil { + return nil + } + + hist := met.Histogram + for _, b := range hist.GetBucket() { + key := fmt.Sprintf("lt_%f", b.GetUpperBound()) + mal.AddUint64(key, b.GetCumulativeCount()) + } + mal.AddUint64("count", hist.GetSampleCount()) + mal.AddFloat64("total", hist.GetSampleSum()) + return nil + }) +} diff --git a/br/pkg/logutil/rate.go b/br/pkg/logutil/rate.go index db7df537a81b4..f7d4d2e79c8a0 100644 --- a/br/pkg/logutil/rate.go +++ b/br/pkg/logutil/rate.go @@ -4,6 +4,7 @@ package logutil import ( "fmt" + "math" "time" "github.com/pingcap/log" @@ -12,14 +13,6 @@ import ( "go.uber.org/zap" ) -// MetricTableCreatedCounter counts how many tables created. -// TODO: when br decided to introduce Prometheus, move this to its metric package. -var MetricTableCreatedCounter = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "BR", - Name: "table_created", - Help: "The count of tables have been created.", -}) - // RateTracer is a trivial rate tracer based on a prometheus counter. // It traces the average speed from it was created. type RateTracer struct { @@ -46,6 +39,9 @@ func (r *RateTracer) Rate() float64 { // RateAt returns the rate until some instant. This function is mainly for testing. // WARN: the counter value for calculating is still its CURRENT VALUE. func (r *RateTracer) RateAt(instant time.Time) float64 { + if r.Counter == nil { + return math.NaN() + } return (metric.ReadCounter(r.Counter) - r.base) / instant.Sub(r.start).Seconds() } diff --git a/br/pkg/restore/import_mode_switcher.go b/br/pkg/restore/import_mode_switcher.go index 0bec6a4d1e384..33552ca0734bd 100644 --- a/br/pkg/restore/import_mode_switcher.go +++ b/br/pkg/restore/import_mode_switcher.go @@ -139,7 +139,7 @@ func (switcher *ImportModeSwitcher) GoSwitchToImportMode( } // Create a new context for the goroutine - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) switcher.cancel = cancel // [important!] 
switch tikv mode into import at the beginning diff --git a/br/pkg/restore/log_client/BUILD.bazel b/br/pkg/restore/log_client/BUILD.bazel index 7fb781e7ad0ef..13bfa3bac9334 100644 --- a/br/pkg/restore/log_client/BUILD.bazel +++ b/br/pkg/restore/log_client/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "log_file_map.go", "log_split_strategy.go", "migration.go", + "ssts.go", ], importpath = "github.com/pingcap/tidb/br/pkg/restore/log_client", visibility = ["//visibility:public"], @@ -43,11 +44,13 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/model", + "//pkg/tablecodec", "//pkg/util", "//pkg/util/codec", "//pkg/util/redact", "//pkg/util/sqlexec", "//pkg/util/table-filter", + "@com_github_docker_go_units//:go-units", "@com_github_fatih_color//:color", "@com_github_gogo_protobuf//proto", "@com_github_opentracing_opentracing_go//:opentracing-go", @@ -90,7 +93,7 @@ go_test( ], embed = [":log_client"], flaky = True, - shard_count = 45, + shard_count = 50, deps = [ "//br/pkg/errors", "//br/pkg/glue", @@ -119,6 +122,7 @@ go_test( "//pkg/util/sqlexec", "//pkg/util/table-filter", "@com_github_docker_go_units//:go-units", + "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/brpb", diff --git a/br/pkg/restore/log_client/client.go b/br/pkg/restore/log_client/client.go index bfd977afe1fa6..36b4f4f98d9d1 100644 --- a/br/pkg/restore/log_client/client.go +++ b/br/pkg/restore/log_client/client.go @@ -25,8 +25,10 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" + "github.com/docker/go-units" "github.com/fatih/color" "github.com/gogo/protobuf/proto" "github.com/opentracing/opentracing-go" @@ -40,6 +42,7 @@ import ( "github.com/pingcap/tidb/br/pkg/conn" "github.com/pingcap/tidb/br/pkg/conn/util" "github.com/pingcap/tidb/br/pkg/encryption" + berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" @@ -178,6 +181,7 @@ func NewSstRestoreManager( type LogClient struct { *LogFileManager + logRestoreManager *LogRestoreManager sstRestoreManager *SstRestoreManager @@ -209,6 +213,22 @@ type LogClient struct { // checkpoint information for log restore useCheckpoint bool + + logFilesStat logFilesStatistic + restoreStat restoreStatistics +} + +type restoreStatistics struct { + // restoreSSTKVSize is the total size (Original KV length) of KV pairs restored from SST files. + restoreSSTKVSize uint64 + // restoreSSTKVCount is the total number of KV pairs restored from SST files. + restoreSSTKVCount uint64 + // restoreSSTPhySize is the total size of SST files after encoding to SST files. + // this may be smaller than kv length due to compression or common prefix optimization. + restoreSSTPhySize uint64 + // restoreSSTTakes is the total time taken for restoring SST files. + // the unit is nanoseconds, hence it can be converted between `time.Duration` directly. + restoreSSTTakes uint64 } // NewRestoreClient returns a new RestoreClient. 
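The restoreStatistics struct above is filled in concurrently: the hunk below adds atomic.AddUint64 calls for each restored SST set, and restoreSSTTakes accumulates nanoseconds so RestoreSSTStatisticFields can convert it straight to a time.Duration. A compact sketch of that accumulate-then-report pattern, with illustrative field names rather than the real LogClient members:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// stats mimics restoreStatistics: plain integers bumped atomically by many
// workers, with the elapsed time stored as nanoseconds in a uint64.
type stats struct {
	kvCount uint64
	kvBytes uint64
	takesNs uint64
}

func main() {
	var s stats
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			begin := time.Now()
			time.Sleep(10 * time.Millisecond) // pretend to restore one SST batch
			atomic.AddUint64(&s.kvCount, 1000)
			atomic.AddUint64(&s.kvBytes, 4<<20)
			atomic.AddUint64(&s.takesNs, uint64(time.Since(begin)))
		}()
	}
	wg.Wait()

	// Nanoseconds convert directly to time.Duration when reporting.
	takes := time.Duration(atomic.LoadUint64(&s.takesNs))
	fmt.Printf("kvs=%d bytes=%d take=%s avg=%.2f MiB/s\n",
		s.kvCount, s.kvBytes, takes, float64(s.kvBytes)/takes.Seconds()/(1<<20))
}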
@@ -250,13 +270,34 @@ func (rc *LogClient) Close(ctx context.Context) { log.Info("Restore client closed") } -func (rc *LogClient) RestoreCompactedSstFiles( +func rewriteRulesFor(sst SSTs, rules *restoreutils.RewriteRules) (*restoreutils.RewriteRules, error) { + if r, ok := sst.(RewrittenSSTs); ok { + rewritten := r.RewrittenTo() + if rewritten != sst.TableID() { + rewriteRules := rules.Clone() + if !rewriteRules.RewriteSourceTableID(rewritten, sst.TableID()) { + return nil, errors.Annotatef( + berrors.ErrUnknown, + "table rewritten from a table id (%d) to (%d) which doesn't exist in the stream", + rewritten, + sst.TableID(), + ) + } + log.Info("Rewritten rewrite rules.", zap.Stringer("rules", rewriteRules), zap.Int64("table_id", sst.TableID()), zap.Int64("rewritten_to", rewritten)) + return rewriteRules, nil + } + } + return rules, nil +} + +func (rc *LogClient) RestoreSSTFiles( ctx context.Context, - compactionsIter iter.TryNextor[*backuppb.LogFileSubcompaction], + compactionsIter iter.TryNextor[SSTs], rules map[int64]*restoreutils.RewriteRules, importModeSwitcher *restore.ImportModeSwitcher, onProgress func(int64), ) error { + begin := time.Now() backupFileSets := make([]restore.BackupFileSet, 0, 8) // Collect all items from the iterator in advance to avoid blocking during restoration. // This approach ensures that we have all necessary data ready for processing, @@ -267,15 +308,25 @@ func (rc *LogClient) RestoreCompactedSstFiles( return r.Err } i := r.Item - rewriteRules, ok := rules[i.Meta.TableId] + + tid := i.TableID() + if r, ok := i.(RewrittenSSTs); ok { + tid = r.RewrittenTo() + } + rewriteRules, ok := rules[tid] if !ok { - log.Warn("[Compacted SST Restore] Skipping excluded table during restore.", zap.Int64("table_id", i.Meta.TableId)) + log.Warn("[Compacted SST Restore] Skipping excluded table during restore.", zap.Int64("table_id", i.TableID())) continue } + newRules, err := rewriteRulesFor(i, rewriteRules) + if err != nil { + return err + } + set := restore.BackupFileSet{ - TableID: i.Meta.TableId, - SSTFiles: i.SstOutputs, - RewriteRules: rewriteRules, + TableID: i.TableID(), + SSTFiles: i.GetSSTs(), + RewriteRules: newRules, } backupFileSets = append(backupFileSets, set) } @@ -311,7 +362,30 @@ func (rc *LogClient) RestoreCompactedSstFiles( if err != nil { return errors.Trace(err) } - return rc.sstRestoreManager.restorer.WaitUntilFinish() + err = rc.sstRestoreManager.restorer.WaitUntilFinish() + + for _, files := range backupFileSets { + for _, f := range files.SSTFiles { + log.Info("Collected file.", zap.Uint64("total_kv", f.TotalKvs), zap.Uint64("total_bytes", f.TotalBytes), zap.Uint64("size", f.Size_)) + atomic.AddUint64(&rc.restoreStat.restoreSSTKVCount, f.TotalKvs) + atomic.AddUint64(&rc.restoreStat.restoreSSTKVSize, f.TotalBytes) + atomic.AddUint64(&rc.restoreStat.restoreSSTPhySize, f.Size_) + } + } + atomic.AddUint64(&rc.restoreStat.restoreSSTTakes, uint64(time.Since(begin))) + return err +} + +func (rc *LogClient) RestoreSSTStatisticFields(pushTo *[]zapcore.Field) { + takes := time.Duration(rc.restoreStat.restoreSSTTakes) + fields := []zapcore.Field{ + zap.Uint64("restore-sst-kv-count", rc.restoreStat.restoreSSTKVCount), + zap.Uint64("restore-sst-kv-size", rc.restoreStat.restoreSSTKVSize), + zap.Uint64("restore-sst-physical-size (after compression)", rc.restoreStat.restoreSSTPhySize), + zap.Duration("restore-sst-total-take", takes), + zap.String("average-speed (sst)", units.HumanSize(float64(rc.restoreStat.restoreSSTKVSize)/takes.Seconds())+"/s"), + } + *pushTo = 
append(*pushTo, fields...) } func (rc *LogClient) SetRawKVBatchClient( @@ -512,13 +586,29 @@ func (rc *LogClient) InitCheckpointMetadataForLogRestore( return gcRatio, nil } -func (rc *LogClient) GetMigrations(ctx context.Context) ([]*backuppb.Migration, error) { - ext := stream.MigerationExtension(rc.storage) +type LockedMigrations struct { + Migs []*backuppb.Migration + ReadLock storage.RemoteLock +} + +func (rc *LogClient) GetMigrations(ctx context.Context) (*LockedMigrations, error) { + ext := stream.MigrationExtension(rc.storage) migs, err := ext.Load(ctx) if err != nil { return nil, errors.Trace(err) } - return migs.ListAll(), nil + + ms := migs.ListAll() + readLock, err := ext.GetReadLock(ctx, "restore stream") + if err != nil { + return nil, err + } + + lms := &LockedMigrations{ + Migs: ms, + ReadLock: readLock, + } + return lms, nil } func (rc *LogClient) InstallLogFileManager(ctx context.Context, startTS, restoreTS uint64, metadataDownloadBatchSize uint, @@ -540,6 +630,8 @@ func (rc *LogClient) InstallLogFileManager(ctx context.Context, startTS, restore if err != nil { return err } + rc.logFilesStat = logFilesStatistic{} + rc.LogFileManager.Stats = &rc.logFilesStat return nil } @@ -1470,15 +1562,15 @@ func (rc *LogClient) UpdateSchemaVersion(ctx context.Context) error { // It uses a region splitter to handle the splitting logic based on the provided rules and checkpoint sets. func (rc *LogClient) WrapCompactedFilesIterWithSplitHelper( ctx context.Context, - compactedIter iter.TryNextor[*backuppb.LogFileSubcompaction], + compactedIter iter.TryNextor[SSTs], rules map[int64]*restoreutils.RewriteRules, checkpointSets map[string]struct{}, updateStatsFn func(uint64, uint64), splitSize uint64, splitKeys int64, -) (iter.TryNextor[*backuppb.LogFileSubcompaction], error) { +) (iter.TryNextor[SSTs], error) { client := split.NewClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, maxSplitKeysOnce, 3) - wrapper := restore.PipelineRestorerWrapper[*backuppb.LogFileSubcompaction]{ + wrapper := restore.PipelineRestorerWrapper[SSTs]{ PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, splitSize, splitKeys), } strategy := NewCompactedFileSplitStrategy(rules, checkpointSets, updateStatsFn) diff --git a/br/pkg/restore/log_client/client_test.go b/br/pkg/restore/log_client/client_test.go index 504d7bb798d72..d78b49f126bf3 100644 --- a/br/pkg/restore/log_client/client_test.go +++ b/br/pkg/restore/log_client/client_test.go @@ -1662,11 +1662,11 @@ func TestCompactedSplitStrategy(t *testing.T) { } cases := []struct { - MockSubcompationIter iter.TryNextor[*backuppb.LogFileSubcompaction] + MockSubcompationIter iter.TryNextor[logclient.SSTs] ExpectRegionEndKeys [][]byte }{ { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), fakeSubCompactionWithOneSst(2, 100, 48*units.MiB, 300), @@ -1681,7 +1681,7 @@ func TestCompactedSplitStrategy(t *testing.T) { }, }, { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), fakeSubCompactionWithOneSst(1, 100, 32*units.MiB, 10), @@ -1697,7 +1697,7 @@ func TestCompactedSplitStrategy(t *testing.T) { }, }, { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithOneSst(1, 200, 
32*units.MiB, 200), fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), @@ -1722,7 +1722,7 @@ func TestCompactedSplitStrategy(t *testing.T) { mockPDCli.SetRegions(oriRegions) client := split.NewClient(mockPDCli, nil, nil, 100, 4) - wrapper := restore.PipelineRestorerWrapper[*backuppb.LogFileSubcompaction]{ + wrapper := restore.PipelineRestorerWrapper[logclient.SSTs]{ PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, 4*units.MB, 400), } @@ -1777,14 +1777,14 @@ func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { } cases := []struct { - MockSubcompationIter iter.TryNextor[*backuppb.LogFileSubcompaction] + MockSubcompationIter iter.TryNextor[logclient.SSTs] CheckpointSet map[string]struct{} ProcessedKVCount int ProcessedSize int ExpectRegionEndKeys [][]byte }{ { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), fakeSubCompactionWithOneSst(2, 100, 48*units.MiB, 300), @@ -1804,7 +1804,7 @@ func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { }, }, { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), fakeSubCompactionWithOneSst(1, 100, 32*units.MiB, 10), @@ -1823,7 +1823,7 @@ func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { }, }, { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), @@ -1846,7 +1846,7 @@ func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { }, }, { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithOneSst(1, 200, 32*units.MiB, 200), fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), @@ -1869,7 +1869,7 @@ func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { }, }, { - iter.FromSlice([]*backuppb.LogFileSubcompaction{ + iter.FromSlice([]logclient.SSTs{ fakeSubCompactionWithOneSst(1, 100, 16*units.MiB, 100), fakeSubCompactionWithMultiSsts(1, 200, 32*units.MiB, 200), fakeSubCompactionWithOneSst(2, 100, 32*units.MiB, 300), @@ -1900,7 +1900,7 @@ func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { mockPDCli.SetRegions(oriRegions) client := split.NewClient(mockPDCli, nil, nil, 100, 4) - wrapper := restore.PipelineRestorerWrapper[*backuppb.LogFileSubcompaction]{ + wrapper := restore.PipelineRestorerWrapper[logclient.SSTs]{ PipelineRegionsSplitter: split.NewPipelineRegionsSplitter(client, 4*units.MB, 400), } totalSize := 0 @@ -1932,8 +1932,8 @@ func TestCompactedSplitStrategyWithCheckpoint(t *testing.T) { } } -func fakeSubCompactionWithMultiSsts(tableID, rowID int64, length uint64, num uint64) *backuppb.LogFileSubcompaction { - return &backuppb.LogFileSubcompaction{ +func fakeSubCompactionWithMultiSsts(tableID, rowID int64, length uint64, num uint64) logclient.SSTs { + return &logclient.CompactedSSTs{&backuppb.LogFileSubcompaction{ Meta: &backuppb.LogFileSubcompactionMeta{ TableId: tableID, }, @@ -1953,10 +1953,10 @@ func fakeSubCompactionWithMultiSsts(tableID, rowID int64, length uint64, num uin TotalKvs: num, }, }, - } + }} } -func fakeSubCompactionWithOneSst(tableID, rowID int64, length uint64, num uint64) 
*backuppb.LogFileSubcompaction { - return &backuppb.LogFileSubcompaction{ +func fakeSubCompactionWithOneSst(tableID, rowID int64, length uint64, num uint64) logclient.SSTs { + return &logclient.CompactedSSTs{&backuppb.LogFileSubcompaction{ Meta: &backuppb.LogFileSubcompactionMeta{ TableId: tableID, }, @@ -1969,7 +1969,7 @@ func fakeSubCompactionWithOneSst(tableID, rowID int64, length uint64, num uint64 TotalKvs: num, }, }, - } + }} } func fakeFile(tableID, rowID int64, length uint64, num int64) *backuppb.DataFileInfo { diff --git a/br/pkg/restore/log_client/compacted_file_strategy.go b/br/pkg/restore/log_client/compacted_file_strategy.go index 9637cf2e529b6..1e104de5c5aea 100644 --- a/br/pkg/restore/log_client/compacted_file_strategy.go +++ b/br/pkg/restore/log_client/compacted_file_strategy.go @@ -7,9 +7,9 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" + "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/restore/split" - restoreutils "github.com/pingcap/tidb/br/pkg/restore/utils" - "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/br/pkg/restore/utils" "go.uber.org/zap" ) @@ -23,10 +23,10 @@ type CompactedFileSplitStrategy struct { checkpointFileProgressFn func(uint64, uint64) } -var _ split.SplitStrategy[*backuppb.LogFileSubcompaction] = &CompactedFileSplitStrategy{} +var _ split.SplitStrategy[SSTs] = &CompactedFileSplitStrategy{} func NewCompactedFileSplitStrategy( - rules map[int64]*restoreutils.RewriteRules, + rules map[int64]*utils.RewriteRules, checkpointsSet map[string]struct{}, updateStatsFn func(uint64, uint64), ) *CompactedFileSplitStrategy { @@ -37,19 +37,50 @@ func NewCompactedFileSplitStrategy( } } -func (cs *CompactedFileSplitStrategy) Accumulate(subCompaction *backuppb.LogFileSubcompaction) { - splitHelper, exist := cs.TableSplitter[subCompaction.Meta.TableId] +type sstIdentity struct { + EffectiveID int64 + RewriteBoundary *utils.RewriteRules +} + +func (cs *CompactedFileSplitStrategy) inspect(ssts SSTs) sstIdentity { + r, ok := ssts.(RewrittenSSTs) + if !ok || r.RewrittenTo() == ssts.TableID() { + return sstIdentity{ + EffectiveID: ssts.TableID(), + RewriteBoundary: nil, + } + } + + rule := utils.GetRewriteRuleOfTable(ssts.TableID(), r.RewrittenTo(), 0, map[int64]int64{}, false) + + return sstIdentity{ + EffectiveID: r.RewrittenTo(), + RewriteBoundary: rule, + } +} + +func (cs *CompactedFileSplitStrategy) Accumulate(ssts SSTs) { + identity := cs.inspect(ssts) + + splitHelper, exist := cs.TableSplitter[identity.EffectiveID] if !exist { splitHelper = split.NewSplitHelper() - cs.TableSplitter[subCompaction.Meta.TableId] = splitHelper + log.Info("Initialized splitter for table.", + zap.Int64("table-id", ssts.TableID()), zap.Int64("effective-id", identity.EffectiveID), zap.Stringer("rewrite-boundary", identity.RewriteBoundary)) + cs.TableSplitter[identity.EffectiveID] = splitHelper } - for _, f := range subCompaction.SstOutputs { - startKey := codec.EncodeBytes(nil, f.StartKey) - endKey := codec.EncodeBytes(nil, f.EndKey) + for _, f := range ssts.GetSSTs() { + startKey, endKey, err := utils.GetRewriteRawKeys(f, identity.RewriteBoundary) + if err != nil { + log.Panic("[unreachable] the rewrite rule doesn't match the SST file, this shouldn't happen...", + logutil.ShortError(err), zap.Stringer("rule", identity.RewriteBoundary), zap.Int64("effective-id", identity.EffectiveID), + zap.Stringer("file", f), + ) + } cs.AccumulateCount += 1 if f.TotalKvs == 0 || f.Size_ == 0 { - log.Error("No key-value pairs in 
subcompaction", zap.String("name", f.Name)) + log.Warn("No key-value pairs in sst files", zap.String("name", f.Name)) continue } // The number of MVCC entries in the compacted SST files can be excessive. @@ -82,14 +113,38 @@ func (cs *CompactedFileSplitStrategy) ShouldSplit() bool { return cs.AccumulateCount > (4096 / impactFactor) } -func (cs *CompactedFileSplitStrategy) ShouldSkip(subCompaction *backuppb.LogFileSubcompaction) bool { - _, exist := cs.Rules[subCompaction.Meta.TableId] - if !exist { - log.Info("skip for no rule files", zap.Int64("tableID", subCompaction.Meta.TableId)) +func hasRule[T any](ssts SSTs, rules map[int64]T) bool { + if r, ok := ssts.(RewrittenSSTs); ok { + _, exist := rules[r.RewrittenTo()] + // If the SST has been rewritten (logically has another table ID), + // don't check table ID in its physical file, or we may mistakenly match it + // with another table that has the same ID. + // + // An example, if there are tables: + // + // - Foo.ID = 1 (Backup Data) + // - Foo.ID = 10 (Upstream after Rewriting) + // - Bar.ID = 1 (Upstream Natively) + // + // If we treat `Foo` in the backup data as if it had table ID `1`, + // the restore progress may match it with `Bar`. + return exist + } + + if _, exist := rules[ssts.TableID()]; exist { + return true + } + + return false +} + +func (cs *CompactedFileSplitStrategy) ShouldSkip(ssts SSTs) bool { + if !hasRule(ssts, cs.Rules) { + log.Warn("skip for no rule files", zap.Int64("tableID", ssts.TableID()), zap.Any("ssts", ssts)) return true } - sstOutputs := make([]*backuppb.File, 0, len(subCompaction.SstOutputs)) - for _, sst := range subCompaction.SstOutputs { + sstOutputs := make([]*backuppb.File, 0, len(ssts.GetSSTs())) + for _, sst := range ssts.GetSSTs() { if _, ok := cs.checkpointSets[sst.Name]; !ok { sstOutputs = append(sstOutputs, sst) } else { @@ -100,12 +155,15 @@ func (cs *CompactedFileSplitStrategy) ShouldSkip(subCompaction *backuppb.LogFile } } if len(sstOutputs) == 0 { - log.Info("all files in sub compaction skipped") + log.Info("all files in SST set skipped", zap.Stringer("ssts", ssts)) return true } - if len(sstOutputs) != len(subCompaction.SstOutputs) { - log.Info("partial files in sub compaction skipped due to checkpoint") - subCompaction.SstOutputs = sstOutputs + if len(sstOutputs) != len(ssts.GetSSTs()) { + log.Info( + "partial files in SST set skipped due to checkpoint", + zap.Stringer("ssts", ssts), zap.Int("origin", len(ssts.GetSSTs())), zap.Int("output", len(sstOutputs)), + ) + ssts.SetSSTs(sstOutputs) return false } return false diff --git a/br/pkg/restore/log_client/export_test.go b/br/pkg/restore/log_client/export_test.go index 9c95409c9d754..cb5b6697dd173 100644 --- a/br/pkg/restore/log_client/export_test.go +++ b/br/pkg/restore/log_client/export_test.go @@ -127,3 +127,19 @@ func (helper *FakeStreamMetadataHelper) ReadFile( ) ([]byte, error) { return helper.Data[offset : offset+length], nil } + +func (w *WithMigrations) AddIngestedSSTs(extPath string) { + w.fullBackups = append(w.fullBackups, extPath) +} + +func (w *WithMigrations) SetRestoredTS(ts uint64) { + w.restoredTS = ts +} + +func (w *WithMigrations) SetStartTS(ts uint64) { + w.startTS = ts +} + +func (w *WithMigrations) CompactionDirs() []string { + return w.compactionDirs +} diff --git a/br/pkg/restore/log_client/log_file_manager.go b/br/pkg/restore/log_client/log_file_manager.go index 4c2992467a2ab..d2738568c8d21 100644 --- a/br/pkg/restore/log_client/log_file_manager.go +++ b/br/pkg/restore/log_client/log_file_manager.go @@ -9,6 +9,7 @@ 
import ( "fmt" "strings" "sync" + "sync/atomic" "time" "github.com/pingcap/errors" @@ -26,8 +27,6 @@ import ( "go.uber.org/zap" ) -var TotalEntryCount int64 - // MetaIter is the type of iterator of metadata files' content. type MetaIter = iter.TryNextor[*backuppb.Metadata] @@ -86,6 +85,12 @@ type streamMetadataHelper interface { ParseToMetadata(rawMetaData []byte) (*backuppb.Metadata, error) } +type logFilesStatistic struct { + NumEntries int64 + NumFiles uint64 + Size uint64 +} + // LogFileManager is the manager for log files of a certain restoration, // which supports read / filter from the log backup archive with static start TS / restore TS. type LogFileManager struct { @@ -107,6 +112,10 @@ type LogFileManager struct { withMigrations *WithMigrations metadataDownloadBatchSize uint + + // The output channel for statistics. + // This will be collected when reading the metadata. + Stats *logFilesStatistic } // LogFileManagerInit is the config needed for initializing the log file manager. @@ -310,6 +319,18 @@ func (rc *LogFileManager) LoadDDLFilesAndCountDMLFiles(ctx context.Context) ([]L return rc.collectDDLFilesAndPrepareCache(ctx, mg) } +type loadDMLFilesConfig struct { + Statistic *logFilesStatistic +} + +type loadDMLFilesOption func(*loadDMLFilesConfig) + +func lDOptWithStatistics(s *logFilesStatistic) loadDMLFilesOption { + return func(c *loadDMLFilesConfig) { + c.Statistic = s + } +} + // LoadDMLFiles loads all DML files needs to be restored in the restoration. // This function returns a stream, because there are usually many DML files need to be restored. func (rc *LogFileManager) LoadDMLFiles(ctx context.Context) (LogIter, error) { @@ -334,7 +355,11 @@ func (rc *LogFileManager) FilterMetaFiles(ms MetaNameIter) MetaGroupIter { return true } // count the progress - TotalEntryCount += d.NumberOfEntries + if rc.Stats != nil { + atomic.AddInt64(&rc.Stats.NumEntries, d.NumberOfEntries) + atomic.AddUint64(&rc.Stats.NumFiles, 1) + atomic.AddUint64(&rc.Stats.Size, d.Length) + } return !d.IsMeta }) return DDLMetaGroup{ @@ -347,8 +372,43 @@ func (rc *LogFileManager) FilterMetaFiles(ms MetaNameIter) MetaGroupIter { } // Fetch compactions that may contain file less than the TS. 
-func (rc *LogFileManager) GetCompactionIter(ctx context.Context) iter.TryNextor[*backuppb.LogFileSubcompaction] { - return rc.withMigrations.Compactions(ctx, rc.storage) +func (rc *LogFileManager) GetCompactionIter(ctx context.Context) iter.TryNextor[SSTs] { + return iter.Map(rc.withMigrations.Compactions(ctx, rc.storage), func(c *backuppb.LogFileSubcompaction) SSTs { + return &CompactedSSTs{c} + }) +} + +func (rc *LogFileManager) GetIngestedSSTs(ctx context.Context) iter.TryNextor[SSTs] { + return iter.FlatMap(rc.withMigrations.IngestedSSTs(ctx, rc.storage), func(c *backuppb.IngestedSSTs) iter.TryNextor[SSTs] { + remap := map[int64]int64{} + for _, r := range c.RewrittenTables { + remap[r.AncestorUpstream] = r.Upstream + } + return iter.TryMap(iter.FromSlice(c.Files), func(f *backuppb.File) (SSTs, error) { + sst := &CopiedSST{File: f} + if id, ok := remap[sst.TableID()]; ok && id != sst.TableID() { + sst.Rewritten = backuppb.RewrittenTableID{ + AncestorUpstream: sst.TableID(), + Upstream: id, + } + } + return sst, nil + }) + }) +} + +func (rc *LogFileManager) CountExtraSSTTotalKVs(ctx context.Context) (int64, error) { + count := int64(0) + ssts := iter.ConcatAll(rc.GetCompactionIter(ctx), rc.GetIngestedSSTs(ctx)) + for err, ssts := range iter.AsSeq(ctx, ssts) { + if err != nil { + return 0, errors.Trace(err) + } + for _, sst := range ssts.GetSSTs() { + count += int64(sst.TotalKvs) + } + } + return count, nil } // the kv entry with ts, the ts is decoded from entry. diff --git a/br/pkg/restore/log_client/migration.go b/br/pkg/restore/log_client/migration.go index a7b4307e0f568..8bcbd6b79ad02 100644 --- a/br/pkg/restore/log_client/migration.go +++ b/br/pkg/restore/log_client/migration.go @@ -19,6 +19,7 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/br/pkg/stream" "github.com/pingcap/tidb/br/pkg/utils/iter" ) @@ -144,6 +145,7 @@ func (builder *WithMigrationsBuilder) coarseGrainedFilter(mig *backuppb.Migratio func (builder *WithMigrationsBuilder) Build(migs []*backuppb.Migration) WithMigrations { skipmap := make(metaSkipMap) compactionDirs := make([]string, 0, 8) + fullBackups := make([]string, 0, 8) for _, mig := range migs { // TODO: deal with TruncatedTo and DestructPrefix @@ -155,10 +157,15 @@ func (builder *WithMigrationsBuilder) Build(migs []*backuppb.Migration) WithMigr for _, c := range mig.Compactions { compactionDirs = append(compactionDirs, c.Artifacts) } + + fullBackups = append(fullBackups, mig.IngestedSstPaths...) 
} withMigrations := WithMigrations{ skipmap: skipmap, compactionDirs: compactionDirs, + fullBackups: fullBackups, + restoredTS: builder.restoredTS, + startTS: builder.startTS, } return withMigrations } @@ -210,6 +217,9 @@ func (mwm *MetaWithMigrations) Physicals(groupIndexIter GroupIndexIter) Physical type WithMigrations struct { skipmap metaSkipMap compactionDirs []string + fullBackups []string + restoredTS uint64 + startTS uint64 } func (wm *WithMigrations) Metas(metaNameIter MetaNameIter) MetaMigrationsIter { @@ -238,3 +248,18 @@ func (wm *WithMigrations) Compactions(ctx context.Context, s storage.ExternalSto return Subcompactions(ctx, name, s) }) } + +func (wm *WithMigrations) IngestedSSTs(ctx context.Context, s storage.ExternalStorage) iter.TryNextor[*backuppb.IngestedSSTs] { + filteredOut := iter.FilterOut(stream.LoadIngestedSSTs(ctx, s, wm.fullBackups), func(ebk stream.IngestedSSTsGroup) bool { + gts := ebk.GroupTS() + // Note: if a backup happens during restoring, though its `backupts` is less than the ingested ssts' groupts, + // it is still possible that it backed the restored stuffs up. + // When combining with PiTR, those contents may be restored twice. But it seems harmless for now. + return !ebk.GroupFinished() || gts < wm.startTS || gts > wm.restoredTS + }) + return iter.FlatMap(filteredOut, func(ebk stream.IngestedSSTsGroup) iter.TryNextor[*backuppb.IngestedSSTs] { + return iter.Map(iter.FromSlice(ebk), func(p stream.PathedIngestedSSTs) *backuppb.IngestedSSTs { + return p.IngestedSSTs + }) + }) +} diff --git a/br/pkg/restore/log_client/migration_test.go b/br/pkg/restore/log_client/migration_test.go index 5368d7416dadf..b03d6bd08116a 100644 --- a/br/pkg/restore/log_client/migration_test.go +++ b/br/pkg/restore/log_client/migration_test.go @@ -16,11 +16,15 @@ package logclient_test import ( "context" + "errors" "fmt" "testing" + "github.com/google/uuid" + "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" logclient "github.com/pingcap/tidb/br/pkg/restore/log_client" + "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/utils/iter" "github.com/stretchr/testify/require" ) @@ -350,3 +354,169 @@ func TestMigrations(t *testing.T) { } } } + +type efOP func(*backuppb.IngestedSSTs) + +func extFullBkup(ops ...efOP) *backuppb.IngestedSSTs { + ef := &backuppb.IngestedSSTs{} + for _, op := range ops { + op(ef) + } + return ef +} + +func finished() efOP { + return func(ef *backuppb.IngestedSSTs) { + ef.Finished = true + } +} + +func makeID() efOP { + id := uuid.New() + return func(ef *backuppb.IngestedSSTs) { + ef.BackupUuid = id[:] + } +} + +func prefix(pfx string) efOP { + return func(ef *backuppb.IngestedSSTs) { + ef.FilesPrefixHint = pfx + } +} + +func asIfTS(ts uint64) efOP { + return func(ef *backuppb.IngestedSSTs) { + ef.AsIfTs = ts + } +} + +func pef(t *testing.T, fb *backuppb.IngestedSSTs, sn int, s storage.ExternalStorage) string { + path := fmt.Sprintf("extbackupmeta_%08d", sn) + bs, err := fb.Marshal() + if err != nil { + require.NoError(t, err) + } + + err = s.WriteFile(context.Background(), path, bs) + require.NoError(t, err) + return path +} + +// tmp creates a temporary storage. 
+func tmp(t *testing.T) *storage.LocalStorage { + tmpDir := t.TempDir() + s, err := storage.NewLocalStorage(tmpDir) + require.NoError(t, err) + s.IgnoreEnoentForDelete = true + return s +} + +func assertFullBackupPfxs(t *testing.T, it iter.TryNextor[*backuppb.IngestedSSTs], items ...string) { + actItems := []string{} + for err, item := range iter.AsSeq(context.Background(), it) { + require.NoError(t, err) + actItems = append(actItems, item.FilesPrefixHint) + } + require.ElementsMatch(t, actItems, items) +} + +func TestNotRestoreIncomplete(t *testing.T) { + ctx := context.Background() + strg := tmp(t) + ebk := extFullBkup(prefix("001"), asIfTS(90), makeID()) + wm := new(logclient.WithMigrations) + wm.AddIngestedSSTs(pef(t, ebk, 0, strg)) + wm.SetRestoredTS(91) + + assertFullBackupPfxs(t, wm.IngestedSSTs(ctx, strg)) +} + +func TestRestoreSegmented(t *testing.T) { + ctx := context.Background() + strg := tmp(t) + id := makeID() + ebk1 := extFullBkup(prefix("001"), id) + ebk2 := extFullBkup(prefix("002"), asIfTS(90), finished(), id) + wm := new(logclient.WithMigrations) + wm.AddIngestedSSTs(pef(t, ebk1, 0, strg)) + wm.AddIngestedSSTs(pef(t, ebk2, 1, strg)) + wm.SetRestoredTS(91) + + assertFullBackupPfxs(t, wm.IngestedSSTs(ctx, strg), "001", "002") +} + +func TestFilteredOut(t *testing.T) { + ctx := context.Background() + strg := tmp(t) + id := makeID() + ebk1 := extFullBkup(prefix("001"), id) + ebk2 := extFullBkup(prefix("002"), asIfTS(90), finished(), id) + ebk3 := extFullBkup(prefix("003"), asIfTS(10), finished(), makeID()) + wm := new(logclient.WithMigrations) + wm.AddIngestedSSTs(pef(t, ebk1, 0, strg)) + wm.AddIngestedSSTs(pef(t, ebk2, 1, strg)) + wm.AddIngestedSSTs(pef(t, ebk3, 2, strg)) + wm.SetRestoredTS(89) + wm.SetStartTS(42) + + assertFullBackupPfxs(t, wm.IngestedSSTs(ctx, strg)) +} + +func TestMultiRestores(t *testing.T) { + ctx := context.Background() + strg := tmp(t) + id := makeID() + id2 := makeID() + + ebka1 := extFullBkup(prefix("001"), id) + ebkb1 := extFullBkup(prefix("101"), id2) + ebkb2 := extFullBkup(prefix("102"), asIfTS(88), finished(), id2) + ebka2 := extFullBkup(prefix("002"), asIfTS(90), finished(), id) + + wm := new(logclient.WithMigrations) + wm.AddIngestedSSTs(pef(t, ebka1, 0, strg)) + wm.AddIngestedSSTs(pef(t, ebkb1, 2, strg)) + wm.AddIngestedSSTs(pef(t, ebkb2, 3, strg)) + wm.AddIngestedSSTs(pef(t, ebka2, 4, strg)) + wm.SetRestoredTS(91) + + assertFullBackupPfxs(t, wm.IngestedSSTs(ctx, strg), "101", "102", "001", "002") +} + +func TestMultiFilteredOutOne(t *testing.T) { + ctx := context.Background() + strg := tmp(t) + id := makeID() + id2 := makeID() + + ebka1 := extFullBkup(prefix("001"), id) + ebkb1 := extFullBkup(prefix("101"), id2) + ebkb2 := extFullBkup(prefix("102"), asIfTS(88), finished(), id2) + ebka2 := extFullBkup(prefix("002"), asIfTS(90), finished(), id) + + wm := new(logclient.WithMigrations) + wm.AddIngestedSSTs(pef(t, ebka1, 0, strg)) + wm.AddIngestedSSTs(pef(t, ebkb1, 2, strg)) + wm.AddIngestedSSTs(pef(t, ebkb2, 3, strg)) + wm.AddIngestedSSTs(pef(t, ebka2, 4, strg)) + wm.SetRestoredTS(89) + + assertFullBackupPfxs(t, wm.IngestedSSTs(ctx, strg), "101", "102") +} + +func TestError(t *testing.T) { + ctx := context.Background() + strg := tmp(t) + id := makeID() + ebk1 := extFullBkup(prefix("001"), id, finished()) + wm := new(logclient.WithMigrations) + wm.AddIngestedSSTs(pef(t, ebk1, 0, strg)) + wm.SetRestoredTS(91) + + failpoint.EnableCall("github.com/pingcap/tidb/br/pkg/stream/load-ingested-ssts-err", func(err *error) { + *err = errors.New("not my 
fault") + }) + + it := wm.IngestedSSTs(ctx, strg) + require.ErrorContains(t, it.TryNext(ctx).Err, "not my fault") +} diff --git a/br/pkg/restore/log_client/ssts.go b/br/pkg/restore/log_client/ssts.go new file mode 100644 index 0000000000000..778ba853e57b7 --- /dev/null +++ b/br/pkg/restore/log_client/ssts.go @@ -0,0 +1,124 @@ +// Copyright 2024 PingCAP, Inc. Licensed under Apache-2.0. + +package logclient + +import ( + "encoding/hex" + "fmt" + "log" + "sync/atomic" + + backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/tidb/pkg/tablecodec" + "go.uber.org/zap" +) + +var ( + _ RewrittenSSTs = &CopiedSST{} +) + +// RewrittenSSTs is an extension to the `SSTs` that needs extra key rewriting. +// This allows a SST being restored "as if" it in another table. +// +// The name "rewritten" means that the SST has already been rewritten somewhere else -- +// before importing it, we need "replay" the rewrite on it. +// +// For example, if a SST contains content of table `1`. And `RewrittenTo` returns `10`, +// the downstream wants to rewrite table `10` to `100`: +// - When searching for rewrite rules for the SSTs, we will use the table ID `10`(`RewrittenTo()`). +// - When importing the SST, we will use the rewrite rule `1`(`TableID()`) -> `100`(RewriteRule). +type RewrittenSSTs interface { + // RewrittenTo returns the table ID that the SST should be treated as + // when doing filtering. + RewrittenTo() int64 +} + +// SSTs is an interface that represents a collection of SST files. +type SSTs interface { + fmt.Stringer + + // TableID returns the ID of the table associated with the SST files. + // This should be the same as the physical content's table ID. + TableID() int64 + // GetSSTs returns a slice of pointers to backuppb.File, representing the SST files. + GetSSTs() []*backuppb.File + // SetSSTs allows the user to override the internal SSTs to be restored. 
+ // The input SST set should already be a subset of `GetSSTs.` + SetSSTs([]*backuppb.File) +} + +type CompactedSSTs struct { + *backuppb.LogFileSubcompaction +} + +func (s *CompactedSSTs) String() string { + return fmt.Sprintf("CompactedSSTs: %s", s.Meta) +} + +func (s *CompactedSSTs) TableID() int64 { + return s.Meta.TableId +} + +func (s *CompactedSSTs) GetSSTs() []*backuppb.File { + return s.SstOutputs +} + +func (s *CompactedSSTs) SetSSTs(files []*backuppb.File) { + s.SstOutputs = files +} + +type CopiedSST struct { + File *backuppb.File + Rewritten backuppb.RewrittenTableID + + cachedTableID atomic.Int64 +} + +func (s *CopiedSST) String() string { + return fmt.Sprintf("AddedSSTs: %s", s.File) +} + +func (s *CopiedSST) TableID() int64 { + cached := s.cachedTableID.Load() + if cached == 0 { + id := tablecodec.DecodeTableID(s.File.StartKey) + id2 := tablecodec.DecodeTableID(s.File.EndKey) + if id != id2 { + panic(fmt.Sprintf( + "yet restoring a SST with two adjacent tables not supported, they are %d and %d (start key = %s; end key = %s)", + id, + id2, + hex.EncodeToString(s.File.StartKey), + hex.EncodeToString(s.File.EndKey), + )) + } + s.cachedTableID.Store(id) + return id + } + + return cached +} + +func (s *CopiedSST) GetSSTs() []*backuppb.File { + if s.File == nil { + return nil + } + return []*backuppb.File{s.File} +} + +func (s *CopiedSST) SetSSTs(fs []*backuppb.File) { + if len(fs) == 0 { + s.File = nil + } + if len(fs) == 1 { + s.File = fs[0] + } + log.Panic("Too many files passed to AddedSSTs.SetSSTs.", zap.Any("input", fs)) +} + +func (s *CopiedSST) RewrittenTo() int64 { + if s.Rewritten.Upstream > 0 { + return s.Rewritten.Upstream + } + return s.TableID() +} diff --git a/br/pkg/restore/restorer.go b/br/pkg/restore/restorer.go index 9d999af9c09fc..c3e9034ce2001 100644 --- a/br/pkg/restore/restorer.go +++ b/br/pkg/restore/restorer.go @@ -276,7 +276,7 @@ func (m *MultiTablesRestorer) GoRestore(onProgress func(int64), batchFileSets .. m.ectx = opentracing.ContextWithSpan(m.ectx, span1) } - for _, batchFileSet := range batchFileSets { + for i, batchFileSet := range batchFileSets { if m.ectx.Err() != nil { log.Warn("Restoring encountered error and already stopped, give up remained files.", logutil.ShortError(m.ectx.Err())) @@ -287,15 +287,16 @@ func (m *MultiTablesRestorer) GoRestore(onProgress func(int64), batchFileSets .. 
} filesReplica := batchFileSet m.fileImporter.PauseForBackpressure() + cx := logutil.ContextWithField(m.ectx, zap.Int("sn", i)) m.workerPool.ApplyOnErrorGroup(m.eg, func() (restoreErr error) { fileStart := time.Now() defer func() { if restoreErr == nil { - log.Info("import files done", zap.Duration("take", time.Since(fileStart))) + logutil.CL(cx).Info("import files done", zap.Duration("take", time.Since(fileStart))) onProgress(int64(len(filesReplica))) } }() - if importErr := m.fileImporter.Import(m.ectx, filesReplica...); importErr != nil { + if importErr := m.fileImporter.Import(cx, filesReplica...); importErr != nil { return errors.Trace(importErr) } diff --git a/br/pkg/restore/snap_client/BUILD.bazel b/br/pkg/restore/snap_client/BUILD.bazel index 5df612e4750e6..8983aa6e671ef 100644 --- a/br/pkg/restore/snap_client/BUILD.bazel +++ b/br/pkg/restore/snap_client/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "client.go", "import.go", "pipeline_items.go", + "pitr_collector.go", "placement_rule_manager.go", "systable_restore.go", "tikv_sender.go", @@ -29,6 +30,8 @@ go_library( "//br/pkg/restore/split", "//br/pkg/restore/utils", "//br/pkg/storage", + "//br/pkg/stream", + "//br/pkg/streamhelper", "//br/pkg/summary", "//br/pkg/utils", "//br/pkg/version", @@ -39,6 +42,8 @@ go_library( "//pkg/meta", "//pkg/meta/model", "//pkg/parser/model", + "//pkg/metrics", + "//pkg/parser/ast", "//pkg/parser/mysql", "//pkg/tablecodec", "//pkg/util", @@ -55,9 +60,11 @@ go_library( "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_pingcap_kvproto//pkg/metapb", "@com_github_pingcap_log//:log", + "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//util", "@com_github_tikv_pd_client//:client", "@com_github_tikv_pd_client//http", + "@io_etcd_go_etcd_client_v3//:client", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//keepalive", "@org_golang_google_grpc//status", @@ -76,13 +83,14 @@ go_test( "export_test.go", "import_test.go", "main_test.go", + "pitr_collector_test.go", "placement_rule_manager_test.go", "systable_restore_test.go", "tikv_sender_test.go", ], embed = [":snap_client"], flaky = True, - shard_count = 19, + shard_count = 23, deps = [ "//br/pkg/errors", "//br/pkg/glue", @@ -93,6 +101,8 @@ go_test( "//br/pkg/restore/internal/import_client", "//br/pkg/restore/split", "//br/pkg/restore/utils", + "//br/pkg/storage", + "//br/pkg/stream", "//br/pkg/utils", "//pkg/domain", "//pkg/kv", @@ -105,6 +115,7 @@ go_test( "//pkg/types", "//pkg/util", "//pkg/util/codec", + "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/brpb", diff --git a/br/pkg/restore/snap_client/client.go b/br/pkg/restore/snap_client/client.go index 5883ce0e2e13f..4b143fb53330d 100644 --- a/br/pkg/restore/snap_client/client.go +++ b/br/pkg/restore/snap_client/client.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "github.com/google/uuid" "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -53,6 +54,7 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/metrics" tidbutil "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/redact" kvutil "github.com/tikv/client-go/v2/util" @@ -76,6 +78,7 @@ const minBatchDdlSize = 1 type SnapClient struct { restorer restore.SstRestorer + importer *SnapFileImporter // Use a closure to lazy load checkpoint runner getRestorerFn 
func(*checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType]) restore.SstRestorer // Tool clients used by SnapClient @@ -153,6 +156,10 @@ type SnapClient struct { checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType] checkpointChecksum map[int64]*checkpoint.ChecksumItem + + // restoreUUID is the UUID of this restore. + // restore from a checkpoint inherits the same restoreUUID. + restoreUUID uuid.UUID } // NewRestoreClient returns a new RestoreClient. @@ -326,6 +333,7 @@ func (rc *SnapClient) InitCheckpoint( if err != nil { return checkpointSetWithTableID, nil, errors.Trace(err) } + rc.restoreUUID = meta.RestoreUUID if meta.UpstreamClusterID != rc.backupMeta.ClusterId { return checkpointSetWithTableID, nil, errors.Errorf( @@ -377,10 +385,13 @@ func (rc *SnapClient) InitCheckpoint( } } else { // initialize the checkpoint metadata since it is the first time to restore. + restoreID := uuid.New() meta := &checkpoint.CheckpointMetadataForSnapshotRestore{ UpstreamClusterID: rc.backupMeta.ClusterId, RestoredTS: rc.backupMeta.EndVersion, + RestoreUUID: restoreID, } + rc.restoreUUID = restoreID // a nil config means undo function if config != nil { meta.SchedulersConfig = &pdutil.ClusterConfig{Schedulers: config.Schedulers, ScheduleCfg: config.ScheduleCfg} @@ -422,6 +433,35 @@ func makeDBPool(size uint, dbFactory func() (*tidallocdb.DB, error)) ([]*tidallo return dbPool, nil } +func (rc *SnapClient) InstallPiTRSupport(ctx context.Context, deps PiTRCollDep) error { + collector, err := newPiTRColl(ctx, deps) + if err != nil { + return errors.Trace(err) + } + if !collector.enabled { + return nil + } + if rc.IsIncremental() { + // Even there were an error, don't return it to confuse the user... + _ = collector.close() + return errors.Annotatef(berrors.ErrStreamLogTaskExist, "it seems there is a log backup task exists, "+ + "if an incremental restore were performed to such cluster, log backup cannot properly handle this, "+ + "the restore will be aborted, you may stop the log backup task, then restore, finally restart the task") + } + + collector.restoreUUID = rc.restoreUUID + if collector.restoreUUID == (uuid.UUID{}) { + collector.restoreUUID = uuid.New() + log.Warn("UUID not found(checkpoint not enabled?), generating a new UUID for backup.", + zap.Stringer("uuid", collector.restoreUUID)) + } + rc.importer.beforeIngestCallbacks = append(rc.importer.beforeIngestCallbacks, collector.onBatch) + rc.importer.closeCallbacks = append(rc.importer.closeCallbacks, func(sfi *SnapFileImporter) error { + return collector.close() + }) + return nil +} + // Init create db connection and domain for storage. func (rc *SnapClient) Init(g glue.Glue, store kv.Storage) error { // setDB must happen after set PolicyMode. @@ -532,7 +572,6 @@ func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.Storage metaClient := split.NewClient(rc.pdClient, rc.pdHTTPClient, rc.tlsConf, maxSplitKeysOnce, rc.storeCount+1, splitClientOpts...) importCli := importclient.NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf) - var fileImporter *SnapFileImporter opt := NewSnapFileImporterOptions( rc.cipher, metaClient, importCli, backend, rc.rewriteMode, stores, rc.concurrencyPerStore, createCallBacks, closeCallBacks, @@ -543,23 +582,23 @@ func (rc *SnapClient) initClients(ctx context.Context, backend *backuppb.Storage mode = Txn } // for raw/txn mode. 
use backupMeta.ApiVersion to create fileImporter - fileImporter, err = NewSnapFileImporter(ctx, rc.backupMeta.ApiVersion, mode, opt) + rc.importer, err = NewSnapFileImporter(ctx, rc.backupMeta.ApiVersion, mode, opt) if err != nil { return errors.Trace(err) } // Raw/Txn restore are not support checkpoint for now rc.getRestorerFn = func(checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType]) restore.SstRestorer { - return restore.NewSimpleSstRestorer(ctx, fileImporter, rc.workerPool, nil) + return restore.NewSimpleSstRestorer(ctx, rc.importer, rc.workerPool, nil) } } else { // or create a fileImporter with the cluster API version - fileImporter, err = NewSnapFileImporter( + rc.importer, err = NewSnapFileImporter( ctx, rc.dom.Store().GetCodec().GetAPIVersion(), TiDBFull, opt) if err != nil { return errors.Trace(err) } rc.getRestorerFn = func(checkpointRunner *checkpoint.CheckpointRunner[checkpoint.RestoreKeyType, checkpoint.RestoreValueType]) restore.SstRestorer { - return restore.NewMultiTablesRestorer(ctx, fileImporter, rc.workerPool, checkpointRunner) + return restore.NewMultiTablesRestorer(ctx, rc.importer, rc.workerPool, checkpointRunner) } } return nil @@ -864,7 +903,7 @@ func (rc *SnapClient) createTables( func (rc *SnapClient) createTablesBatch(ctx context.Context, tables []*metautil.Table, newTS uint64) ([]*CreatedTable, error) { eg, ectx := errgroup.WithContext(ctx) - rater := logutil.TraceRateOver(logutil.MetricTableCreatedCounter) + rater := logutil.TraceRateOver(metrics.RestoreTableCreatedCount) workers := tidbutil.NewWorkerPool(uint(len(rc.dbPool)), "Create Tables Worker") numOfTables := len(tables) createdTables := struct { @@ -948,7 +987,7 @@ func (rc *SnapClient) createTablesSingle( ) ([]*CreatedTable, error) { eg, ectx := errgroup.WithContext(ctx) workers := tidbutil.NewWorkerPool(uint(len(dbPool)), "DDL workers") - rater := logutil.TraceRateOver(logutil.MetricTableCreatedCounter) + rater := logutil.TraceRateOver(metrics.RestoreTableCreatedCount) createdTables := struct { sync.Mutex tables []*CreatedTable diff --git a/br/pkg/restore/snap_client/import.go b/br/pkg/restore/snap_client/import.go index c5289f27ac99a..fc0b471bda240 100644 --- a/br/pkg/restore/snap_client/import.go +++ b/br/pkg/restore/snap_client/import.go @@ -41,6 +41,7 @@ import ( "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/util/codec" kvutil "github.com/tikv/client-go/v2/util" "go.uber.org/zap" @@ -141,7 +142,8 @@ type SnapFileImporter struct { downloadTokensMap *storeTokenChannelMap ingestTokensMap *storeTokenChannelMap - closeCallbacks []func(*SnapFileImporter) error + closeCallbacks []func(*SnapFileImporter) error + beforeIngestCallbacks []func(context.Context, restore.BatchBackupFileSet) (afterIngest func() error, err error) concurrencyPerStore uint @@ -372,6 +374,18 @@ func (importer *SnapFileImporter) Import( ctx context.Context, backupFileSets ...restore.BackupFileSet, ) error { + delayCbs := []func() error{} + for i, cb := range importer.beforeIngestCallbacks { + d, err := cb(ctx, backupFileSets) + if err != nil { + return errors.Annotatef(err, "failed to executing the callback #%d", i) + } + if d != nil { + delayCbs = append(delayCbs, d) + } + } + + importBegin := time.Now() // Rewrite the start key and end key of file to scan regions startKey, endKey, err := importer.getKeyRangeForFiles(backupFileSets) if err != nil { 
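The two-phase hook added to SnapFileImporter above works like this: every beforeIngestCallbacks entry sees the batch before any download or ingest happens and may hand back an afterIngest closure; Import collects those closures in delayCbs and runs them only after the batch has been ingested (the loop appears in the next hunk). The pitrCollector introduced later in this patch attaches through exactly these hooks. A rough, simplified sketch of the protocol follows; the importer and batch types are stand-ins, not the real SnapFileImporter or restore.BatchBackupFileSet.

package main

import "fmt"

// batch stands in for restore.BatchBackupFileSet.
type batch []string

// beforeIngest mirrors the callback shape: inspect the batch now, optionally
// return work to run once the batch has been ingested.
type beforeIngest func(b batch) (afterIngest func() error, err error)

type importer struct {
	hooks []beforeIngest
}

func (im *importer) Import(b batch) error {
	var delayed []func() error
	for i, cb := range im.hooks {
		after, err := cb(b)
		if err != nil {
			return fmt.Errorf("callback #%d failed: %w", i, err)
		}
		if after != nil {
			delayed = append(delayed, after)
		}
	}

	fmt.Println("downloading and ingesting", b) // the real importer talks to TiKV here

	// Only after a successful ingest do the delayed callbacks run, so a collector
	// can start uploads early and merely wait for them here.
	for i, cb := range delayed {
		if err := cb(); err != nil {
			return fmt.Errorf("delayed callback #%d failed: %w", i, err)
		}
	}
	return nil
}

func main() {
	im := &importer{hooks: []beforeIngest{
		func(b batch) (func() error, error) {
			fmt.Println("start uploading", b, "to the log backup storage")
			return func() error {
				fmt.Println("uploads of", b, "finished; record them in the metadata")
				return nil
			}, nil
		},
	}}
	_ = im.Import(batch{"sst-1", "sst-2"})
}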
@@ -386,7 +400,7 @@ func (importer *SnapFileImporter) Import( return errors.Trace(errScanRegion) } - log.Debug("scan regions", logutil.Key("start key", startKey), logutil.Key("end key", endKey), zap.Int("count", len(regionInfos))) + logutil.CL(ctx).Debug("scan regions", logutil.Key("start key", startKey), logutil.Key("end key", endKey), zap.Int("count", len(regionInfos))) start := time.Now() // Try to download and ingest the file in every region for _, regionInfo := range regionInfos { @@ -394,18 +408,18 @@ func (importer *SnapFileImporter) Import( // Try to download file. downloadMetas, errDownload := importer.download(ctx, info, backupFileSets, importer.cipher, importer.apiVersion) if errDownload != nil { - log.Warn("download file failed, retry later", + logutil.CL(ctx).Warn("download file failed, retry later", logutil.Region(info.Region), logutil.Key("startKey", startKey), logutil.Key("endKey", endKey), logutil.ShortError(errDownload)) return errors.Trace(errDownload) } - log.Debug("download file done", zap.Stringer("take", time.Since(start)), + logutil.CL(ctx).Debug("download file done", zap.Stringer("take", time.Since(start)), logutil.Key("start", startKey), logutil.Key("end", endKey)) start = time.Now() if errIngest := importer.ingest(ctx, info, downloadMetas); errIngest != nil { - log.Warn("ingest file failed, retry later", + logutil.CL(ctx).Warn("ingest file failed, retry later", logutil.Key("start", startKey), logutil.Key("end", endKey), logutil.SSTMetas(downloadMetas), @@ -413,14 +427,22 @@ func (importer *SnapFileImporter) Import( zap.Error(errIngest)) return errors.Trace(errIngest) } - log.Debug("ingest file done", logutil.Key("start", startKey), logutil.Key("end", endKey), zap.Stringer("take", time.Since(start))) + logutil.CL(ctx).Debug("ingest file done", logutil.Key("start", startKey), logutil.Key("end", endKey), zap.Stringer("take", time.Since(start))) } return nil }, utils.NewImportSSTBackoffer()) if err != nil { - log.Error("import sst file failed after retry, stop the whole progress", restore.ZapBatchBackupFileSet(backupFileSets), zap.Error(err)) + logutil.CL(ctx).Error("import sst file failed after retry, stop the whole progress", restore.ZapBatchBackupFileSet(backupFileSets), zap.Error(err)) return errors.Trace(err) } + metrics.RestoreImportFileSeconds.Observe(time.Since(importBegin).Seconds()) + + for i, cb := range delayCbs { + if err := cb(); err != nil { + return errors.Annotatef(err, "failed to execute the delaied callback #%d", i) + } + } + for _, files := range backupFileSets { for _, f := range files.SSTFiles { summary.CollectSuccessUnit(summary.TotalKV, 1, f.TotalKvs) @@ -456,7 +478,7 @@ func getSSTMetaFromFile( } // Get the column family of the file by the file name. 
- var cfName string + cfName := file.GetCf() if strings.Contains(file.GetName(), restoreutils.DefaultCFName) { cfName = restoreutils.DefaultCFName } else if strings.Contains(file.GetName(), restoreutils.WriteCFName) { @@ -531,15 +553,15 @@ func (importer *SnapFileImporter) download( failpoint.Inject("restore-storage-error", func(val failpoint.Value) { msg := val.(string) - log.Debug("failpoint restore-storage-error injected.", zap.String("msg", msg)) + logutil.CL(ctx).Debug("failpoint restore-storage-error injected.", zap.String("msg", msg)) e = errors.Annotate(e, msg) }) failpoint.Inject("restore-gRPC-error", func(_ failpoint.Value) { - log.Warn("the connection to TiKV has been cut by a neko, meow :3") + logutil.CL(ctx).Warn("the connection to TiKV has been cut by a neko, meow :3") e = status.Error(codes.Unavailable, "the connection to TiKV has been cut by a neko, meow :3") }) if isDecryptSstErr(e) { - log.Info("fail to decrypt when download sst, try again with no-crypt") + logutil.CL(ctx).Info("fail to decrypt when download sst, try again with no-crypt") if importer.kvMode == Raw || importer.kvMode == Txn { downloadMetas, e = importer.downloadRawKVSST(ctx, regionInfo, filesGroup, nil, apiVersion) } else { @@ -840,7 +862,7 @@ func (importer *SnapFileImporter) ingest( break } // do not get region info, wait a second and GetRegion() again. - log.Warn("ingest get region by key return nil", logutil.Region(info.Region), + logutil.CL(ctx).Warn("ingest get region by key return nil", logutil.Region(info.Region), logutil.SSTMetas(downloadMetas), ) time.Sleep(time.Second) @@ -850,7 +872,7 @@ func (importer *SnapFileImporter) ingest( if !split.CheckRegionEpoch(newInfo, info) { return errors.Trace(berrors.ErrKVEpochNotMatch) } - log.Debug("ingest sst returns not leader error, retry it", + logutil.CL(ctx).Debug("ingest sst returns not leader error, retry it", logutil.SSTMetas(downloadMetas), logutil.Region(info.Region), zap.Stringer("newLeader", newInfo.Leader)) @@ -893,7 +915,7 @@ func (importer *SnapFileImporter) ingestSSTs( Context: reqCtx, Ssts: sstMetas, } - log.Debug("ingest SSTs", logutil.SSTMetas(sstMetas), logutil.Leader(leader)) + logutil.CL(ctx).Debug("ingest SSTs", logutil.SSTMetas(sstMetas), logutil.Leader(leader)) resp, err := importer.importClient.MultiIngest(ctx, leader.GetStoreId(), req) return resp, errors.Trace(err) } diff --git a/br/pkg/restore/snap_client/pitr_collector.go b/br/pkg/restore/snap_client/pitr_collector.go new file mode 100644 index 0000000000000..37a04156062cd --- /dev/null +++ b/br/pkg/restore/snap_client/pitr_collector.go @@ -0,0 +1,509 @@ +package snapclient + +import ( + "context" + "fmt" + "path/filepath" + "sync" + "time" + + "github.com/google/uuid" + "github.com/pingcap/errors" + pb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/log" + berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/br/pkg/stream" + "github.com/pingcap/tidb/br/pkg/streamhelper" + "github.com/pingcap/tidb/br/pkg/summary" + "github.com/pingcap/tidb/pkg/metrics" + "github.com/pingcap/tidb/pkg/util" + "github.com/tikv/client-go/v2/oracle" + pd "github.com/tikv/pd/client" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +type persistCall struct { + cx context.Context + cb func(error) +} + +// persisterHandle is a handle to the background writer persisting the metadata. 
+type persisterHandle struct { + hnd chan<- persistCall +} + +// close releases the handle. +func (w persisterHandle) close() { + if w.hnd != nil { + close(w.hnd) + } +} + +// write starts a request to persist the current metadata to the external storage. +// +// all modification before the `write` call will be persisted in the external storage +// after this returns. +func (w persisterHandle) write(ctx context.Context) error { + // A buffer here is necessrary. + // Or once the writerCall finished too fastly, it calls the callback before the `select` + // block entered, we may lose the response. + ch := make(chan error, 1) + w.hnd <- persistCall{ + cx: ctx, + cb: func(err error) { + select { + case ch <- err: + default: + log.Warn("Blocked when sending to a oneshot channel, dropping the message.", + logutil.AShortError("dropped-result", err), zap.StackSkip("caller", 1)) + } + }, + } + + select { + case err, ok := <-ch: + if !ok { + // Though the channel is never closed, we can still gracefully exit + // by canceling the context. + log.Panic("[unreachable] A channel excepted to be never closed was closed.") + } + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// goPersister spawns the background centeralized persister. +// +// this would be the sole goroutine that writes to `c.metaPath()`. +func (c *pitrCollector) goPersister() { + hnd := make(chan persistCall, 2048) + exhaust := func(f func(persistCall)) { + collect: + for { + select { + case cb, ok := <-hnd: + if !ok { + log.Warn("Early channel close. Should not happen.") + return + } + f(cb) + default: + break collect + } + } + } + + go func() { + for newCall := range hnd { + cs := []persistCall{newCall} + // Consuming all pending writes. + exhaust(func(newCall persistCall) { + cs = append(cs, newCall) + }) + + err := c.doPersistExtraBackupMeta(cs[0].cx) + + for _, c := range cs { + c.cb(err) + } + } + }() + + c.writerRoutine = persisterHandle{ + hnd: hnd, + } +} + +// pitrCollector controls the process of copying restored SSTs to a log backup storage. +// +// log backup cannot back `Ingest`ed SSTs directly. As a workaround, ingested SSTs will +// be copied to the log backup storage when restoring. Then when doing PiTR, those SSTs +// can be ingested. +// +// This provides two hooks to the `Importer` and those will be called syncrhonously. +// - `onBatch`: this starts the upload process of a batch of files, returns a closure that +// . waits unfinished upload process done. +// - `close`: flush all pending metadata and commit all SSTs to the log backup storage so +// . they are visible for a PiTR. +// The two hooks are goroutine safe. +type pitrCollector struct { + // Immutable state. + + // taskStorage is the log backup storage. + taskStorage storage.ExternalStorage + // restoreStorage is where the running restoration from. + restoreStorage storage.ExternalStorage + // name is a human-friendly identity to this restoration. + // When restart from a checkpoint, a new name will be generated. + name string + // enabled indicites whether the pitrCollector needs to work. + enabled bool + // restoreUUID is the identity of this restoration. + // This will be kept among restarting from checkpoints. + restoreUUID uuid.UUID + + // Mutable state. + ingestedSSTMeta ingestedSSTsMeta + ingestedSSTMetaLock sync.Mutex + putMigOnce sync.Once + writerRoutine persisterHandle + + // Delegates. + + // tso fetches a recent timestamp oracle from somewhere. 
+ tso func(ctx context.Context) (uint64, error) + // restoreSuccess returns whether the restore was fully done. + restoreSuccess func() bool +} + +// ingestedSSTsMeta is state of already imported SSTs. +// +// This and only this will be fully persisted to the +// ingested ssts meta in the external storage. +type ingestedSSTsMeta struct { + msg pb.IngestedSSTs + rewrites map[int64]int64 +} + +// toProtoMessage generates the protocol buffer message to persist. +func (c *ingestedSSTsMeta) toProtoMessage() *pb.IngestedSSTs { + msg := util.ProtoV1Clone(&c.msg) + for old, new := range c.rewrites { + msg.RewrittenTables = append(msg.RewrittenTables, &pb.RewrittenTableID{AncestorUpstream: old, Upstream: new}) + } + return msg +} + +func (c *pitrCollector) close() error { + defer c.writerRoutine.close() + + if !c.enabled { + return nil + } + + cx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + if !c.restoreSuccess() { + log.Warn("Backup not success, put a half-finished metadata to the log backup.", + zap.Stringer("uuid", c.restoreUUID)) + return errors.Annotatef(c.persistExtraBackupMeta(cx), "failed to persist the meta") + } + + commitTS, err := c.commit(cx) + if err != nil { + return errors.Annotate(err, "failed to commit pitrCollector") + } + log.Info("Log backup SSTs are committed.", + zap.Uint64("commitTS", commitTS), zap.String("committedTo", c.outputPath())) + + return nil +} + +func (c *pitrCollector) verifyCompatibilityFor(fileset *restore.BackupFileSet) error { + if len(fileset.RewriteRules.NewKeyspace) > 0 { + return errors.Annotate(berrors.ErrUnsupportedOperation, "keyspace rewriting isn't supported when log backup enabled") + } + for i, r := range fileset.RewriteRules.Data { + if r.NewTimestamp > 0 { + return errors.Annotatef(berrors.ErrUnsupportedOperation, + "rewrite rule #%d: rewrite timestamp isn't supported when log backup enabled", i) + } + if r.IgnoreAfterTimestamp > 0 || r.IgnoreBeforeTimestamp > 0 { + return errors.Annotatef(berrors.ErrUnsupportedOperation, + "rewrite rule #%d: truncating timestamp isn't supported when log backup enabled", i) + } + } + return nil +} + +func (c *pitrCollector) onBatch(ctx context.Context, fileSets restore.BatchBackupFileSet) (func() error, error) { + if !c.enabled { + return nil, nil + } + + if err := c.prepareMigIfNeeded(ctx); err != nil { + return nil, err + } + + begin := time.Now() + eg, ectx := errgroup.WithContext(ctx) + fileCount := 0 + for _, fileSet := range fileSets { + if err := c.verifyCompatibilityFor(&fileSet); err != nil { + return nil, err + } + + for _, file := range fileSet.SSTFiles { + fileCount += 1 + eg.Go(func() error { + if err := c.putSST(ectx, file); err != nil { + return errors.Annotatef(err, "failed to put sst %s", file.GetName()) + } + return nil + }) + } + for _, hint := range fileSet.RewriteRules.TableIDRemapHint { + eg.Go(func() error { + if err := c.putRewriteRule(ectx, hint.Origin, hint.Rewritten); err != nil { + return errors.Annotatef(err, "failed to put rewrite rule of %v", fileSet.RewriteRules) + } + return nil + }) + } + } + + waitDone := func() error { + err := eg.Wait() + if err != nil { + logutil.CL(ctx).Warn("Failed to upload SSTs for future PiTR.", logutil.ShortError(err)) + return err + } + + logutil.CL(ctx).Info("Uploaded a batch of SSTs for future PiTR.", + zap.Duration("take", time.Since(begin)), zap.Int("file-count", fileCount)) + + err = c.persistExtraBackupMeta(ctx) + if err != nil { + return errors.Annotatef(err, "failed to persist backup meta when 
finishing batch") + } + return nil + } + return waitDone, nil +} + +func (c *pitrCollector) doWithMetaLock(f func()) { + c.ingestedSSTMetaLock.Lock() + f() + c.ingestedSSTMetaLock.Unlock() +} + +// outputPath constructs the path by a relative path for outputting. +func (c *pitrCollector) outputPath(segs ...string) string { + return filepath.Join(append([]string{"v1", "ext_backups", c.name}, segs...)...) +} + +func (c *pitrCollector) metaPath() string { + return c.outputPath("extbackupmeta") +} + +func (c *pitrCollector) sstPath(name string) string { + return c.outputPath("sst_files", name) +} + +// putSST records an SST file. +func (c *pitrCollector) putSST(ctx context.Context, f *pb.File) error { + if !c.enabled { + return nil + } + + begin := time.Now() + + f = util.ProtoV1Clone(f) + out := c.sstPath(f.Name) + + copier, ok := c.taskStorage.(storage.Copier) + if !ok { + return errors.Annotatef(berrors.ErrInvalidArgument, "storage %T does not support copying", c.taskStorage) + } + spec := storage.CopySpec{ + From: f.GetName(), + To: out, + } + + copyStart := time.Now() + if err := copier.CopyFrom(ctx, c.restoreStorage, spec); err != nil { + return errors.Annotatef(err, "failed to copy sst file %s to %s, "+ + "you may check whether permissions are granted in both %s and %s, "+ + "and the two storages are provided by the same cloud vendor", + spec.From, spec.To, c.restoreStorage.URI(), c.taskStorage.URI()) + } + log.Info("Copy SST to log backup storage success.", zap.String("file", f.Name), zap.Stringer("takes", time.Since(copyStart))) + + f.Name = out + c.doWithMetaLock(func() { c.ingestedSSTMeta.msg.Files = append(c.ingestedSSTMeta.msg.Files, f) }) + + metrics.RestoreUploadSSTForPiTRSeconds.Observe(time.Since(begin).Seconds()) + return nil +} + +// putRewriteRule records a rewrite rule. +func (c *pitrCollector) putRewriteRule(_ context.Context, oldID int64, newID int64) error { + if !c.enabled { + return nil + } + var err error + c.doWithMetaLock(func() { + if oldVal, ok := c.ingestedSSTMeta.rewrites[oldID]; ok && oldVal != newID { + err = errors.Annotatef( + berrors.ErrInvalidArgument, + "pitr coll rewrite rule conflict: we had %v -> %v, but you want rewrite to %v", + oldID, + oldVal, + newID, + ) + return + } + c.ingestedSSTMeta.rewrites[oldID] = newID + }) + return err +} + +// doPersistExtraBackupMeta writes the current content of extra backup meta to the external storage. +// This isn't goroutine-safe. Please don't call it concurrently. +func (c *pitrCollector) doPersistExtraBackupMeta(ctx context.Context) (err error) { + if !c.enabled { + return nil + } + + var bs []byte + begin := time.Now() + c.doWithMetaLock(func() { + msg := c.ingestedSSTMeta.toProtoMessage() + // Here, after generating a snapshot of the current message then we can continue. + // This requires only a single active writer at anytime. + // (i.e. concurrent call to `doPersistExtraBackupMeta` may cause data race.) + // If there are many writers, the writer gets a stale snapshot may overwrite + // the latest persisted file. 
+ bs, err = msg.Marshal() + }) + + if err != nil { + return errors.Annotate(err, "failed to marsal the committing message") + } + logutil.CL(ctx).Info("Persisting extra backup meta.", + zap.Stringer("uuid", c.restoreUUID), zap.String("path", c.metaPath()), zap.Stringer("takes", time.Since(begin))) + + err = c.taskStorage.WriteFile(ctx, c.metaPath(), bs) + if err != nil { + return errors.Annotatef(err, "failed to put content to meta to %s", c.metaPath()) + } + + metrics.RestoreUploadSSTMetaForPiTRSeconds.Observe(time.Since(begin).Seconds()) + logutil.CL(ctx).Debug("Persisting extra backup meta.", + zap.Stringer("uuid", c.restoreUUID), zap.String("path", c.metaPath()), zap.Stringer("takes", time.Since(begin))) + return nil +} + +func (c *pitrCollector) persistExtraBackupMeta(ctx context.Context) (err error) { + if !c.enabled { + return nil + } + + return c.writerRoutine.write(ctx) +} + +// Commit commits the collected SSTs to a migration. +func (c *pitrCollector) prepareMig(ctx context.Context) error { + if !c.enabled { + return nil + } + + est := stream.MigrationExtension(c.taskStorage) + + m := stream.NewMigration() + m.IngestedSstPaths = append(m.IngestedSstPaths, c.metaPath()) + + _, err := est.AppendMigration(ctx, m) + if err != nil { + return errors.Annotatef(err, "failed to add the extra backup at path %s", c.metaPath()) + } + + c.doWithMetaLock(func() { + c.resetCommitting() + }) + // Persist the metadata in case of SSTs were uploaded but the meta wasn't, + // which leads to a leakage. + return c.persistExtraBackupMeta(ctx) +} + +func (c *pitrCollector) prepareMigIfNeeded(ctx context.Context) (err error) { + c.putMigOnce.Do(func() { + err = c.prepareMig(ctx) + }) + return +} + +func (c *pitrCollector) commit(ctx context.Context) (uint64, error) { + c.ingestedSSTMeta.msg.Finished = true + ts, err := c.tso(ctx) + if err != nil { + return 0, err + } + c.ingestedSSTMeta.msg.AsIfTs = ts + return ts, c.persistExtraBackupMeta(ctx) +} + +func (c *pitrCollector) resetCommitting() { + c.ingestedSSTMeta = ingestedSSTsMeta{ + rewrites: map[int64]int64{}, + } + c.ingestedSSTMeta.msg.FilesPrefixHint = c.sstPath("") + c.ingestedSSTMeta.msg.Finished = false + c.ingestedSSTMeta.msg.BackupUuid = c.restoreUUID[:] +} + +// PiTRCollDep is the dependencies of a PiTR collector. +type PiTRCollDep struct { + PDCli pd.Client + EtcdCli *clientv3.Client + Storage *pb.StorageBackend +} + +// newPiTRColl creates a new PiTR collector. 
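The `goPersister`/`persisterHandle` pair above is a write-coalescing single writer: callers enqueue a request with a one-shot callback, and the sole writer goroutine drains everything already queued, performs one persist, and fans the same result back to every caller. A self-contained sketch of that pattern under simplified assumptions (`persist` here only counts writes; the real code marshals and uploads the protobuf meta):

    package main

    import (
        "context"
        "fmt"
    )

    type call struct {
        ctx context.Context
        cb  func(error)
    }

    // startWriter spawns the sole writer goroutine. On each round it takes one
    // call, drains every other call already queued, persists once, and answers
    // all of them with the same error.
    func startWriter(persist func(context.Context) error) chan<- call {
        hnd := make(chan call, 128)
        go func() {
            for first := range hnd {
                batch := []call{first}
            drain:
                for {
                    select {
                    case c, ok := <-hnd:
                        if !ok {
                            break drain
                        }
                        batch = append(batch, c)
                    default:
                        break drain
                    }
                }
                err := persist(batch[0].ctx)
                for _, c := range batch {
                    c.cb(err)
                }
            }
        }()
        return hnd
    }

    // write enqueues a request and waits for the coalesced result.
    func write(ctx context.Context, hnd chan<- call) error {
        ch := make(chan error, 1) // buffered: the callback must never block the writer.
        hnd <- call{ctx: ctx, cb: func(err error) { ch <- err }}
        select {
        case err := <-ch:
            return err
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    func main() {
        writes := 0
        hnd := startWriter(func(context.Context) error { writes++; return nil })
        _ = write(context.Background(), hnd)
        fmt.Println("persisted batches:", writes)
    }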
+func newPiTRColl(ctx context.Context, deps PiTRCollDep) (*pitrCollector, error) { + mcli := streamhelper.NewMetaDataClient(deps.EtcdCli) + ts, err := mcli.GetAllTasks(ctx) + if err != nil { + return nil, errors.Trace(err) + } + if len(ts) > 1 { + taskNames := []string{} + for _, t := range ts { + taskNames = append(taskNames, t.Info.Name) + } + return nil, errors.Annotatef(berrors.ErrInvalidArgument, + "more than one task found, pitr collector doesn't support that, tasks are: %#v", taskNames) + } + if len(ts) == 0 { + return &pitrCollector{}, nil + } + + coll := &pitrCollector{ + enabled: true, + } + + strg, err := storage.Create(ctx, ts[0].Info.Storage, false) + if err != nil { + return nil, errors.Trace(err) + } + coll.taskStorage = strg + + tso := func(ctx context.Context) (uint64, error) { + l, o, err := deps.PDCli.GetTS(ctx) + return oracle.ComposeTS(l, o), err + } + coll.tso = tso + + t, err := tso(ctx) + if err != nil { + return nil, errors.Trace(err) + } + coll.name = fmt.Sprintf("backup-%016X", t) + + restoreStrg, err := storage.Create(ctx, deps.Storage, false) + if err != nil { + return nil, errors.Trace(err) + } + coll.restoreStorage = restoreStrg + coll.restoreSuccess = summary.Succeed + coll.goPersister() + coll.resetCommitting() + return coll, nil +} diff --git a/br/pkg/restore/snap_client/pitr_collector_test.go b/br/pkg/restore/snap_client/pitr_collector_test.go new file mode 100644 index 0000000000000..e3b06cfbe38ae --- /dev/null +++ b/br/pkg/restore/snap_client/pitr_collector_test.go @@ -0,0 +1,271 @@ +// Copyright 2025 PingCAP, Inc. Licensed under Apache-2.0. + +package snapclient + +import ( + "context" + "fmt" + "sync/atomic" + "testing" + + "github.com/google/uuid" + backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/tidb/br/pkg/restore" + "github.com/pingcap/tidb/br/pkg/restore/utils" + "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/br/pkg/stream" + "github.com/stretchr/testify/require" +) + +func tmp(t *testing.T) *storage.LocalStorage { + tmpDir := t.TempDir() + s, err := storage.NewLocalStorage(tmpDir) + require.NoError(t, err) + s.IgnoreEnoentForDelete = true + return s +} + +type pitrCollectorT struct { + t *testing.T + coll *pitrCollector + tsoCnt *atomic.Uint64 + success *atomic.Bool + cx context.Context +} + +func (p pitrCollectorT) RestoreAFile(fs restore.BatchBackupFileSet) func() error { + for _, b := range fs { + for _, file := range b.SSTFiles { + require.NoError(p.t, p.coll.restoreStorage.WriteFile(p.cx, file.Name, []byte("something"))) + } + } + + res, err := p.coll.onBatch(p.cx, fs) + require.NoError(p.t, err) + return res +} + +func (p pitrCollectorT) Done() { + require.NoError(p.t, p.coll.close()) +} + +func (p pitrCollectorT) ExtFullBkups() []backuppb.IngestedSSTs { + est := stream.MigrationExtension(p.coll.taskStorage) + migs, err := est.Load(p.cx) + require.NoError(p.t, err) + res := []backuppb.IngestedSSTs{} + for _, m := range migs.ListAll() { + for _, pth := range m.IngestedSstPaths { + content, err := p.coll.taskStorage.ReadFile(p.cx, pth) + require.NoError(p.t, err) + var sst backuppb.IngestedSSTs + require.NoError(p.t, sst.Unmarshal(content)) + res = append(res, sst) + } + } + return res +} + +func (p *pitrCollectorT) MarkSuccess() { + p.success.Store(true) +} + +func (p *pitrCollectorT) Reopen() { + newColl := &pitrCollector{ + enabled: p.coll.enabled, + taskStorage: p.coll.taskStorage, + restoreStorage: p.coll.restoreStorage, + name: fmt.Sprintf("test-%s-%d", p.t.Name(), p.tsoCnt.Add(1)), + 
restoreUUID: p.coll.restoreUUID, + tso: p.coll.tso, + restoreSuccess: p.coll.restoreSuccess, + } + p.success.Store(false) + p.coll = newColl + + p.coll.resetCommitting() + p.coll.goPersister() +} + +func (p pitrCollectorT) RequireCopied(extBk backuppb.IngestedSSTs, files ...string) { + extFiles := make([]string, 0) + for _, f := range extBk.Files { + extFiles = append(extFiles, f.Name) + } + + locatedFiles := make([]string, 0) + for _, f := range files { + locatedFiles = append(locatedFiles, p.coll.sstPath(f)) + } + + require.ElementsMatch(p.t, extFiles, locatedFiles) +} + +func (p pitrCollectorT) RequireRewrite(extBk backuppb.IngestedSSTs, rules ...utils.TableIDRemap) { + rulesInExtBk := []utils.TableIDRemap{} + for _, f := range extBk.RewrittenTables { + rulesInExtBk = append(rulesInExtBk, utils.TableIDRemap{ + Origin: f.AncestorUpstream, + Rewritten: f.Upstream, + }) + } + require.ElementsMatch(p.t, rulesInExtBk, rules) +} + +func newPiTRCollForTest(t *testing.T) pitrCollectorT { + taskStorage := tmp(t) + restoreStorage := tmp(t) + + coll := &pitrCollector{ + enabled: true, + taskStorage: taskStorage, + restoreStorage: restoreStorage, + name: "test-" + t.Name(), + restoreUUID: uuid.New(), + } + tsoCnt := new(atomic.Uint64) + restoreSuccess := new(atomic.Bool) + coll.tso = func(ctx context.Context) (uint64, error) { + return tsoCnt.Add(1), nil + } + coll.restoreSuccess = restoreSuccess.Load + coll.goPersister() + coll.resetCommitting() + + return pitrCollectorT{ + t: t, + coll: coll, + tsoCnt: tsoCnt, + success: restoreSuccess, + cx: context.Background(), + } +} + +type backupFileSetOp func(*restore.BackupFileSet) + +func backupFileSet(ops ...backupFileSetOp) restore.BackupFileSet { + set := restore.BackupFileSet{ + RewriteRules: new(utils.RewriteRules), + } + for _, op := range ops { + op(&set) + } + return set +} + +func nameFile(n string) *backuppb.File { + return &backuppb.File{ + Name: n, + } +} + +func withFile(f *backuppb.File) backupFileSetOp { + return func(set *restore.BackupFileSet) { + set.SSTFiles = append(set.SSTFiles, f) + } +} + +func remap(from, to int64) utils.TableIDRemap { + return utils.TableIDRemap{Origin: from, Rewritten: to} +} + +func withRewriteRule(hints ...utils.TableIDRemap) backupFileSetOp { + return func(set *restore.BackupFileSet) { + set.RewriteRules.TableIDRemapHint = append(set.RewriteRules.TableIDRemapHint, hints...) 
+ } +} + +func TestCollAFile(t *testing.T) { + coll := newPiTRCollForTest(t) + batch := restore.BatchBackupFileSet{backupFileSet(withFile(nameFile("foo.txt")))} + + require.NoError(t, coll.RestoreAFile(batch)()) + coll.MarkSuccess() + coll.Done() + + exts := coll.ExtFullBkups() + require.Len(t, exts, 1) + e := exts[0] + coll.RequireCopied(e, "foo.txt") + require.True(t, e.Finished, "%v", e) + require.Equal(t, coll.coll.restoreUUID[:], e.BackupUuid) +} + +func TestCollManyFileAndRewriteRules(t *testing.T) { + coll := newPiTRCollForTest(t) + batch := restore.BatchBackupFileSet{ + backupFileSet(withFile(nameFile("foo.txt"))), + backupFileSet(withFile(nameFile("bar.txt")), withRewriteRule(remap(1, 10))), + backupFileSet(withFile(nameFile("baz.txt")), withRewriteRule(remap(2, 20))), + backupFileSet(withFile(nameFile("quux.txt")), withRewriteRule(remap(3, 21))), + } + + require.NoError(t, coll.RestoreAFile(batch)()) + coll.MarkSuccess() + coll.Done() + + exts := coll.ExtFullBkups() + require.Len(t, exts, 1) + e := exts[0] + coll.RequireCopied(e, "foo.txt", "bar.txt", "baz.txt", "quux.txt") + coll.RequireRewrite(e, remap(1, 10), remap(2, 20), remap(3, 21)) + require.True(t, e.Finished, "%v", e) + require.Equal(t, coll.coll.restoreUUID[:], e.BackupUuid) +} + +func TestReopen(t *testing.T) { + coll := newPiTRCollForTest(t) + batch1 := restore.BatchBackupFileSet{ + backupFileSet(withFile(nameFile("foo.txt"))), + backupFileSet(withFile(nameFile("bar.txt")), withRewriteRule(remap(1, 10)))} + batch2 := restore.BatchBackupFileSet{backupFileSet(withFile(nameFile("baz.txt")), withRewriteRule(remap(2, 20)))} + batch3 := restore.BatchBackupFileSet{backupFileSet(withFile(nameFile("quux.txt")), withRewriteRule(remap(3, 21)))} + + require.NoError(t, coll.RestoreAFile(batch1)()) + coll.Done() + exts := coll.ExtFullBkups() + require.Len(t, exts, 1) + e := exts[0] + coll.RequireCopied(e, "foo.txt", "bar.txt") + coll.RequireRewrite(e, remap(1, 10)) + require.False(t, e.Finished, "%v", e) + require.Equal(t, coll.coll.restoreUUID[:], e.BackupUuid) + + coll.Reopen() + require.NoError(t, coll.RestoreAFile(batch2)()) + exts = coll.ExtFullBkups() + require.Len(t, exts, 2) + e = exts[1] + coll.RequireCopied(e, "baz.txt") + coll.RequireRewrite(e, remap(2, 20)) + require.False(t, e.Finished, "%v", e) + require.Equal(t, coll.coll.restoreUUID[:], e.BackupUuid) + coll.coll.writerRoutine.close() + + coll.Reopen() + require.NoError(t, coll.RestoreAFile(batch3)()) + coll.MarkSuccess() + coll.Done() + exts = coll.ExtFullBkups() + require.Len(t, exts, 3) + e = exts[2] + coll.RequireCopied(e, "quux.txt") + coll.RequireRewrite(e, remap(3, 21)) + require.True(t, e.Finished, "%v", e) + require.Equal(t, coll.coll.restoreUUID[:], e.BackupUuid) +} + +func TestConflict(t *testing.T) { + coll := newPiTRCollForTest(t) + batch := restore.BatchBackupFileSet{ + backupFileSet(withFile(nameFile("foo.txt")), withRewriteRule(remap(1, 10))), + backupFileSet(withFile(nameFile("foo.txt")), withRewriteRule(remap(1, 11))), + } + + cb, err := coll.coll.onBatch(coll.cx, batch) + // NOTE: An error here is also acceptable. 
+ require.NoError(t, err) + require.Error(t, cb()) + + coll.Done() +} diff --git a/br/pkg/restore/utils/BUILD.bazel b/br/pkg/restore/utils/BUILD.bazel index a40f0a883ae09..c29bd0f813428 100644 --- a/br/pkg/restore/utils/BUILD.bazel +++ b/br/pkg/restore/utils/BUILD.bazel @@ -15,6 +15,7 @@ go_library( "//br/pkg/rtree", "//pkg/meta/model", "//pkg/tablecodec", + "//pkg/util", "//pkg/util/codec", "//pkg/util/redact", "@com_github_pingcap_errors//:errors", diff --git a/br/pkg/restore/utils/rewrite_rule.go b/br/pkg/restore/utils/rewrite_rule.go index a664d97a5f11d..67fe7cf312675 100644 --- a/br/pkg/restore/utils/rewrite_rule.go +++ b/br/pkg/restore/utils/rewrite_rule.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/br/pkg/rtree" "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/tablecodec" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/redact" "go.uber.org/zap" @@ -47,6 +48,44 @@ type RewriteRules struct { NewKeyspace []byte // used to record checkpoint data NewTableID int64 + // used to record backup files to pitr. + // note: should NewTableID merged with this? + TableIDRemapHint []TableIDRemap +} + +func (r *RewriteRules) RewriteSourceTableID(from, to int64) (rewritten bool) { + toPrefix := tablecodec.EncodeTablePrefix(to) + fromPrefix := tablecodec.EncodeTablePrefix(from) + for _, rule := range r.Data { + if bytes.HasPrefix(rule.OldKeyPrefix, fromPrefix) { + rule.OldKeyPrefix = append(toPrefix, rule.OldKeyPrefix[len(toPrefix):]...) + rewritten = true + } + } + return +} + +func (r *RewriteRules) Clone() *RewriteRules { + data := make([]*import_sstpb.RewriteRule, len(r.Data)) + for i, rule := range r.Data { + data[i] = util.ProtoV1Clone(rule) + } + remap := make([]TableIDRemap, len(r.TableIDRemapHint)) + copy(remap, r.TableIDRemapHint) + + return &RewriteRules{ + Data: data, + TableIDRemapHint: remap, + OldKeyspace: r.OldKeyspace, + NewKeyspace: r.NewKeyspace, + NewTableID: r.NewTableID, + } +} + +// TableIDRemap presents a remapping of table id during rewriting. +type TableIDRemap struct { + Origin int64 + Rewritten int64 } // Append append its argument to this rewrite rules. 
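The new `RewriteSourceTableID` helper above swaps the table-ID prefix on the old side of each data rule, relying on `EncodeTablePrefix` producing equal-length prefixes for any table ID. A minimal, self-contained illustration of that prefix-splice idea with plain byte slices (stand-in prefixes, not real TiDB key encoding):

    package main

    import (
        "bytes"
        "fmt"
    )

    // retargetPrefix replaces fromPrefix with toPrefix at the head of key.
    // Both prefixes must have the same length, as encoded table prefixes do.
    func retargetPrefix(key, fromPrefix, toPrefix []byte) ([]byte, bool) {
        if len(fromPrefix) != len(toPrefix) || !bytes.HasPrefix(key, fromPrefix) {
            return key, false
        }
        out := append([]byte{}, toPrefix...)
        return append(out, key[len(fromPrefix):]...), true
    }

    func main() {
        key := []byte("t#0001_r_some_row_key") // stand-in for a table-prefixed key
        got, ok := retargetPrefix(key, []byte("t#0001"), []byte("t#0042"))
        fmt.Println(string(got), ok) // t#0042_r_some_row_key true
    }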
@@ -75,9 +114,11 @@ func GetRewriteRules( ) *RewriteRules { tableIDs := GetTableIDMap(newTable, oldTable) indexIDs := GetIndexIDMap(newTable, oldTable) + remaps := make([]TableIDRemap, 0) dataRules := make([]*import_sstpb.RewriteRule, 0) for oldTableID, newTableID := range tableIDs { + remaps = append(remaps, TableIDRemap{Origin: oldTableID, Rewritten: newTableID}) if getDetailRule { dataRules = append(dataRules, &import_sstpb.RewriteRule{ OldKeyPrefix: tablecodec.GenTableRecordPrefix(oldTableID), @@ -101,7 +142,8 @@ func GetRewriteRules( } return &RewriteRules{ - Data: dataRules, + Data: dataRules, + TableIDRemapHint: remaps, } } @@ -112,8 +154,10 @@ func GetRewriteRulesMap( tableIDs := GetTableIDMap(newTable, oldTable) indexIDs := GetIndexIDMap(newTable, oldTable) + remaps := make([]TableIDRemap, 0) for oldTableID, newTableID := range tableIDs { + remaps = append(remaps, TableIDRemap{Origin: oldTableID, Rewritten: newTableID}) dataRules := make([]*import_sstpb.RewriteRule, 0) if getDetailRule { dataRules = append(dataRules, &import_sstpb.RewriteRule{ @@ -137,7 +181,8 @@ func GetRewriteRulesMap( } rules[oldTableID] = &RewriteRules{ - Data: dataRules, + Data: dataRules, + TableIDRemapHint: remaps, } } @@ -152,7 +197,7 @@ func GetRewriteRuleOfTable( getDetailRule bool, ) *RewriteRules { dataRules := make([]*import_sstpb.RewriteRule, 0) - + remaps := []TableIDRemap{{Origin: oldTableID, Rewritten: newTableID}} if getDetailRule { dataRules = append(dataRules, &import_sstpb.RewriteRule{ OldKeyPrefix: tablecodec.GenTableRecordPrefix(oldTableID), @@ -174,7 +219,7 @@ func GetRewriteRuleOfTable( }) } - return &RewriteRules{Data: dataRules, NewTableID: newTableID} + return &RewriteRules{Data: dataRules, NewTableID: newTableID, TableIDRemapHint: remaps} } // ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file. 
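The `TableIDRemapHint` slices produced here are consumed by the PiTR collector, which records each origin-to-rewritten pair and rejects conflicting targets for the same origin. A small, self-contained sketch of that consumption, assuming duplicates with the same target are fine but mismatches are errors (mirroring `putRewriteRule` above):

    package main

    import "fmt"

    type remap struct{ origin, rewritten int64 }

    // collectRemaps folds remap hints into a map, rejecting conflicting targets
    // for the same origin table ID.
    func collectRemaps(hints []remap) (map[int64]int64, error) {
        out := map[int64]int64{}
        for _, h := range hints {
            if prev, ok := out[h.origin]; ok && prev != h.rewritten {
                return nil, fmt.Errorf("conflict: %d already mapped to %d, got %d", h.origin, prev, h.rewritten)
            }
            out[h.origin] = h.rewritten
        }
        return out, nil
    }

    func main() {
        m, err := collectRemaps([]remap{{1, 10}, {2, 20}, {1, 10}})
        fmt.Println(m, err) // map[1:10 2:20] <nil>
    }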
@@ -286,6 +331,10 @@ func FindMatchedRewriteRule(file AppliedFile, rules *RewriteRules) *import_sstpb } func (r *RewriteRules) String() string { + if r == nil { + return "[]" + } + var out strings.Builder out.WriteRune('[') if len(r.OldKeyspace) != 0 { @@ -340,12 +389,14 @@ func GetRewriteEncodedKeys(file AppliedFile, rewriteRules *RewriteRules) (startK if startID == endID { startKey, rule = rewriteEncodedKey(file.GetStartKey(), rewriteRules) if rewriteRules != nil && rule == nil { - err = errors.Annotatef(berrors.ErrRestoreInvalidRewrite, "cannot find encode rewrite rule for start key, startKey: %s", redact.Key(file.GetStartKey())) + err = errors.Annotatef(berrors.ErrRestoreInvalidRewrite, "cannot find encode rewrite rule for start key, startKey: %s; rewrite rules: %s", + redact.Key(file.GetStartKey()), rewriteRules) return } endKey, rule = rewriteEncodedKey(file.GetEndKey(), rewriteRules) if rewriteRules != nil && rule == nil { - err = errors.Annotatef(berrors.ErrRestoreInvalidRewrite, "cannot find encode rewrite rule for end key, endKey: %s", redact.Key(file.GetEndKey())) + err = errors.Annotatef(berrors.ErrRestoreInvalidRewrite, "cannot find encode rewrite rule for end key, endKey: %s; rewrite rules: %s", + redact.Key(file.GetEndKey()), rewriteRules) return } } else { diff --git a/br/pkg/storage/BUILD.bazel b/br/pkg/storage/BUILD.bazel index 60c587893af9f..ee28627832715 100644 --- a/br/pkg/storage/BUILD.bazel +++ b/br/pkg/storage/BUILD.bazel @@ -28,6 +28,7 @@ go_library( deps = [ "//br/pkg/errors", "//br/pkg/logutil", + "//br/pkg/utils", "//br/pkg/utils/iter", "//pkg/lightning/log", "//pkg/sessionctx/variable", diff --git a/br/pkg/storage/ks3.go b/br/pkg/storage/ks3.go index 919da5e3aa760..24aaa88e81b0d 100644 --- a/br/pkg/storage/ks3.go +++ b/br/pkg/storage/ks3.go @@ -38,6 +38,10 @@ import ( "go.uber.org/zap" ) +var ( + _ Copier = &KS3Storage{} +) + const ( // ks3 sdk does not expose context, we use hardcoded timeout for network request ks3SDKProvider = "ks3-sdk" @@ -734,3 +738,45 @@ func (rs *KS3Storage) Rename(ctx context.Context, oldFileName, newFileName strin // Close implements ExternalStorage interface. func (*KS3Storage) Close() {} + +func maybeObjectAlreadyExists(err awserr.Error) bool { + // Some versions of server did return the error code "ObjectAlreayExists"... + return err.Code() == "ObjectAlreayExists" || err.Code() == "ObjectAlreadyExists" +} + +// CopyFrom implements Copier. +func (rs *KS3Storage) CopyFrom(ctx context.Context, e ExternalStorage, spec CopySpec) error { + s, ok := e.(*KS3Storage) + if !ok { + return errors.Annotatef(berrors.ErrStorageInvalidConfig, "S3Storage.CopyFrom supports S3 storage only, get %T", e) + } + + copyInput := &s3.CopyObjectInput{ + Bucket: aws.String(rs.options.Bucket), + // NOTE: Perhaps we need to allow copy cross regions / accounts. + CopySource: aws.String(path.Join(s.options.Bucket, s.options.Prefix, spec.From)), + Key: aws.String(rs.options.Prefix + spec.To), + } + + // NOTE: Maybe check whether the Go SDK will handle 200 OK errors. + // https://repost.aws/knowledge-center/s3-resolve-200-internalerror + _, err := s.svc.CopyObjectWithContext(ctx, copyInput) + if err != nil { + aErr, ok := err.(awserr.Error) + if !ok { + return err + } + // KS3 reports an error when copying an object to an existing path. + // AWS S3 will directly override the target. Simulating its behavior. + // Glitch: this isn't an atomic operation. So it is possible left nothing to `spec.To`... 
+ if maybeObjectAlreadyExists(aErr) { + log.Warn("The object of `spec.To` already exists, will delete it and retry", zap.String("object", spec.To), logutil.ShortError(err)) + if err := rs.DeleteFile(ctx, spec.To); err != nil { + return errors.Annotate(err, "during deleting an exist object for making place for copy") + } + + return rs.CopyFrom(ctx, e, spec) + } + } + return nil +} diff --git a/br/pkg/storage/local.go b/br/pkg/storage/local.go index 24f530109053d..2e0c0bf725188 100644 --- a/br/pkg/storage/local.go +++ b/br/pkg/storage/local.go @@ -14,6 +14,8 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/log" + berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/logutil" "go.uber.org/zap" ) @@ -258,6 +260,19 @@ func (l *LocalStorage) Rename(_ context.Context, oldFileName, newFileName string // Close implements ExternalStorage interface. func (*LocalStorage) Close() {} +func (l *LocalStorage) CopyFrom(ctx context.Context, e ExternalStorage, spec CopySpec) error { + sl, ok := e.(*LocalStorage) + if !ok { + return errors.Annotatef(berrors.ErrInvalidArgument, "expect source to be LocalStorage, got %T", e) + } + from := filepath.Join(sl.base, spec.From) + to := filepath.Join(l.base, spec.To) + if err := mkdirAll(filepath.Dir(to)); err != nil { + return errors.Trace(err) + } + return os.Link(from, to) +} + func pathExists(_path string) (bool, error) { _, err := os.Stat(_path) if err != nil { diff --git a/br/pkg/storage/locking.go b/br/pkg/storage/locking.go index 67ea7c2003081..836a7cbb975be 100644 --- a/br/pkg/storage/locking.go +++ b/br/pkg/storage/locking.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math" "math/rand" "os" "path" @@ -18,6 +19,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/utils" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -181,6 +183,10 @@ type RemoteLock struct { path string } +func (l *RemoteLock) String() string { + return fmt.Sprintf("{path=%s,uuid=%s,storage_uri=%s}", l.path, l.txnID, l.storage.URI()) +} + func tryFetchRemoteLock(ctx context.Context, storage ExternalStorage, path string) error { meta, err := readLockMeta(ctx, storage, path) if err != nil { @@ -244,6 +250,23 @@ func (l RemoteLock) Unlock(ctx context.Context) error { return nil } +func (l RemoteLock) UnlockOnCleanUp(ctx context.Context) { + const cleanUpContextTimeOut = 30 * time.Second + + if ctx.Err() != nil { + logutil.CL(ctx).Warn("Unlocking but the context was done. 
Use the background context with a deadline.", + logutil.AShortError("ctx-err", ctx.Err())) + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(context.Background(), cleanUpContextTimeOut) + defer cancel() + } + + if err := l.Unlock(ctx); err != nil { + logutil.CL(ctx).Warn("Failed to unlock a lock, you may need to manually delete it.", + zap.Stringer("lock", &l), zap.Int("pid", os.Getpid()), logutil.ShortError(err)) + } +} + func writeLockName(path string) string { return fmt.Sprintf("%s.WRIT", path) } @@ -253,6 +276,35 @@ func newReadLockName(path string) string { return fmt.Sprintf("%s.READ.%016x", path, readID) } +type Locker = func(ctx context.Context, storage ExternalStorage, path, hint string) (lock RemoteLock, err error) + +func LockWith(ctx context.Context, locker Locker, storage ExternalStorage, path, hint string) (lock RemoteLock, err error) { + const JitterMs = 5000 + + retry := utils.InitialRetryState(math.MaxInt, 1*time.Second, 60*time.Second) + jitter := time.Duration(rand.Uint32()%JitterMs+(JitterMs/2)) * time.Millisecond + for { + lock, err = locker(ctx, storage, path, hint) + if err == nil { + return lock, nil + } + retryAfter := retry.ExponentialBackoff() + jitter + log.Info( + "Encountered lock, will retry then.", + logutil.ShortError(err), + zap.String("path", path), + zap.Duration("retry-after", retryAfter), + ) + + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-time.After(retryAfter): + } + } +} + func TryLockRemoteWrite(ctx context.Context, storage ExternalStorage, path, hint string) (lock RemoteLock, err error) { target := writeLockName(path) writer := conditionalPut{ diff --git a/br/pkg/storage/locking_test.go b/br/pkg/storage/locking_test.go index dc8757db7b774..f5f6a4c052e0e 100644 --- a/br/pkg/storage/locking_test.go +++ b/br/pkg/storage/locking_test.go @@ -113,3 +113,15 @@ func TestConcurrentLock(t *testing.T) { requireFileExists(t, filepath.Join(path, "test.lock")) } + +func TestUnlockOnCleanUp(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + strg, pth := createMockStorage(t) + lock, err := storage.TryLockRemote(ctx, strg, "test.lock", "This file is mine!") + require.NoError(t, err) + requireFileExists(t, filepath.Join(pth, "test.lock")) + + cancel() + lock.UnlockOnCleanUp(ctx) + requireFileNotExists(t, filepath.Join(pth, "test.lock")) +} diff --git a/br/pkg/storage/s3.go b/br/pkg/storage/s3.go index 3987512b2a0a2..c1a13ff37892c 100644 --- a/br/pkg/storage/s3.go +++ b/br/pkg/storage/s3.go @@ -93,6 +93,25 @@ func (rs *S3Storage) GetOptions() *backuppb.S3 { return rs.options } +func (rs *S3Storage) CopyFrom(ctx context.Context, e ExternalStorage, spec CopySpec) error { + s, ok := e.(*S3Storage) + if !ok { + return errors.Annotatef(berrors.ErrStorageInvalidConfig, "S3Storage.CopyFrom supports S3 storage only, get %T", e) + } + + copyInput := &s3.CopyObjectInput{ + Bucket: aws.String(rs.options.Bucket), + // NOTE: Perhaps we need to allow copy cross regions / accounts. + CopySource: aws.String(path.Join(s.options.Bucket, s.options.Prefix, spec.From)), + Key: aws.String(rs.options.Prefix + spec.To), + } + + // NOTE: Maybe check whether the Go SDK will handle 200 OK errors. + // https://repost.aws/knowledge-center/s3-resolve-200-internalerror + _, err := s.svc.CopyObjectWithContext(ctx, copyInput) + return err +} + // S3Uploader does multi-part upload to s3. 
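The `CopyFrom` implementations above (S3, KS3, local) back the `Copier` interface defined in the storage.go hunk below, which is what `pitrCollector.putSST` relies on. A hedged usage sketch, not part of this patch, of how a caller holding two already-opened stores might use it (the package and helper name `copyObject` are hypothetical):

    package copyutil // hypothetical helper package for this sketch

    import (
        "context"

        "github.com/pingcap/errors"
        "github.com/pingcap/tidb/br/pkg/storage"
    )

    // copyObject copies one object from `from` into `to`, requiring the target
    // backend to implement the Copier interface.
    func copyObject(ctx context.Context, from, to storage.ExternalStorage, name, target string) error {
        copier, ok := to.(storage.Copier)
        if !ok {
            return errors.Errorf("storage %T does not support copying", to)
        }
        return copier.CopyFrom(ctx, from, storage.CopySpec{From: name, To: target})
    }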
type S3Uploader struct { svc s3iface.S3API diff --git a/br/pkg/storage/storage.go b/br/pkg/storage/storage.go index 042d4a1f4d715..e6d9212f3065e 100644 --- a/br/pkg/storage/storage.go +++ b/br/pkg/storage/storage.go @@ -101,6 +101,16 @@ type ReaderOption struct { PrefetchSize int } +type Copier interface { + // CopyFrom copies a object to the current external storage by the specification. + CopyFrom(ctx context.Context, e ExternalStorage, spec CopySpec) error +} + +type CopySpec struct { + From string + To string +} + // ExternalStorage represents a kind of file system storage. type ExternalStorage interface { // WriteFile writes a complete file to storage, similar to os.WriteFile, but WriteFile should be atomic diff --git a/br/pkg/stream/BUILD.bazel b/br/pkg/stream/BUILD.bazel index 768748325e776..2e600464b60d1 100644 --- a/br/pkg/stream/BUILD.bazel +++ b/br/pkg/stream/BUILD.bazel @@ -36,8 +36,10 @@ go_library( "//pkg/util/table-filter", "@com_github_docker_go_units//:go-units", "@com_github_fatih_color//:color", + "@com_github_google_uuid//:uuid", "@com_github_klauspost_compress//zstd", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_pingcap_kvproto//pkg/encryptionpb", "@com_github_pingcap_kvproto//pkg/metapb", @@ -80,6 +82,7 @@ go_test( "//pkg/util/intest", "//pkg/util/table-filter", "@com_github_fsouza_fake_gcs_server//fakestorage", + "@com_github_google_uuid//:uuid", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/brpb", diff --git a/br/pkg/stream/stream_metas.go b/br/pkg/stream/stream_metas.go index f738caaee85fd..9ac730bcbb920 100644 --- a/br/pkg/stream/stream_metas.go +++ b/br/pkg/stream/stream_metas.go @@ -5,8 +5,10 @@ package stream import ( "context" "encoding/binary" + "encoding/hex" "fmt" "hash/crc64" + "maps" "math" "path" "slices" @@ -14,10 +16,13 @@ import ( "strconv" "strings" "sync" + "time" "github.com/docker/go-units" "github.com/fatih/color" + "github.com/google/uuid" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" pb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" @@ -27,6 +32,8 @@ import ( "github.com/pingcap/tidb/br/pkg/utils/iter" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/mathutil" + "github.com/pingcap/tidb/pkg/util/versioninfo" + "github.com/tikv/client-go/v2/oracle" "go.uber.org/multierr" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -38,8 +45,18 @@ const ( baseTmp = "BASE_TMP" metaSuffix = ".meta" migrationPrefix = "v1/migrations" + lockPrefix = "v1/LOCK" + + SupportedMigVersion = pb.MigrationVersion_M2 ) +func NewMigration() *pb.Migration { + return &pb.Migration{ + Version: pb.MigrationVersion_M2, + Creator: fmt.Sprintf("br;commit=%s;branch=%s", versioninfo.TiDBGitHash, versioninfo.TiDBGitBranch), + } +} + type StreamMetadataSet struct { // if set true, the metadata and datafile won't be removed DryRun bool @@ -194,7 +211,7 @@ func (ms *StreamMetadataSet) RemoveDataFilesAndUpdateMetadataInBatch( updateFn func(num int64), ) ([]string, error) { hst := ms.hook(st) - est := MigerationExtension(hst) + est := MigrationExtension(hst) est.Hooks = updateFnHook{updateFn: updateFn} res := MigratedTo{NewBase: new(pb.Migration)} est.doTruncateLogs(ctx, ms, from, &res) @@ -332,45 +349,113 @@ func ReplaceMetadata(meta *pb.Metadata, filegroups []*pb.DataFileGroup) { updateMetadataInternalStat(meta) } -func 
AddMigrationToTable(m *pb.Migration, table *glue.Table) { - rd := color.New(color.FgHiRed).Sprint - for i, c := range m.Compactions { - addCompactionToTable(c, table, i) +type marshalMigrationContext struct { + context.Context + est MigrationExt + + output *glue.Table + keyspace []string +} + +func (m *marshalMigrationContext) emit(key, value string) { + bold := color.New(color.Bold).Sprintf + ks := new(strings.Builder) + for _, k := range m.keyspace { + ks.WriteString(k) + ks.WriteString("/") } + ks.WriteString(key) - if len(m.EditMeta) > 0 { - totalDeletePhyFile := 0 - totalDeleteLgcFile := 0 - for _, edit := range m.EditMeta { - totalDeletePhyFile += len(edit.DeletePhysicalFiles) - for _, dl := range edit.DeleteLogicalFiles { - totalDeleteLgcFile += len(dl.Spans) - } + finalValue := bold(value) + m.output.Add(ks.String(), finalValue) +} + +func (m *marshalMigrationContext) keyspaced(key []string, f func()) { + m.keyspace = append(m.keyspace, key...) + defer func() { + m.keyspace = m.keyspace[:len(m.keyspace)-len(key)] + }() + + f() +} + +func (m *marshalMigrationContext) addCompaction(c *pb.LogFileCompaction) { + m.emit("name", c.Name) + m.emit("time", fmt.Sprintf("%d ~ %d", c.CompactionFromTs, c.CompactionUntilTs)) + m.emit("file", fmt.Sprintf("[%q, %q]", c.Artifacts, c.GeneratedFiles)) +} + +func (m *marshalMigrationContext) addMetaEdits(em []*pb.MetaEdit) { + if len(em) == 0 { + return + } + + totalDeletePhyFile := 0 + totalDeleteLgcFile := 0 + for _, edit := range em { + totalDeletePhyFile += len(edit.DeletePhysicalFiles) + for _, dl := range edit.DeleteLogicalFiles { + totalDeleteLgcFile += len(dl.Spans) } - table.Add( - "edit-meta-files", - fmt.Sprintf("%s meta files will be edited.", rd(len(m.EditMeta))), - ) - table.Add( - "delete-physical-file", - fmt.Sprintf("%s physical files will be deleted.", rd(totalDeletePhyFile)), - ) - table.Add( - "delete-logical-file", - fmt.Sprintf("%s logical segments may be deleted, if possible.", rd(totalDeleteLgcFile)), - ) } - for i, c := range m.DestructPrefix { - table.Add(fmt.Sprintf("destruct-prefix[%02d]", i), rd(c)) + m.emit("edit_meta_files", strconv.Itoa(len(em))) + m.emit("delete_physical_file", strconv.Itoa(totalDeletePhyFile)) + m.emit("delete_logical_file", strconv.Itoa(totalDeleteLgcFile)) +} + +func (m *marshalMigrationContext) addTruncatedTo(tso uint64) { + if tso == 0 { + return } - table.Add("truncate-to", rd(m.TruncatedTo)) + m.emit("truncated_to", strconv.FormatUint(tso, 10)) + t := oracle.GetTimeFromTS(tso) + m.emit("truncated_to_in_rfc3339", t.Format(time.RFC3339)) } -func addCompactionToTable(m *pb.LogFileCompaction, table *glue.Table, idx int) { - withIdx := func(s string) string { return fmt.Sprintf("compactions[%d].%s", idx, s) } - table.Add(withIdx("name"), m.Name) - table.Add(withIdx("time"), fmt.Sprintf("%d ~ %d", m.CompactionFromTs, m.CompactionUntilTs)) - table.Add(withIdx("file"), fmt.Sprintf("[%q, %q]", m.Artifacts, m.GeneratedFiles)) +func (m *marshalMigrationContext) addMigration(mig *pb.Migration) { + m.addTruncatedTo(mig.TruncatedTo) + for i, c := range mig.Compactions { + m.keyspaced([]string{"compactions", strconv.Itoa(i)}, func() { + m.addCompaction(c) + }) + } + m.keyspaced([]string{"meta_edit"}, func() { + m.addMetaEdits(mig.EditMeta) + }) + for i, d := range mig.DestructPrefix { + m.keyspaced([]string{"destruct_prefix", strconv.Itoa(i)}, func() { + m.emit("value", d) + }) + } + for i, p := range mig.IngestedSstPaths { + m.keyspaced([]string{"extra_full_backup", strconv.Itoa(i)}, func() { + 
m.addIngestedSSTss(p) + }) + } +} + +func (m *marshalMigrationContext) addIngestedSSTss(path string) { + fullbk, err := readIngestedSSTs(m.Context, path, m.est.s) + if err != nil { + m.emit("err_during_reading", err.Error()) + m.emit("meta_path", path) + return + } + + m.emit("as_if_ts", strconv.FormatUint(fullbk.AsIfTs, 10)) + m.emit("backup_uuid", hex.EncodeToString(fullbk.GetBackupUuid())) + m.emit("files_count", strconv.Itoa(len(fullbk.Files))) + m.emit("files_position", fullbk.FilesPrefixHint) +} + +func (m MigrationExt) AddMigrationToTable(ctx context.Context, mig *pb.Migration, table *glue.Table) { + cx := marshalMigrationContext{ + Context: ctx, + est: m, + output: table, + } + + cx.addMigration(mig) } // MigrationExt is an extension to the `ExternalStorage` type. @@ -506,7 +591,7 @@ func (NoHooks) HandledAMetaEdit(*pb.MetaEdit) func (NoHooks) HandingMetaEditDone() {} // MigrateionExtnsion installs the extension methods to an `ExternalStorage`. -func MigerationExtension(s storage.ExternalStorage) MigrationExt { +func MigrationExtension(s storage.ExternalStorage) MigrationExt { return MigrationExt{ s: s, prefix: migrationPrefix, @@ -524,6 +609,7 @@ func MergeMigrations(m1 *pb.Migration, m2 *pb.Migration) *pb.Migration { out.TruncatedTo = max(m1.GetTruncatedTo(), m2.GetTruncatedTo()) out.DestructPrefix = append(out.DestructPrefix, m1.GetDestructPrefix()...) out.DestructPrefix = append(out.DestructPrefix, m2.GetDestructPrefix()...) + out.IngestedSstPaths = append(out.IngestedSstPaths, m1.GetIngestedSstPaths()...) return out } @@ -541,12 +627,16 @@ type MergeAndMigratedTo struct { // The term "migrate to" means, try to performance all possible operations // from a migration to the storage. type MigratedTo struct { - // Errors happen during executing the migration. + // Non-fatal errors happen during executing the migration. Warnings []error // The new BASE migration after the operation. NewBase *pb.Migration } +func (m *MigratedTo) Warn(err error) { + m.Warnings = append(m.Warnings, err) +} + // Migrations represents living migrations from the storage. type Migrations struct { // The BASE migration. @@ -556,6 +646,11 @@ type Migrations struct { Layers []*OrderedMigration `json:"layers"` } +// GetReadLock locks the storage and make sure there won't be other one modify this backup. +func (m *MigrationExt) GetReadLock(ctx context.Context, hint string) (storage.RemoteLock, error) { + return storage.LockWith(ctx, storage.TryLockRemoteRead, m.s, lockPrefix, hint) +} + // OrderedMigration is a migration with its path and sequence number. type OrderedMigration struct { SeqNum int `json:"seq_num"` @@ -563,8 +658,43 @@ type OrderedMigration struct { Content pb.Migration `json:"content"` } +func (o *OrderedMigration) unmarshalContent(b []byte) error { + err := o.Content.Unmarshal(b) + if err != nil { + return err + } + if o.Content.Version > SupportedMigVersion { + return errors.Annotatef( + berrors.ErrMigrationVersionNotSupported, + "the migration at %s has version %s(%d), the max version we support is %s(%d)", + o.Path, + o.Content.Version, o.Content.Version, + SupportedMigVersion, SupportedMigVersion, + ) + } + + return nil +} + +type LoadOptions func(*loadConfig) + +type loadConfig struct { + notFoundIsErr bool +} + +func MLNotFoundIsErr() LoadOptions { + return func(c *loadConfig) { + c.notFoundIsErr = true + } +} + // Load loads the current living migrations from the storage. 
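`Load` now takes the functional options defined just above; `MLNotFoundIsErr()` makes an empty migration directory surface as `berrors.ErrMigrationNotFound` (see the hunk below) instead of an empty result. A hedged usage sketch inside a hypothetical caller that already holds a `stream.MigrationExt` named `est`:

    // Report a missing migration set as an error rather than treating it as empty.
    migs, err := est.Load(ctx, stream.MLNotFoundIsErr())
    if err != nil {
        // Wraps berrors.ErrMigrationNotFound when no migration has been written yet.
        return err
    }
    _ = migs.ListAll() // e.g. walk every layer merged onto the base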
-func (m MigrationExt) Load(ctx context.Context) (Migrations, error) { +func (m MigrationExt) Load(ctx context.Context, opts ...LoadOptions) (Migrations, error) { + cfg := loadConfig{} + for _, o := range opts { + o(&cfg) + } + opt := &storage.WalkOption{ SubDir: m.prefix, } @@ -591,6 +721,9 @@ func (m MigrationExt) Load(ctx context.Context) (Migrations, error) { if collected.Err != nil { return Migrations{}, collected.Err } + if len(collected.Item) == 0 && cfg.notFoundIsErr { + return Migrations{}, errors.Annotatef(berrors.ErrMigrationNotFound, "in the storage %s", m.s.URI()) + } sort.Slice(collected.Item, func(i, j int) bool { return collected.Item[i].SeqNum < collected.Item[j].SeqNum }) @@ -623,11 +756,20 @@ func (m MigrationExt) DryRun(f func(MigrationExt)) []storage.Effect { } func (m MigrationExt) AppendMigration(ctx context.Context, mig *pb.Migration) (int, error) { + lock, err := storage.LockWith(ctx, storage.TryLockRemoteWrite, m.s, lockPrefix, "AppendMigration") + if err != nil { + return 0, err + } + defer lock.UnlockOnCleanUp(ctx) + migs, err := m.Load(ctx) if err != nil { return 0, err } - newSN := migs.Layers[len(migs.Layers)-1].SeqNum + 1 + newSN := 1 + if len(migs.Layers) > 0 { + newSN = migs.Layers[len(migs.Layers)-1].SeqNum + 1 + } name := path.Join(migrationPrefix, nameOf(mig, newSN)) data, err := mig.Marshal() if err != nil { @@ -667,6 +809,8 @@ type mergeAndMigrateToConfig struct { interactiveCheck func(context.Context, *pb.Migration) bool alwaysRunTruncate bool appendPhantomMigration []pb.Migration + + skipLockingInTest bool } type MergeAndMigrateToOpt func(*mergeAndMigrateToConfig) @@ -677,6 +821,12 @@ func MMOptInteractiveCheck(f func(context.Context, *pb.Migration) bool) MergeAnd } } +func MMOptSkipLockingInTest() MergeAndMigrateToOpt { + return func(c *mergeAndMigrateToConfig) { + c.skipLockingInTest = true + } +} + // MMOptAlwaysRunTruncate forces the merge and migrate to always run the truncating. // If not set, when the `truncated-to` wasn'd modified, truncating will be skipped. // This is necessary because truncating, even a no-op, requires a full scan over metadatas for now. @@ -710,6 +860,18 @@ func (m MigrationExt) MergeAndMigrateTo( o(&config) } + if !config.skipLockingInTest { + lock, err := storage.LockWith(ctx, storage.TryLockRemoteWrite, m.s, lockPrefix, "AppendMigration") + if err != nil { + result.MigratedTo = MigratedTo{ + Warnings: []error{ + errors.Annotate(err, "failed to get the lock, nothing will happen"), + }} + return + } + defer lock.UnlockOnCleanUp(ctx) + } + migs, err := m.Load(ctx) if err != nil { result.MigratedTo = MigratedTo{ @@ -781,7 +943,7 @@ func (m MigrationExt) MergeAndMigrateTo( } } } - result.MigratedTo = m.MigrateTo(ctx, newBase, MTMaybeSkipTruncateLog(!config.alwaysRunTruncate && canSkipTruncate)) + result.MigratedTo = m.migrateTo(ctx, newBase, MTMaybeSkipTruncateLog(!config.alwaysRunTruncate && canSkipTruncate)) // Put the final BASE. err = m.writeBase(ctx, result.NewBase) @@ -791,7 +953,7 @@ func (m MigrationExt) MergeAndMigrateTo( return } -type MigrateToOpt func(*migToOpt) +type migrateToOpt func(*migToOpt) type migToOpt struct { skipTruncateLog bool @@ -801,17 +963,17 @@ func MTSkipTruncateLog(o *migToOpt) { o.skipTruncateLog = true } -func MTMaybeSkipTruncateLog(cond bool) MigrateToOpt { +func MTMaybeSkipTruncateLog(cond bool) migrateToOpt { if cond { return MTSkipTruncateLog } return func(*migToOpt) {} } -// MigrateTo migrates to a migration. +// migrateTo migrates to a migration. 
// If encountered some error during executing some operation, the operation will be put // to the new BASE, which can be retryed then. -func (m MigrationExt) MigrateTo(ctx context.Context, mig *pb.Migration, opts ...MigrateToOpt) MigratedTo { +func (m MigrationExt) migrateTo(ctx context.Context, mig *pb.Migration, opts ...migrateToOpt) MigratedTo { opt := migToOpt{} for _, o := range opts { o(&opt) @@ -820,18 +982,16 @@ func (m MigrationExt) MigrateTo(ctx context.Context, mig *pb.Migration, opts ... result := MigratedTo{ NewBase: new(pb.Migration), } - // Fills: TruncatedTo, Compactions, DesctructPrefix. + m.processCompactions(ctx, mig, &result) + m.processDestroyPrefixes(ctx, mig, &result) + m.processExtFullBackup(ctx, mig, &result) if !opt.skipTruncateLog { - m.doTruncating(ctx, mig, &result) - } else { - // Fast path: `truncate_to` wasn't updated, just copy the compactions and truncated to. - result.NewBase.Compactions = mig.Compactions - result.NewBase.TruncatedTo = mig.TruncatedTo + m.processTruncatedTo(ctx, mig, &result) } // We do skip truncate log first, so metas removed by truncating can be removed in this execution. // Fills: EditMeta for new Base. - m.doMetaEdits(ctx, mig, &result) + m.processMetaEdits(ctx, mig, &result) return result } @@ -848,9 +1008,8 @@ func (m MigrationExt) writeBase(ctx context.Context, mig *pb.Migration) error { return m.s.Rename(ctx, path.Join(m.prefix, baseTmp), path.Join(m.prefix, baseMigrationName)) } -// doMetaEdits applies the modification to the meta files in the storage. -// This will delete data files firstly. Make sure the new BASE was persisted before calling this. -func (m MigrationExt) doMetaEdits(ctx context.Context, mig *pb.Migration, out *MigratedTo) { +// processMetaEdits applies the modification to the meta files in the storage. +func (m MigrationExt) processMetaEdits(ctx context.Context, mig *pb.Migration, out *MigratedTo) { m.Hooks.StartHandlingMetaEdits(mig.EditMeta) handleAMetaEdit := func(medit *pb.MetaEdit) { @@ -1035,8 +1194,29 @@ func (m MigrationExt) tryRemovePrefix(ctx context.Context, pfx string, out *Migr } } -// doTruncating tries to remove outdated compaction, filling the not-yet removed compactions to the new migration. -func (m MigrationExt) doTruncating(ctx context.Context, mig *pb.Migration, result *MigratedTo) { +// processTruncatedTo tries to remove outdated compaction, filling the not-yet removed compactions to the new migration. +func (m MigrationExt) processTruncatedTo(ctx context.Context, mig *pb.Migration, result *MigratedTo) { + result.NewBase.TruncatedTo = mig.TruncatedTo + m.Hooks.StartLoadingMetaForTruncating() + mdSet := new(StreamMetadataSet) + mdSet.MetadataDownloadBatchSize = 128 + shiftTS, err := mdSet.LoadUntilAndCalculateShiftTS(ctx, m.s, mig.TruncatedTo) + if err != nil { + result.Warnings = append(result.Warnings, errors.Annotatef(err, "failed to open meta storage")) + return + } + m.Hooks.EndLoadingMetaForTruncating() + + m.doTruncateLogs(ctx, mdSet, shiftTS, result) +} + +func (m MigrationExt) processDestroyPrefixes(ctx context.Context, mig *pb.Migration, result *MigratedTo) { + for _, pfx := range mig.DestructPrefix { + m.tryRemovePrefix(ctx, pfx, result) + } +} + +func (m MigrationExt) processCompactions(ctx context.Context, mig *pb.Migration, result *MigratedTo) { // NOTE: Execution of truncation wasn't implemented here. // If we are going to truncate some files, for now we still need to use `br log truncate`. 
for _, compaction := range mig.Compactions { @@ -1048,23 +1228,140 @@ func (m MigrationExt) doTruncating(ctx context.Context, mig *pb.Migration, resul m.tryRemovePrefix(ctx, compaction.GeneratedFiles, result) } } - for _, pfx := range mig.DestructPrefix { - m.tryRemovePrefix(ctx, pfx, result) +} + +func (m MigrationExt) processExtFullBackup(ctx context.Context, mig *pb.Migration, result *MigratedTo) { + groups := LoadIngestedSSTs(ctx, m.s, mig.IngestedSstPaths) + processGroup := func(outErr error, e IngestedSSTsGroup) (copyToNewMig bool, err error) { + if outErr != nil { + return true, outErr + } + + if !e.GroupFinished() { + return true, nil + } + + if e.GroupTS() >= mig.TruncatedTo { + return true, nil + } + + for _, b := range e { + m.tryRemovePrefix(ctx, b.FilesPrefixHint, result) + } + return false, nil + } + for err, item := range iter.AsSeq(ctx, groups) { + copyToNewMig, err := processGroup(err, item) + if err != nil { + result.Warn(err) + } + if copyToNewMig { + for _, exb := range item { + result.NewBase.IngestedSstPaths = append(result.NewBase.IngestedSstPaths, exb.path) + } + } } +} - result.NewBase.TruncatedTo = mig.TruncatedTo +type PathedIngestedSSTs struct { + *pb.IngestedSSTs + path string +} - m.Hooks.StartLoadingMetaForTruncating() - mdSet := new(StreamMetadataSet) - mdSet.MetadataDownloadBatchSize = 128 - shiftTS, err := mdSet.LoadUntilAndCalculateShiftTS(ctx, m.s, mig.TruncatedTo) +type IngestedSSTsGroup []PathedIngestedSSTs + +func (ebs IngestedSSTsGroup) GroupFinished() bool { + for _, b := range ebs { + if b.Finished { + return true + } + } + return false +} + +func (ebs IngestedSSTsGroup) GroupTS() uint64 { + for _, b := range ebs { + if b.Finished { + return b.AsIfTs + } + } + return math.MaxUint64 +} + +func LoadIngestedSSTs( + ctx context.Context, + s storage.ExternalStorage, + paths []string, +) iter.TryNextor[IngestedSSTsGroup] { + fullBackupDirIter := iter.FromSlice(paths) + backups := iter.TryMap(fullBackupDirIter, func(name string) (PathedIngestedSSTs, error) { + // name is the absolute path in external storage. 
+ bkup, err := readIngestedSSTs(ctx, name, s) + failpoint.InjectCall("load-ingested-ssts-err", &err) + if err != nil { + return PathedIngestedSSTs{}, errors.Annotatef(err, "failed to read backup at %s", name) + } + return PathedIngestedSSTs{IngestedSSTs: bkup, path: name}, nil + }) + extBackups, err := groupExtraBackups(ctx, backups) if err != nil { - result.Warnings = append(result.Warnings, errors.Annotatef(err, "failed to open meta storage")) - return + return iter.Fail[IngestedSSTsGroup](err) } - m.Hooks.EndLoadingMetaForTruncating() + return iter.FromSlice(extBackups) +} - m.doTruncateLogs(ctx, mdSet, shiftTS, result) +func groupExtraBackups(ctx context.Context, i iter.TryNextor[PathedIngestedSSTs]) ([]IngestedSSTsGroup, error) { + var ( + collected = map[uuid.UUID]IngestedSSTsGroup{} + finished = map[uuid.UUID]struct{}{} + ) + + for err, fbk := range iter.AsSeq(ctx, i) { + if err != nil { + return nil, err + } + + if len(fbk.BackupUuid) != len(uuid.UUID{}) { + return nil, errors.Annotatef(berrors.ErrInvalidArgument, + "the full backup UUID has bad length(%d)", len(fbk.BackupUuid)) + } + uid := uuid.UUID(fbk.BackupUuid) + log.Info("Collecting extra full backup", + zap.Stringer("UUID", uid), zap.String("path", fbk.FilesPrefixHint), zap.Bool("finished", fbk.Finished)) + + if _, ok := finished[uid]; ok { + log.Warn("Encountered a finished full backup.", zap.Stringer("UUID", uid), zap.String("path", fbk.FilesPrefixHint)) + return nil, errors.Annotatef( + berrors.ErrInvalidArgument, + "the extra full backup group %s at %s encounters an extra full backup meta after a finished one", + uid, fbk.FilesPrefixHint, + ) + } + + collected[uid] = append(collected[uid], fbk) + if fbk.Finished { + finished[uid] = struct{}{} + } + } + + res := make([]IngestedSSTsGroup, 0, len(collected)) + for v := range maps.Values(collected) { + res = append(res, v) + } + return res, nil +} + +func readIngestedSSTs(ctx context.Context, name string, s storage.ExternalStorage) (*pb.IngestedSSTs, error) { + reader, err := s.ReadFile(ctx, name) + if err != nil { + return nil, err + } + + var backup pb.IngestedSSTs + if err := backup.Unmarshal(reader); err != nil { + return nil, err + } + return &backup, nil } func (m MigrationExt) loadFilesOfPrefix(ctx context.Context, prefix string) (out []string, err error) { @@ -1309,15 +1606,33 @@ func isEmptyMetadata(md *pb.Metadata) bool { return len(md.FileGroups) == 0 && len(md.Files) == 0 } +/* Below are hash algorithms for hashing a component of the migration. + * Sadly there isn't a document describes the behavior of the algorithms. + * Perhaps we can standardlize them in the future. + * Maybe by defining a ordering-insensitive object hash algorithm for protocol buffer. + * + * Note: For now, the canon of the hash algorithm for a message should follow the following rules: + * - If a hash algorithm for a message exists both in TiKV and BR and conflicting, we + * follow the implementation at where the message firstly creates (say, for compactions, + * TiKV will be the canonical implementation. while for extra full backups, BR is canonical.). + * - For commonly used fields, follow the implementation in BR. + * + * Another note: nowadays, the hash of a migration is mainly used for detecting duplicated works, + * so the difference between hash algorithms won't result in something too bad... 
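 * One concrete consequence of the scheme below: every component contributes through
 * XOR, so hashMigration is insensitive to the order of Compactions, EditMeta and
 * IngestedSstPaths. Two migrations that carry the same contents in a different order
 * hash to the same value, which is exactly what duplicate-work detection needs.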
+ */ + func hashMigration(m *pb.Migration) uint64 { - var crc64 uint64 = 0 + var crc64Res uint64 = 0 for _, compaction := range m.Compactions { - crc64 ^= compaction.ArtifactsHash + crc64Res ^= compaction.ArtifactsHash } for _, metaEdit := range m.EditMeta { - crc64 ^= hashMetaEdit(metaEdit) + crc64Res ^= hashMetaEdit(metaEdit) + } + for _, extBkup := range m.IngestedSstPaths { + crc64Res ^= crc64.Checksum([]byte(extBkup), crc64.MakeTable(crc64.ISO)) } - return crc64 ^ m.TruncatedTo + return crc64Res ^ m.TruncatedTo } func hashMetaEdit(metaEdit *pb.MetaEdit) uint64 { @@ -1348,3 +1663,11 @@ func hashMetaEdit(metaEdit *pb.MetaEdit) uint64 { func nameOf(mig *pb.Migration, sn int) string { return fmt.Sprintf("%08d_%016X.mgrt", sn, hashMigration(mig)) } + +func isEmptyMigration(mig *pb.Migration) bool { + return len(mig.Compactions) == 0 && + len(mig.EditMeta) == 0 && + len(mig.IngestedSstPaths) == 0 && + len(mig.DestructPrefix) == 0 && + mig.TruncatedTo == 0 +} diff --git a/br/pkg/stream/stream_metas_test.go b/br/pkg/stream/stream_metas_test.go index bcea9cb585953..47de890300678 100644 --- a/br/pkg/stream/stream_metas_test.go +++ b/br/pkg/stream/stream_metas_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/fsouza/fake-gcs-server/fakestorage" + "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" @@ -435,6 +436,53 @@ func TestReplaceMetadataTs(t *testing.T) { require.Equal(t, m.MaxTs, uint64(4)) } +func pef(t *testing.T, fb *backuppb.IngestedSSTs, sn int, s storage.ExternalStorage) string { + path := fmt.Sprintf("extbackupmeta_%08d", sn) + bs, err := fb.Marshal() + if err != nil { + require.NoError(t, err) + } + + err = s.WriteFile(context.Background(), path, bs) + require.NoError(t, err) + return path +} + +type efOP func(*backuppb.IngestedSSTs) + +func extFullBkup(ops ...efOP) *backuppb.IngestedSSTs { + ef := &backuppb.IngestedSSTs{} + for _, op := range ops { + op(ef) + } + return ef +} + +func finished() efOP { + return func(ef *backuppb.IngestedSSTs) { + ef.Finished = true + } +} + +func makeID() efOP { + id := uuid.New() + return func(ef *backuppb.IngestedSSTs) { + ef.BackupUuid = id[:] + } +} + +func prefix(pfx string) efOP { + return func(ef *backuppb.IngestedSSTs) { + ef.FilesPrefixHint = pfx + } +} + +func asIfTS(ts uint64) efOP { + return func(ef *backuppb.IngestedSSTs) { + ef.AsIfTs = ts + } +} + func m(storeId int64, minTS, maxTS uint64) *backuppb.Metadata { return &backuppb.Metadata{ StoreId: storeId, @@ -446,6 +494,12 @@ func m(storeId int64, minTS, maxTS uint64) *backuppb.Metadata { type migOP func(*backuppb.Migration) +func mExtFullBackup(path ...string) migOP { + return func(m *backuppb.Migration) { + m.IngestedSstPaths = append(m.IngestedSstPaths, path...) + } +} + func mDstrPfx(path ...string) migOP { return func(m *backuppb.Migration) { m.DestructPrefix = append(m.DestructPrefix, path...) 
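	// Typical composition in the tests below (the literal values are only illustrative):
	//   m := mig(mExtFullBackup("extbackupmeta_00000001"), mDstrPfx("some/prefix"), mTruncatedTo(42))
	// mig() applies each migOP in order to a fresh NewMigration().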
@@ -619,7 +673,7 @@ func tmp(t *testing.T) *storage.LocalStorage { } func mig(ops ...migOP) *backuppb.Migration { - mig := &backuppb.Migration{} + mig := NewMigration() for _, op := range ops { op(mig) } @@ -2436,7 +2490,7 @@ func TestBasicMigration(t *testing.T) { ) bs := storage.Batch(s) - est := MigerationExtension(bs) + est := MigrationExtension(bs) res := MergeMigrations(mig1, mig2) resE := mig( @@ -2454,7 +2508,7 @@ func TestBasicMigration(t *testing.T) { requireMigrationsEqual(t, resE, res) ctx := context.Background() - mg := est.MigrateTo(ctx, res) + mg := est.migrateTo(ctx, res) newBaseE := mig(mLogDel("00002.meta", spans("00001.log", 1024, sp(0, 42), sp(42, 18)))) require.Empty(t, mg.Warnings) @@ -2470,7 +2524,7 @@ func TestBasicMigration(t *testing.T) { delRem := mig(mLogDel("00002.meta", spans("00001.log", 1024, sp(60, 1024-60)))) newNewBase := MergeMigrations(mg.NewBase, delRem) - mg = est.MigrateTo(ctx, newNewBase) + mg = est.migrateTo(ctx, newNewBase) require.Empty(t, mg.Warnings) requireMigrationsEqual(t, mg.NewBase, mig()) } @@ -2502,7 +2556,7 @@ func TestMergeAndMigrateTo(t *testing.T) { mig3p := pmig(s, 3, mig3) bs := storage.Batch(s) - est := MigerationExtension(bs) + est := MigrationExtension(bs) ctx := context.Background() migs, err := est.Load(ctx) @@ -2514,7 +2568,7 @@ func TestMergeAndMigrateTo(t *testing.T) { spans(lN(3), 100, sp(0, 42), sp(42, 18), sp(60, 40))), )) - mg := est.MergeAndMigrateTo(ctx, 2) + mg := est.MergeAndMigrateTo(ctx, 2, MMOptSkipLockingInTest()) require.Len(t, mg.Source, 2) require.Empty(t, mg.Warnings) @@ -2532,7 +2586,7 @@ func TestMergeAndMigrateTo(t *testing.T) { requireMigrationsEqual(t, &migs.Layers[0].Content, mig3) require.EqualValues(t, migs.Layers[0].SeqNum, 3) - mg = est.MergeAndMigrateTo(ctx, 3) + mg = est.MergeAndMigrateTo(ctx, 3, MMOptSkipLockingInTest()) require.Empty(t, mg.Warnings) requireMigrationsEqual(t, mg.NewBase, mig()) effs = effectsOf(bs.ReadOnlyEffects()) @@ -2570,7 +2624,7 @@ func TestRemoveCompaction(t *testing.T) { mTruncatedTo(20), ) bs := storage.Batch(s) - est := MigerationExtension(bs) + est := MigrationExtension(bs) merged := MergeMigrations(mig1, mig2) requireMigrationsEqual(t, merged, mig( @@ -2581,7 +2635,7 @@ func TestRemoveCompaction(t *testing.T) { mTruncatedTo(30), )) - mg := est.MigrateTo(ctx, merged) + mg := est.migrateTo(ctx, merged) requireMigrationsEqual(t, mg.NewBase, mig( mCompaction(cDir(1), aDir(1), 10, 40), mCompaction(cDir(2), aDir(2), 35, 50), @@ -2609,13 +2663,13 @@ func TestRetry(t *testing.T) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/br/pkg/storage/local_write_file_err", `1*return("this disk remembers nothing")`)) ctx := context.Background() - est := MigerationExtension(s) - mg := est.MergeAndMigrateTo(ctx, 2) + est := MigrationExtension(s) + mg := est.MergeAndMigrateTo(ctx, 2, MMOptSkipLockingInTest()) require.Len(t, mg.Warnings, 1) require.Error(t, mg.Warnings[0], "this disk remembers nothing") requireMigrationsEqual(t, mg.NewBase, mig(mDel(mN(1), lN(1), lN(2)))) - mg = est.MergeAndMigrateTo(ctx, 2) + mg = est.MergeAndMigrateTo(ctx, 2, MMOptSkipLockingInTest()) require.Empty(t, slices.DeleteFunc(mg.Warnings, func(err error) bool { return strings.Contains(err.Error(), "failed to delete file") })) @@ -2640,8 +2694,8 @@ func TestRetryRemoveCompaction(t *testing.T) { ) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/br/pkg/storage/local_delete_file_err", `1*return("this disk will never forget")`)) - est := MigerationExtension(s) - mg := est.MigrateTo(ctx, 
mig1) + est := MigrationExtension(s) + mg := est.migrateTo(ctx, mig1) require.Len(t, mg.Warnings, 1) require.Error(t, mg.Warnings[0], "this disk will never forget") requireMigrationsEqual(t, mg.NewBase, mig( @@ -2650,7 +2704,7 @@ func TestRetryRemoveCompaction(t *testing.T) { mDstrPfx(cDir(1), aDir(1)), )) - mg = est.MigrateTo(ctx, mg.NewBase) + mg = est.migrateTo(ctx, mg.NewBase) require.Empty(t, mg.Warnings) requireMigrationsEqual(t, mg.NewBase, mig( mCompaction(placeholder(cDir(2)), placeholder(aDir(2)), 28, 32), @@ -2684,10 +2738,10 @@ func TestWithSimpleTruncate(t *testing.T) { }, })) - est := MigerationExtension(s) + est := MigrationExtension(s) m := mig(mTruncatedTo(65)) var res MigratedTo - effs := est.DryRun(func(me MigrationExt) { res = me.MigrateTo(ctx, m) }) + effs := est.DryRun(func(me MigrationExt) { res = me.migrateTo(ctx, m) }) require.Empty(t, res.Warnings) for _, eff := range effs { @@ -2742,7 +2796,7 @@ func TestAppendingMigs(t *testing.T) { asp(fi(80, 85, WriteCF, 72), sp(34, 5)), }, }), lN(2)) - est := MigerationExtension(s) + est := MigrationExtension(s) cDir := func(n uint64) string { return fmt.Sprintf("%05d/output", n) } aDir := func(n uint64) string { return fmt.Sprintf("%05d/metas", n) } @@ -2775,10 +2829,10 @@ func TestUserAbort(t *testing.T) { pmig(s, 0, mig(mTruncatedTo(42))) pmig(s, 1, mig(mTruncatedTo(96))) - est := MigerationExtension(s) + est := MigrationExtension(s) var res MergeAndMigratedTo effs := est.DryRun(func(me MigrationExt) { - res = me.MergeAndMigrateTo(ctx, 1, MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { + res = me.MergeAndMigrateTo(ctx, 1, MMOptSkipLockingInTest(), MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { return false })) }) @@ -2786,3 +2840,153 @@ func TestUserAbort(t *testing.T) { require.ErrorContains(t, res.Warnings[0], "aborted") require.Empty(t, effs) } + +func TestUnsupportedVersion(t *testing.T) { + s := tmp(t) + m := mig(mVersion(backuppb.MigrationVersion(65535))) + pmig(s, 1, m) + + est := MigrationExtension(s) + ctx := context.Background() + _, err := est.Load(ctx) + require.Error(t, err) + require.ErrorContains(t, err, "ErrMigrationVersionNotSupported") +} + +func mVersion(migrationVersion backuppb.MigrationVersion) migOP { + return func(m *backuppb.Migration) { + m.Version = migrationVersion + } +} + +func TestCreator(t *testing.T) { + mig := NewMigration() + require.Contains(t, mig.Creator, "br") + require.Equal(t, mig.Version, SupportedMigVersion) +} + +func TestGroupedExtFullBackup(t *testing.T) { + ctx := context.Background() + s := tmp(t) + placeholder := func(pfx string) string { + path := path.Join(pfx, "monolith") + require.NoError(t, s.WriteFile(ctx, path, []byte("🪨"))) + return path + } + idx := 0 + somewhere := func() string { + idx += 1 + return placeholder(fmt.Sprintf("%06d", idx)) + } + + type Case struct { + InputGroups []*backuppb.IngestedSSTs + TruncatedTo uint64 + + RequireRem []int + } + + cases := []Case{ + { + InputGroups: []*backuppb.IngestedSSTs{ + extFullBkup(prefix(somewhere()), asIfTS(10), makeID(), finished()), + extFullBkup(prefix(somewhere()), asIfTS(12), makeID(), finished()), + }, + TruncatedTo: 11, + RequireRem: []int{1}, + }, + { + InputGroups: []*backuppb.IngestedSSTs{ + extFullBkup(prefix(somewhere()), asIfTS(10), makeID(), finished()), + extFullBkup(prefix(somewhere()), asIfTS(12), makeID(), finished()), + }, + TruncatedTo: 13, + RequireRem: []int{}, + }, + { + InputGroups: []*backuppb.IngestedSSTs{ + 
extFullBkup(prefix(somewhere()), asIfTS(10), makeID(), finished()), + extFullBkup(prefix(somewhere()), asIfTS(12), makeID(), finished()), + }, + TruncatedTo: 10, + RequireRem: []int{0, 1}, + }, + { + InputGroups: func() []*backuppb.IngestedSSTs { + id := makeID() + return []*backuppb.IngestedSSTs{ + extFullBkup(prefix(somewhere()), id), + extFullBkup(prefix(somewhere()), asIfTS(10), id, finished()), + extFullBkup(prefix(somewhere()), asIfTS(12), makeID(), finished()), + } + }(), + TruncatedTo: 11, + RequireRem: []int{2}, + }, + { + InputGroups: func() []*backuppb.IngestedSSTs { + id := makeID() + return []*backuppb.IngestedSSTs{ + extFullBkup(prefix(somewhere()), id), + extFullBkup(prefix(somewhere()), asIfTS(12), id, finished()), + extFullBkup(prefix(somewhere()), asIfTS(10), makeID(), finished()), + } + }(), + TruncatedTo: 11, + RequireRem: []int{0, 1}, + }, + { + InputGroups: func() []*backuppb.IngestedSSTs { + id := makeID() + return []*backuppb.IngestedSSTs{ + extFullBkup(prefix(somewhere()), asIfTS(999), id), + extFullBkup(prefix(somewhere()), asIfTS(10), id, finished()), + extFullBkup(prefix(somewhere()), asIfTS(12), makeID(), finished()), + } + }(), + TruncatedTo: 11, + RequireRem: []int{2}, + }, + { + InputGroups: []*backuppb.IngestedSSTs{ + extFullBkup(prefix(somewhere()), asIfTS(10), makeID()), + extFullBkup(prefix(somewhere()), asIfTS(12), makeID()), + extFullBkup(prefix(somewhere()), asIfTS(14), makeID()), + }, + TruncatedTo: 11, + RequireRem: []int{0, 1, 2}, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + m := mig() + paths := []PathedIngestedSSTs{} + for i, input := range c.InputGroups { + p := pef(t, input, i, s) + paths = append(paths, PathedIngestedSSTs{ + path: p, + IngestedSSTs: input, + }) + mExtFullBackup(p)(m) + require.FileExists(t, path.Join(s.Base(), input.FilesPrefixHint)) + } + mTruncatedTo(c.TruncatedTo)(m) + est := MigrationExtension(s) + res := est.migrateTo(ctx, m) + require.NoError(t, multierr.Combine(res.Warnings...)) + chosen := []string{} + nonChosen := []PathedIngestedSSTs{} + forgottenIdx := 0 + for _, i := range c.RequireRem { + chosen = append(chosen, paths[i].path) + nonChosen = append(nonChosen, paths[forgottenIdx:i]...) + forgottenIdx = i + 1 + } + require.ElementsMatch(t, chosen, res.NewBase.IngestedSstPaths) + for _, p := range nonChosen { + require.NoFileExists(t, path.Join(s.Base(), p.FilesPrefixHint, "monolith")) + } + }) + } +} diff --git a/br/pkg/summary/summary.go b/br/pkg/summary/summary.go index 3642410bdcd16..0a5b7f7fcd255 100644 --- a/br/pkg/summary/summary.go +++ b/br/pkg/summary/summary.go @@ -3,11 +3,16 @@ package summary import ( + "sync/atomic" "time" "go.uber.org/zap" ) +var ( + lastStatus atomic.Bool +) + // SetUnit set unit "backup/restore" for summary log. func SetUnit(unit string) { collector.SetUnit(unit) @@ -40,9 +45,15 @@ func CollectUint(name string, t uint64) { // SetSuccessStatus sets final success status. func SetSuccessStatus(success bool) { + lastStatus.Store(success) collector.SetSuccessStatus(success) } +// Succeed returns whether the last call to `SetSuccessStatus` passes `true`. 
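// The value lives in a package-level atomic.Bool and only changes on the next
// SetSuccessStatus call, so a caller (an integration test, for instance) can read it
// after a task returns to tell whether the most recent BR task reported success.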
+func Succeed() bool { + return lastStatus.Load() +} + // NowDureTime returns the duration between start time and current time func NowDureTime() time.Duration { return collector.NowDureTime() diff --git a/br/pkg/task/BUILD.bazel b/br/pkg/task/BUILD.bazel index 77aba10ac0fcf..092788e4ea4f6 100644 --- a/br/pkg/task/BUILD.bazel +++ b/br/pkg/task/BUILD.bazel @@ -50,6 +50,7 @@ go_library( "//br/pkg/streamhelper/daemon", "//br/pkg/summary", "//br/pkg/utils", + "//br/pkg/utils/iter", "//br/pkg/version", "//pkg/config", "//pkg/ddl", @@ -59,6 +60,8 @@ go_library( "//pkg/kv", "//pkg/meta/model", "//pkg/parser/model", + "//pkg/metrics", + "//pkg/parser/ast", "//pkg/parser/mysql", "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", @@ -98,6 +101,7 @@ go_library( "@org_golang_x_sync//errgroup", "@org_uber_go_multierr//:multierr", "@org_uber_go_zap//:zap", + "@org_uber_go_zap//zapcore", ], ) diff --git a/br/pkg/task/common.go b/br/pkg/task/common.go index 1813741634609..c95bee08c59d8 100644 --- a/br/pkg/task/common.go +++ b/br/pkg/task/common.go @@ -596,6 +596,10 @@ func (cfg *Config) normalizePDURLs() error { return nil } +func (cfg *Config) UserFiltered() bool { + return len(cfg.Schemas) != 0 || len(cfg.Tables) != 0 || len(cfg.FilterStr) != 0 +} + // ParseFromFlags parses the config from the flag set. func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { var err error diff --git a/br/pkg/task/operator/force_flush.go b/br/pkg/task/operator/force_flush.go index 6bdc3a0bae288..00712e8e56a07 100644 --- a/br/pkg/task/operator/force_flush.go +++ b/br/pkg/task/operator/force_flush.go @@ -2,22 +2,18 @@ package operator import ( "context" - "crypto/tls" "slices" "github.com/pingcap/errors" logbackup "github.com/pingcap/kvproto/pkg/logbackuppb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" - "github.com/pingcap/tidb/br/pkg/task" - "github.com/pingcap/tidb/br/pkg/utils" "github.com/pingcap/tidb/pkg/util/engine" pd "github.com/tikv/pd/client" "github.com/tikv/pd/client/opt" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" ) func getAllTiKVs(ctx context.Context, p pd.Client) ([]*metapb.Store, error) { @@ -29,25 +25,6 @@ func getAllTiKVs(ctx context.Context, p pd.Client) ([]*metapb.Store, error) { return withoutTiFlash, err } -func createStoreManager(pd pd.Client, cfg *task.Config) (*utils.StoreManager, error) { - var ( - tconf *tls.Config - err error - ) - - if cfg.TLS.IsEnabled() { - tconf, err = cfg.TLS.ToTLSConfig() - if err != nil { - return nil, errors.Annotate(err, "invalid tls config") - } - } - kvMgr := utils.NewStoreManager(pd, keepalive.ClientParameters{ - Time: cfg.GRPCKeepaliveTime, - Timeout: cfg.GRPCKeepaliveTimeout, - }, tconf) - return kvMgr, nil -} - func RunForceFlush(ctx context.Context, cfg *ForceFlushConfig) error { pdMgr, err := dialPD(ctx, &cfg.Config) if err != nil { diff --git a/br/pkg/task/operator/list_migration.go b/br/pkg/task/operator/list_migration.go index d6c7efd57197a..1e030d7e0f3d8 100644 --- a/br/pkg/task/operator/list_migration.go +++ b/br/pkg/task/operator/list_migration.go @@ -26,8 +26,8 @@ func RunListMigrations(ctx context.Context, cfg ListMigrationConfig) error { if err != nil { return err } - ext := stream.MigerationExtension(st) - migs, err := ext.Load(ctx) + ext := stream.MigrationExtension(st) + migs, err := ext.Load(ctx, stream.MLNotFoundIsErr()) if err != nil { return err } @@ -40,12 +40,12 @@ func RunListMigrations(ctx context.Context, cfg ListMigrationConfig) error { 
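	// Two related changes in RunListMigrations above: Load is now called with
	// stream.MLNotFoundIsErr(), matching the new "BR:Common:ErrMigrationNotFound"
	// ("no migration found") entry added to errors.toml, and AddMigrationToTable
	// became a context-aware method on the extension instead of a package-level
	// function, hence the ctx argument threaded through the calls below.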
console.Println(statusOK(fmt.Sprintf("Total %d Migrations.", len(migs.Layers)+1))) console.Printf("> BASE <\n") tbl := console.CreateTable() - stream.AddMigrationToTable(migs.Base, tbl) + ext.AddMigrationToTable(ctx, migs.Base, tbl) tbl.Print() for _, t := range migs.Layers { console.Printf("> %08d <\n", t.SeqNum) tbl := console.CreateTable() - stream.AddMigrationToTable(&t.Content, tbl) + ext.AddMigrationToTable(ctx, &t.Content, tbl) tbl.Print() } } diff --git a/br/pkg/task/operator/migrate_to.go b/br/pkg/task/operator/migrate_to.go index 282e82784ecb9..2a086b9868db1 100644 --- a/br/pkg/task/operator/migrate_to.go +++ b/br/pkg/task/operator/migrate_to.go @@ -5,7 +5,7 @@ import ( "github.com/fatih/color" "github.com/pingcap/errors" - backuppb "github.com/pingcap/kvproto/pkg/brpb" + backup "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/stream" @@ -39,16 +39,16 @@ func (cx migrateToCtx) printErr(errs []error, msg string) { } } -func (cx migrateToCtx) askForContinue(targetMig *backuppb.Migration) bool { +func (cx migrateToCtx) askForContinue(ctx context.Context, targetMig *backup.Migration) bool { tbl := cx.console.CreateTable() - stream.AddMigrationToTable(targetMig, tbl) + cx.est.AddMigrationToTable(ctx, targetMig, tbl) cx.console.Println("The migration going to be executed will be like: ") tbl.Print() return cx.console.PromptBool("Continue? ") } -func (cx migrateToCtx) dryRun(f func(stream.MigrationExt) stream.MergeAndMigratedTo) error { +func (cx migrateToCtx) dryRun(ctx context.Context, f func(stream.MigrationExt) stream.MergeAndMigratedTo) error { var ( est = cx.est console = cx.console @@ -60,7 +60,7 @@ func (cx migrateToCtx) dryRun(f func(stream.MigrationExt) stream.MergeAndMigrate }) tbl := console.CreateTable() - stream.AddMigrationToTable(estBase.NewBase, tbl) + cx.est.AddMigrationToTable(ctx, estBase.NewBase, tbl) console.Println("The new BASE migration will be like: ") tbl.Print() file, err := storage.SaveJSONEffectsToTmp(effects) @@ -90,7 +90,7 @@ func RunMigrateTo(ctx context.Context, cfg MigrateToConfig) error { console := glue.ConsoleOperations{ConsoleGlue: glue.StdIOGlue{}} - est := stream.MigerationExtension(st) + est := stream.MigrationExtension(st) est.Hooks = stream.NewProgressBarHooks(console) migs, err := est.Load(ctx) if err != nil { @@ -120,12 +120,14 @@ func RunMigrateTo(ctx context.Context, cfg MigrateToConfig) error { return nil } if cfg.DryRun { - run = cx.dryRun + run = func(f func(stream.MigrationExt) stream.MergeAndMigratedTo) error { + return cx.dryRun(ctx, f) + } } return run(func(est stream.MigrationExt) stream.MergeAndMigratedTo { - return est.MergeAndMigrateTo(ctx, targetVersion, stream.MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { - return cfg.Yes || cx.askForContinue(m) + return est.MergeAndMigrateTo(ctx, targetVersion, stream.MMOptInteractiveCheck(func(ctx context.Context, m *backup.Migration) bool { + return cfg.Yes || cx.askForContinue(ctx, m) })) }) } diff --git a/br/pkg/task/operator/prepare_snap.go b/br/pkg/task/operator/prepare_snap.go index cbe5c3ac2442b..50a624423240b 100644 --- a/br/pkg/task/operator/prepare_snap.go +++ b/br/pkg/task/operator/prepare_snap.go @@ -19,12 +19,32 @@ import ( "github.com/pingcap/tidb/br/pkg/task" "github.com/pingcap/tidb/br/pkg/utils" "github.com/tikv/client-go/v2/tikv" + pd "github.com/tikv/pd/client" "go.uber.org/multierr" "go.uber.org/zap" "golang.org/x/sync/errgroup" 
"google.golang.org/grpc/keepalive" ) +func createStoreManager(pd pd.Client, cfg *task.Config) (*utils.StoreManager, error) { + var ( + tconf *tls.Config + err error + ) + + if cfg.TLS.IsEnabled() { + tconf, err = cfg.TLS.ToTLSConfig() + if err != nil { + return nil, errors.Annotate(err, "invalid tls config") + } + } + kvMgr := utils.NewStoreManager(pd, keepalive.ClientParameters{ + Time: cfg.GRPCKeepaliveTime, + Timeout: cfg.GRPCKeepaliveTimeout, + }, tconf) + return kvMgr, nil +} + func dialPD(ctx context.Context, cfg *task.Config) (*pdutil.PdController, error) { var tc *tls.Config if cfg.TLS.IsEnabled() { diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index 65b391f678edf..e903063ad731d 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -15,7 +15,9 @@ import ( "github.com/google/uuid" "github.com/opentracing/opentracing-go" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" backuppb "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/kvproto/pkg/encryptionpb" "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/checkpoint" pconfig "github.com/pingcap/tidb/br/pkg/config" @@ -38,7 +40,10 @@ import ( "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/metrics" + "github.com/pingcap/tidb/pkg/parser/ast" pmodel "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/engine" "github.com/spf13/cobra" @@ -276,6 +281,10 @@ type RestoreConfig struct { UseFSR bool `json:"use-fsr" toml:"use-fsr"` } +func (cfg *RestoreConfig) LocalEncryptionEnabled() bool { + return cfg.CipherInfo.CipherType != encryptionpb.EncryptionMethod_PLAINTEXT +} + // DefineRestoreFlags defines common flags for the restore tidb command. func DefineRestoreFlags(flags *pflag.FlagSet) { flags.Bool(flagNoSchema, false, "skip creating schemas and tables, reuse existing empty ones") @@ -665,6 +674,12 @@ func DefaultRestoreConfig(commonConfig Config) RestoreConfig { return cfg } +func printRestoreMetrics() { + log.Info("Metric: import_file_seconds", zap.Object("metric", logutil.MarshalHistogram(metrics.RestoreImportFileSeconds))) + log.Info("Metric: upload_sst_for_pitr_seconds", zap.Object("metric", logutil.MarshalHistogram(metrics.RestoreUploadSSTForPiTRSeconds))) + log.Info("Metric: upload_sst_meta_for_pitr_seconds", zap.Object("metric", logutil.MarshalHistogram(metrics.RestoreUploadSSTMetaForPiTRSeconds))) +} + // RunRestore starts a restore task inside the current goroutine. 
func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error { etcdCLI, err := dialEtcdWithCfg(c, cfg.Config) @@ -676,7 +691,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf log.Error("failed to close the etcd client", zap.Error(err)) } }() - if err := checkTaskExists(c, cfg, etcdCLI); err != nil { + if err := checkConflictingLogBackup(c, cfg, etcdCLI); err != nil { return errors.Annotate(err, "failed to check task exists") } closeF, err := registerTaskToPD(c, etcdCLI) @@ -698,6 +713,8 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } defer mgr.Close() + defer printRestoreMetrics() + var restoreError error if IsStreamRestore(cmdName) { if err := version.CheckClusterVersion(c, mgr.GetPDClient(), version.CheckVersionForBRPiTR); err != nil { @@ -786,14 +803,15 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s // Init DB connection sessions err = client.Init(g, mgr.GetStorage()) defer client.Close() - if err != nil { return errors.Trace(err) } + u, s, backupMeta, err := ReadBackupMeta(ctx, metautil.MetaFile, &cfg.Config) if err != nil { return errors.Trace(err) } + if cfg.CheckRequirements { err := checkIncompatibleChangefeed(ctx, backupMeta.EndVersion, mgr.GetDomain().GetEtcdClient()) log.Info("Checking incompatible TiCDC changefeeds before restoring.", @@ -935,6 +953,15 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s }() } + err = client.InstallPiTRSupport(ctx, snapclient.PiTRCollDep{ + PDCli: mgr.GetPDClient(), + EtcdCli: mgr.GetDomain().GetEtcdClient(), + Storage: util.ProtoV1Clone(u), + }) + if err != nil { + return errors.Trace(err) + } + sp := utils.BRServiceSafePoint{ BackupTS: restoreTS, TTL: utils.DefaultBRGCSafePointTTL, @@ -1163,6 +1190,11 @@ func runSnapshotRestore(c context.Context, mgr *conn.Mgr, g glue.Glue, cmdName s return errors.Trace(err) } + failpoint.InjectCall("run-snapshot-restore-about-to-finish", &err) + if err != nil { + return err + } + schedulersRemovable = true // Set task summary to success status. 
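	// The failpoint.InjectCall("run-snapshot-restore-about-to-finish", &err) hook above
	// hands a pointer to err to a callback that tests can register, so integration tests
	// are able to force runSnapshotRestore to return an error right before
	// schedulersRemovable is set and the task would be reported as successful.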
diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go index 6f1f5dce7bbd2..59cf435c18ffc 100644 --- a/br/pkg/task/stream.go +++ b/br/pkg/task/stream.go @@ -54,13 +54,16 @@ import ( "github.com/pingcap/tidb/br/pkg/streamhelper/daemon" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/utils" + "github.com/pingcap/tidb/br/pkg/utils/iter" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/util/cdcutil" "github.com/spf13/pflag" "github.com/tikv/client-go/v2/oracle" clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/multierr" "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) const ( @@ -138,6 +141,18 @@ type StreamConfig struct { AdvancerCfg advancercfg.Config `json:"advancer-config" toml:"advancer-config"` } +func DefaultStreamConfig(flagsDef func(*pflag.FlagSet)) StreamConfig { + fs := pflag.NewFlagSet("dummy", pflag.ContinueOnError) + flagsDef(fs) + DefineCommonFlags(fs) + cfg := StreamConfig{} + err := cfg.ParseFromFlags(fs) + if err != nil { + log.Panic("failed to parse backup flags to config", zap.Error(err)) + } + return cfg +} + func (cfg *StreamConfig) makeStorage(ctx context.Context) (storage.ExternalStorage, error) { u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) if err != nil { @@ -1052,13 +1067,13 @@ func RunStreamTruncate(c context.Context, g glue.Glue, cmdName string, cfg *Stre } if cfg.CleanUpCompactions { - est := stream.MigerationExtension(extStorage) + est := stream.MigrationExtension(extStorage) est.Hooks = stream.NewProgressBarHooks(console) newSN := math.MaxInt optPrompt := stream.MMOptInteractiveCheck(func(ctx context.Context, m *backuppb.Migration) bool { console.Println("We are going to do the following: ") tbl := console.CreateTable() - stream.AddMigrationToTable(m, tbl) + est.AddMigrationToTable(ctx, m, tbl) tbl.Print() return console.PromptBool("Continue? ") }) @@ -1152,9 +1167,9 @@ func RunStreamTruncate(c context.Context, g glue.Glue, cmdName string, cfg *Stre return nil } -// checkTaskExists checks whether there is a log backup task running. +// checkConflictingLogBackup checks whether there is a log backup task running. // If so, return an error. -func checkTaskExists(ctx context.Context, cfg *RestoreConfig, etcdCLI *clientv3.Client) error { +func checkConflictingLogBackup(ctx context.Context, cfg *RestoreConfig, etcdCLI *clientv3.Client) error { if err := checkConfigForStatus(cfg.PD); err != nil { return err } @@ -1165,15 +1180,37 @@ func checkTaskExists(ctx context.Context, cfg *RestoreConfig, etcdCLI *clientv3. if err != nil { return err } - if len(tasks) > 0 { - return errors.Errorf("log backup task is running: %s, "+ - "please stop the task before restore, and after PITR operation finished, "+ - "create log-backup task again and create a full backup on this cluster", tasks[0].Info.Name) + for _, task := range tasks { + if err := checkTaskCompat(cfg, task); err != nil { + return err + } } return nil } +func checkTaskCompat(cfg *RestoreConfig, task streamhelper.Task) error { + baseErr := errors.Errorf("log backup task is running: %s, and isn't compatible with your restore."+ + "You may check the extra information to get rid of this. 
If that doesn't work, you may "+ + "stop the task before restore, and after the restore operation finished, "+ + "create log-backup task again and create a full backup on this cluster.", task.Info.Name) + if len(cfg.FullBackupStorage) > 0 { + return errors.Annotate(baseErr, "you want to do point in time restore, which isn't compatible with an enabled log backup task yet") + } + if !cfg.UserFiltered() { + return errors.Annotate(baseErr, + "you want to restore a whole cluster, you may use `-f` or `restore table|database` to "+ + "specify the tables to restore to continue") + } + if cfg.LocalEncryptionEnabled() { + return errors.Annotate(baseErr, "the data you want to restore is encrypted, they cannot be copied to the log storage") + } + if task.Info.GetSecurityConfig().GetEncryption() != nil { + return errors.Annotate(baseErr, "the running log backup task is encrypted, the data copied to the log storage cannot work") + } + return nil +} + func checkIncompatibleChangefeed(ctx context.Context, backupTS uint64, etcdCLI *clientv3.Client) error { nameSet, err := cdcutil.GetIncompatibleChangefeedsWithSafeTS(ctx, etcdCLI, backupTS) if err != nil { @@ -1294,6 +1331,7 @@ func restoreStream( checkpointTotalKVCount uint64 checkpointTotalSize uint64 currentTS uint64 + extraFields []zapcore.Field mu sync.Mutex startTime = time.Now() ) @@ -1302,18 +1340,20 @@ func restoreStream( summary.Log("restore log failed summary", zap.Error(err)) } else { totalDureTime := time.Since(startTime) - summary.Log("restore log success summary", zap.Duration("total-take", totalDureTime), - zap.Uint64("source-start-point", cfg.StartTS), - zap.Uint64("source-end-point", cfg.RestoreTS), - zap.Uint64("target-end-point", currentTS), - zap.String("source-start", stream.FormatDate(oracle.GetTimeFromTS(cfg.StartTS))), - zap.String("source-end", stream.FormatDate(oracle.GetTimeFromTS(cfg.RestoreTS))), - zap.String("target-end", stream.FormatDate(oracle.GetTimeFromTS(currentTS))), - zap.Uint64("total-kv-count", totalKVCount), - zap.Uint64("skipped-kv-count-by-checkpoint", checkpointTotalKVCount), - zap.String("total-size", units.HumanSize(float64(totalSize))), - zap.String("skipped-size-by-checkpoint", units.HumanSize(float64(checkpointTotalSize))), - zap.String("average-speed", units.HumanSize(float64(totalSize)/totalDureTime.Seconds())+"/s"), + summary.Log("restore log success summary", + append([]zapcore.Field{zap.Duration("total-take", totalDureTime), + zap.Uint64("source-start-point", cfg.StartTS), + zap.Uint64("source-end-point", cfg.RestoreTS), + zap.Uint64("target-end-point", currentTS), + zap.String("source-start", stream.FormatDate(oracle.GetTimeFromTS(cfg.StartTS))), + zap.String("source-end", stream.FormatDate(oracle.GetTimeFromTS(cfg.RestoreTS))), + zap.String("target-end", stream.FormatDate(oracle.GetTimeFromTS(currentTS))), + zap.Uint64("total-kv-count", totalKVCount), + zap.Uint64("skipped-kv-count-by-checkpoint", checkpointTotalKVCount), + zap.String("total-size", units.HumanSize(float64(totalSize))), + zap.String("skipped-size-by-checkpoint", units.HumanSize(float64(checkpointTotalSize))), + zap.String("average-speed (log)", units.HumanSize(float64(totalSize)/totalDureTime.Seconds())+"/s")}, + extraFields...)..., ) } }() @@ -1338,6 +1378,7 @@ func restoreStream( return errors.Annotate(err, "failed to create restore client") } defer client.Close(ctx) + defer client.RestoreSSTStatisticFields(&extraFields) if taskInfo != nil && taskInfo.Metadata != nil { // reuse the task's rewrite ts @@ -1414,7 +1455,8 @@ func 
restoreStream( if err != nil { return errors.Trace(err) } - client.BuildMigrations(migs) + client.BuildMigrations(migs.Migs) + defer cleanUpWithRetErr(&err, migs.ReadLock.Unlock) // get full backup meta storage to generate rewrite rules. fullBackupStorage, err := parseFullBackupTablesStorage(cfg) @@ -1479,7 +1521,10 @@ func restoreStream( return errors.Trace(err) } - compactionIter := client.LogFileManager.GetCompactionIter(ctx) + numberOfKVsInSST, err := client.LogFileManager.CountExtraSSTTotalKVs(ctx) + if err != nil { + return err + } se, err := g.CreateSession(mgr.GetStorage()) if err != nil { @@ -1489,7 +1534,12 @@ func restoreStream( splitSize, splitKeys := utils.GetRegionSplitInfo(execCtx) log.Info("[Log Restore] get split threshold from tikv config", zap.Uint64("split-size", splitSize), zap.Int64("split-keys", splitKeys)) - pd := g.StartProgress(ctx, "Restore Files(SST + KV)", logclient.TotalEntryCount, !cfg.LogProgress) + addedSSTsIter := client.LogFileManager.GetIngestedSSTs(ctx) + compactionIter := client.LogFileManager.GetCompactionIter(ctx) + sstsIter := iter.ConcatAll(addedSSTsIter, compactionIter) + + totalWorkUnits := numberOfKVsInSST + client.Stats.NumEntries + pd := g.StartProgress(ctx, "Restore Files(SST + Log)", totalWorkUnits, !cfg.LogProgress) err = withProgress(pd, func(p glue.Progress) (pErr error) { updateStatsWithCheckpoint := func(kvCount, size uint64) { mu.Lock() @@ -1502,14 +1552,14 @@ func restoreStream( p.IncBy(int64(kvCount)) } compactedSplitIter, err := client.WrapCompactedFilesIterWithSplitHelper( - ctx, compactionIter, rewriteRules, sstCheckpointSets, + ctx, sstsIter, rewriteRules, sstCheckpointSets, updateStatsWithCheckpoint, splitSize, splitKeys, ) if err != nil { return errors.Trace(err) } - err = client.RestoreCompactedSstFiles(ctx, compactedSplitIter, rewriteRules, importModeSwitcher, p.IncBy) + err = client.RestoreSSTFiles(ctx, compactedSplitIter, rewriteRules, importModeSwitcher, p.IncBy) if err != nil { return errors.Trace(err) } @@ -1954,6 +2004,15 @@ func checkPiTRTaskInfo( return checkInfo, nil } +func cleanUpWithRetErr(errOut *error, f func(ctx context.Context) error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err := f(ctx) + if errOut != nil { + *errOut = multierr.Combine(*errOut, err) + } +} + func waitUntilSchemaReload(ctx context.Context, client *logclient.LogClient) error { log.Info("waiting for schema info finishes reloading") reloadStart := time.Now() diff --git a/br/pkg/utils/iter/iter.go b/br/pkg/utils/iter/iter.go index 6f8f280905c2c..118fb162081fc 100644 --- a/br/pkg/utils/iter/iter.go +++ b/br/pkg/utils/iter/iter.go @@ -5,6 +5,7 @@ package iter import ( "context" "fmt" + goiter "iter" ) // IterResult is the result of try to advancing an impure iterator. @@ -121,3 +122,23 @@ func Tap[T any](i TryNextor[T], with func(T)) TryNextor[T] { tapper: with, } } + +// AsSeq wraps an `TryNextor` to a Seq2. 
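// A typical consumer looks like groupExtraBackups in br/pkg/stream:
//
//	for err, item := range iter.AsSeq(ctx, it) {
//		if err != nil {
//			return err
//		}
//		// ... handle item ...
//	}
//
// On error the zero value of T is yielded alongside the error; iteration ends when the
// underlying iterator reports Finished or when the consumer stops ranging.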
+func AsSeq[T any](ctx context.Context, i TryNextor[T]) goiter.Seq2[error, T] { + return func(yield func(error, T) bool) { + for { + res := i.TryNext(ctx) + var cont bool + if res.Err != nil { + cont = yield(res.Err, *new(T)) + } else if res.Finished { + cont = false + } else { + cont = yield(nil, res.Item) + } + if !cont { + break + } + } + } +} diff --git a/errors.toml b/errors.toml index 1f9329951dbdf..a8cafa71dcf7a 100644 --- a/errors.toml +++ b/errors.toml @@ -56,6 +56,16 @@ error = ''' invalid restore range ''' +["BR:Common:ErrMigrationNotFound"] +error = ''' +no migration found +''' + +["BR:Common:ErrMigrationVersionNotSupported"] +error = ''' +the migration version isn't supported +''' + ["BR:Common:ErrUndefinedDbOrTable"] error = ''' undefined restore databases or tables diff --git a/pkg/executor/brie.go b/pkg/executor/brie.go index c9b5a759cbaef..8f0f137f07c37 100644 --- a/pkg/executor/brie.go +++ b/pkg/executor/brie.go @@ -372,11 +372,16 @@ func (b *executorBuilder) buildBRIE(s *ast.BRIEStmt, schema *expression.Schema) case len(s.Tables) != 0: tables := make([]filter.Table, 0, len(s.Tables)) for _, tbl := range s.Tables { - tables = append(tables, filter.Table{Name: tbl.Name.O, Schema: tbl.Schema.O}) + table := filter.Table{Name: tbl.Name.O, Schema: tbl.Schema.O} + tables = append(tables, table) + cfg.FilterStr = append(cfg.FilterStr, table.String()) } cfg.TableFilter = filter.NewTablesFilter(tables...) case len(s.Schemas) != 0: cfg.TableFilter = filter.NewSchemasFilter(s.Schemas...) + for _, schema := range s.Schemas { + cfg.FilterStr = append(cfg.FilterStr, fmt.Sprintf("`%s`.*", schema)) + } default: cfg.TableFilter = filter.All() } diff --git a/pkg/metrics/BUILD.bazel b/pkg/metrics/BUILD.bazel index 643972fbb6ce8..fdead9a11fb01 100644 --- a/pkg/metrics/BUILD.bazel +++ b/pkg/metrics/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "metrics", srcs = [ "bindinfo.go", + "br.go", "ddl.go", "distsql.go", "disttask.go", diff --git a/pkg/metrics/br.go b/pkg/metrics/br.go new file mode 100644 index 0000000000000..d2861a9f6919b --- /dev/null +++ b/pkg/metrics/br.go @@ -0,0 +1,70 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +var ( + // RestoreImportFileSeconds records the time cost for importing a file. + // Including download / queuing. + RestoreImportFileSeconds prometheus.Histogram + // RestoreUploadSSTForPiTRSeconds records the time cost for uploading SST + // files during restoring for future PiTR. + RestoreUploadSSTForPiTRSeconds prometheus.Histogram + // RestoreUploadSSTMetaForPiTRSeconds records the time cost for saving metadata + // of uploaded SSTs for future PiTR. + RestoreUploadSSTMetaForPiTRSeconds prometheus.Histogram + + // RestoreTableCreatedCount counts how many tables created. + RestoreTableCreatedCount prometheus.Counter +) + +// InitBRMetrics initializes all metrics in BR. 
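// For now these metrics are only constructed and never registered with a prometheus
// registry (see the note in InitMetrics in pkg/metrics/metrics.go); their values are
// surfaced by logging them, as printRestoreMetrics in br/pkg/task/restore.go does.
// A minimal sketch of that flow (illustrative only; dumpImportFileSeconds is a made-up
// wrapper, not code from this patch):
//
//	import (
//		"github.com/pingcap/log"
//		"github.com/pingcap/tidb/br/pkg/logutil"
//		"github.com/pingcap/tidb/pkg/metrics"
//		"go.uber.org/zap"
//	)
//
//	func dumpImportFileSeconds() {
//		metrics.InitBRMetrics()                       // allocate, but do not register
//		metrics.RestoreImportFileSeconds.Observe(1.5) // seconds spent importing one file
//		log.Info("Metric: import_file_seconds",
//			zap.Object("metric", logutil.MarshalHistogram(metrics.RestoreImportFileSeconds)))
//	}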
+func InitBRMetrics() { + RestoreTableCreatedCount = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "BR", + Name: "table_created", + Help: "The count of tables have been created.", + }) + + RestoreImportFileSeconds = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "tidb", + Subsystem: "br", + Name: "restore_import_file_seconds", + + Help: "The time cost for importing a file. (including the time cost in queuing)", + + Buckets: prometheus.ExponentialBuckets(0.01, 4, 14), + }) + + RestoreUploadSSTForPiTRSeconds = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "tidb", + Subsystem: "br", + Name: "restore_upload_sst_for_pitr_seconds", + + Help: "The time cost for uploading SST files for point-in-time recovery", + + Buckets: prometheus.DefBuckets, + }) + + RestoreUploadSSTMetaForPiTRSeconds = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "tidb", + Subsystem: "br", + Name: "restore_upload_sst_meta_for_pitr_seconds", + + Help: "The time cost for uploading SST metadata for point-in-time recovery", + Buckets: prometheus.ExponentialBuckets(0.01, 2, 14), + }) +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 07d90de787329..a41c3a78ee3da 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -98,6 +98,10 @@ func InitMetrics() { InitInfoSchemaV2Metrics() timermetrics.InitTimerMetrics() + // For now, those metrics are initialized but not registered. + // They will be printed to log during restoring... + InitBRMetrics() + PanicCounter = NewCounterVec( prometheus.CounterOpts{ Namespace: "tidb", diff --git a/pkg/util/BUILD.bazel b/pkg/util/BUILD.bazel index 06529102379e1..c41a5666afe66 100644 --- a/pkg/util/BUILD.bazel +++ b/pkg/util/BUILD.bazel @@ -57,6 +57,8 @@ go_library( "@io_etcd_go_etcd_client_v3//:client", "@io_etcd_go_etcd_client_v3//concurrency", "@org_golang_google_grpc//:grpc", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//protoadapt", "@org_golang_x_sync//errgroup", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", @@ -99,6 +101,7 @@ go_test( "//pkg/util/memory", "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_kvproto//pkg/brpb", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", "@org_uber_go_atomic//:atomic", diff --git a/pkg/util/util.go b/pkg/util/util.go index ded6218542d59..a29ca22a17e5d 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -30,6 +30,8 @@ import ( "github.com/pingcap/tidb/pkg/parser" "go.uber.org/atomic" "go.uber.org/zap" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) // SliceToMap converts slice to map @@ -289,3 +291,8 @@ func GetRecoverError(r any) error { } return errors.Errorf("%v", r) } + +// ProtoV1Clone clones a V1 proto message. 
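// It is meant for messages that expose the legacy (V1) proto API, such as the kvproto
// types: the message is adapted to the V2 API with protoadapt.MessageV2Of, deep-copied
// with proto.Clone, and the copy is converted back with protoadapt.MessageV1Of.
// TestDupProto below exercises it with a *brpb.StorageBackend.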
+func ProtoV1Clone[T protoadapt.MessageV1](p T) T { + return protoadapt.MessageV1Of(proto.Clone(protoadapt.MessageV2Of(p))).(T) +} diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index bb7a467a5e203..3955130549165 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/pingcap/errors" + pb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/util/memory" "github.com/stretchr/testify/assert" @@ -120,3 +121,18 @@ func TestIsInCorrectIdentifierName(t *testing.T) { require.Equalf(t, tc.correct, got, "IsInCorrectIdentifierName(%v) != %v", tc.name, tc.correct) } } + +func TestDupProto(t *testing.T) { + p := &pb.StorageBackend{ + Backend: &pb.StorageBackend_S3{ + S3: &pb.S3{ + Endpoint: "127.0.0.1", + }, + }, + } + + p2 := ProtoV1Clone(p) + require.Equal(t, p2.Backend.(*pb.StorageBackend_S3).S3.Endpoint, "127.0.0.1") + p2.Backend.(*pb.StorageBackend_S3).S3.Endpoint = "127.0.0.2" + require.Equal(t, p.Backend.(*pb.StorageBackend_S3).S3.Endpoint, "127.0.0.1") +} diff --git a/tests/realtikvtest/brietest/BUILD.bazel b/tests/realtikvtest/brietest/BUILD.bazel index 08f31926f3355..7efeafa282f19 100644 --- a/tests/realtikvtest/brietest/BUILD.bazel +++ b/tests/realtikvtest/brietest/BUILD.bazel @@ -9,30 +9,44 @@ go_test( "brie_test.go", "main_test.go", "operator_test.go", + "pitr_test.go", ], flaky = True, race = "on", deps = [ + "//br/pkg/glue", + "//br/pkg/gluetidb", + "//br/pkg/logutil", + "//br/pkg/streamhelper", + "//br/pkg/summary", "//br/pkg/task", "//br/pkg/task/operator", "//pkg/config", + "//pkg/domain", "//pkg/executor", + "//pkg/kv", "//pkg/parser/mysql", "//pkg/session", "//pkg/testkit", "//pkg/testkit/testsetup", + "//pkg/util/printer", + "//pkg/util/table-filter", "//tests/realtikvtest", "@com_github_google_uuid//:uuid", "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_kvproto//pkg/brpb", + "@com_github_pingcap_kvproto//pkg/encryptionpb", "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_pingcap_kvproto//pkg/kvrpcpb", "@com_github_pingcap_log//:log", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_pd_client//:client", "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//credentials/insecure", "@org_uber_go_goleak//:goleak", + "@org_uber_go_zap//:zap", "@org_uber_go_zap//zapcore", ], ) diff --git a/tests/realtikvtest/brietest/main_test.go b/tests/realtikvtest/brietest/main_test.go index c8117a7615e96..1b8f820204249 100644 --- a/tests/realtikvtest/brietest/main_test.go +++ b/tests/realtikvtest/brietest/main_test.go @@ -29,7 +29,8 @@ func TestMain(m *testing.M) { goleak.IgnoreTopFunction("github.com/bazelbuild/rules_go/go/tools/bzltestutil.RegisterTimeoutHandler.func1"), goleak.IgnoreTopFunction("github.com/lestrrat-go/httprc.runFetchWorker"), goleak.IgnoreTopFunction("google.golang.org/grpc.(*ccBalancerWrapper).watcher"), - goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*http2Client).keepalive"), + // The top function now is `sync.runtime_notifyListWait`... 
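	// IgnoreTopFunction only matches the goroutine's topmost frame, so it no longer
	// catches the gRPC keepalive goroutine; IgnoreAnyFunction matches the function
	// anywhere in the goroutine's stack instead.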
+ goleak.IgnoreAnyFunction("google.golang.org/grpc/internal/transport.(*http2Client).keepalive"), goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*controlBuffer).get"), goleak.IgnoreTopFunction("google.golang.org/grpc/internal/grpcsync.(*CallbackSerializer).run"), goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"), diff --git a/tests/realtikvtest/brietest/pitr_test.go b/tests/realtikvtest/brietest/pitr_test.go new file mode 100644 index 0000000000000..2ad485d13c1b6 --- /dev/null +++ b/tests/realtikvtest/brietest/pitr_test.go @@ -0,0 +1,636 @@ +// Copyright 2025 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package brietest + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "math" + "os" + "path/filepath" + "regexp" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/pingcap/failpoint" + backup "github.com/pingcap/kvproto/pkg/brpb" + "github.com/pingcap/kvproto/pkg/encryptionpb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/br/pkg/glue" + "github.com/pingcap/tidb/br/pkg/gluetidb" + "github.com/pingcap/tidb/br/pkg/logutil" + "github.com/pingcap/tidb/br/pkg/streamhelper" + "github.com/pingcap/tidb/br/pkg/summary" + "github.com/pingcap/tidb/br/pkg/task" + "github.com/pingcap/tidb/br/pkg/task/operator" + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/session" + "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util/printer" + filter "github.com/pingcap/tidb/pkg/util/table-filter" + "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" + "github.com/tikv/client-go/v2/tikv" + pd "github.com/tikv/pd/client" + "go.uber.org/zap" +) + +type TestKitGlue struct { + tk *testkit.TestKit +} + +func (tk TestKitGlue) GetDomain(_ kv.Storage) (*domain.Domain, error) { + return domain.GetDomain(tk.tk.Session()), nil +} + +func (tk TestKitGlue) CreateSession(_ kv.Storage) (glue.Session, error) { + se, err := session.CreateSession(tk.tk.Session().GetStore()) + if err != nil { + return nil, err + } + return gluetidb.WrapSession(se), nil +} + +func (tk TestKitGlue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + return tk.tk.Session().GetStore(), nil +} + +// OwnsStorage returns whether the storage returned by Open() is owned +// If this method returns false, the connection manager will never close the storage. +func (tk TestKitGlue) OwnsStorage() bool { + return false +} + +func (tk TestKitGlue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress { + return &CounterProgress{} +} + +// Record records some information useful for log-less summary. 
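// Taken together, the TestKitGlue methods implement glue.Glue on top of a testkit
// session, so the br/pkg/task entry points (RunBackup, RunRestore, RunStreamStart and
// the other task runners) can be driven in-process against the real cluster started by
// realtikvtest instead of spawning a separate br binary.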
+func (tk TestKitGlue) Record(name string, value uint64) {} + +// GetVersion gets BR package version to run backup/restore job +func (tk TestKitGlue) GetVersion() string { + return "In Test\n" + printer.GetTiDBInfo() +} + +// UseOneShotSession temporary creates session from store when run backup job. +// because we don't have to own domain/session during the whole backup. +// we can close domain as soon as possible. +// and we must reuse the exists session and don't close it in SQL backup job. +func (tk TestKitGlue) UseOneShotSession(_ kv.Storage, _ bool, fn func(se glue.Session) error) error { + return fn(gluetidb.WrapSession(tk.tk.Session())) +} + +// GetClient returns the client type of the glue +func (tk TestKitGlue) GetClient() glue.GlueClient { + return glue.ClientSql +} + +type CounterProgress struct { + Counter atomic.Int64 +} + +func (c *CounterProgress) Inc() { + c.Counter.Add(1) +} + +func (c *CounterProgress) IncBy(cnt int64) { + c.Counter.Add(cnt) +} + +func (c *CounterProgress) GetCurrent() int64 { + return c.Counter.Load() +} + +func (c *CounterProgress) Close() { +} + +type LogBackupKit struct { + t *testing.T + tk *testkit.TestKit + metaCli *streamhelper.MetaDataClient + base string + + checkerF func(err error) +} + +func NewLogBackupKit(t *testing.T) *LogBackupKit { + tk := initTestKit(t) + metaCli := streamhelper.NewMetaDataClient(domain.GetDomain(tk.Session()).EtcdClient()) + begin := time.Now() + // So the cases can finish faster... + tk.MustExec("set config tikv `log-backup.max-flush-interval` = '30s';") + t.Cleanup(func() { + if !t.Failed() { + log.Info("[TEST.LogBackupKit] success", zap.String("case", t.Name()), zap.Stringer("takes", time.Since(begin))) + } + }) + return &LogBackupKit{ + tk: tk, + t: t, + metaCli: metaCli, + base: t.TempDir(), + checkerF: func(err error) { + require.NoError(t, err) + }, + } +} + +func (kit *LogBackupKit) tempFile(name string, content []byte) string { + path := filepath.Join(kit.t.TempDir(), name) + require.NoError(kit.t, os.WriteFile(path, content, 0o666)) + return path +} + +func (kit *LogBackupKit) RunFullRestore(extConfig func(*task.RestoreConfig)) { + kit.runAndCheck(func(ctx context.Context) error { + cfg := task.DefaultRestoreConfig(task.DefaultConfig()) + cfg.Storage = kit.LocalURI("full") + cfg.FilterStr = []string{"test.*"} + var err error + cfg.TableFilter, err = filter.Parse(cfg.FilterStr) + cfg.CheckRequirements = false + cfg.WithSysTable = false + require.NoError(kit.t, err) + cfg.UseCheckpoint = false + + extConfig(&cfg) + return task.RunRestore(ctx, kit.Glue(), task.FullRestoreCmd, &cfg) + }) +} + +func (kit *LogBackupKit) RunStreamRestore(extConfig func(*task.RestoreConfig)) { + kit.runAndCheck(func(ctx context.Context) error { + cfg := task.DefaultRestoreConfig(task.DefaultConfig()) + cfg.Storage = kit.LocalURI("incr") + cfg.FullBackupStorage = kit.LocalURI("full") + cfg.CheckRequirements = false + cfg.UseCheckpoint = false + cfg.WithSysTable = false + + extConfig(&cfg) + return task.RunRestore(ctx, kit.Glue(), task.PointRestoreCmd, &cfg) + }) +} + +func (kit *LogBackupKit) SetFilter(cfg *task.Config, f ...string) { + var err error + cfg.TableFilter, err = filter.Parse(f) + require.NoError(kit.t, err) + cfg.FilterStr = f + cfg.ExplicitFilter = true +} + +func (kit *LogBackupKit) RunFullBackup(extConfig func(*task.BackupConfig)) { + kit.runAndCheck(func(ctx context.Context) error { + cfg := task.DefaultBackupConfig(task.DefaultConfig()) + cfg.Storage = kit.LocalURI("full") + + extConfig(&cfg) + return 
task.RunBackup(ctx, kit.Glue(), "backup full[intest]", &cfg) + }) +} + +func (kit *LogBackupKit) StopTaskIfExists(taskName string) { + kit.runAndCheck(func(ctx context.Context) error { + cfg := task.DefaultStreamConfig(task.DefineStreamCommonFlags) + cfg.TaskName = taskName + err := task.RunStreamStop(ctx, kit.Glue(), "stream stop[intest]", &cfg) + if err != nil && strings.Contains(err.Error(), "task not found") { + return nil + } + return err + }) +} + +func (kit *LogBackupKit) RunLogStart(taskName string, extConfig func(*task.StreamConfig)) { + kit.runAndCheck(func(ctx context.Context) error { + cfg := task.DefaultStreamConfig(task.DefineStreamStartFlags) + cfg.Storage = kit.LocalURI("incr") + cfg.TaskName = taskName + cfg.EndTS = math.MaxUint64 + cfg.TableFilter = filter.All() + cfg.FilterStr = []string{"*.*"} + extConfig(&cfg) + err := task.RunStreamStart(ctx, kit.Glue(), "stream start[intest]", &cfg) + return err + }) + kit.t.Cleanup(func() { kit.StopTaskIfExists(taskName) }) +} + +func (kit *LogBackupKit) ctx() context.Context { + return context.Background() +} + +func (kit *LogBackupKit) TSO() uint64 { + ts, err := kit.tk.Session().GetStore().(tikv.Storage).GetOracle().GetTimestamp(kit.ctx(), &oracle.Option{}) + require.NoError(kit.t, err) + return ts +} + +func (kit *LogBackupKit) LocalURI(rel ...string) string { + return "local://" + kit.base + "/" + filepath.Join(rel...) +} + +func (kit *LogBackupKit) CheckpointTSOf(taskName string) uint64 { + task, err := kit.metaCli.GetTask(kit.ctx(), taskName) + require.NoError(kit.t, err) + ts, err := task.GetGlobalCheckPointTS(kit.ctx()) + require.NoError(kit.t, err) + return ts +} + +func (kit *LogBackupKit) Glue() glue.Glue { + return &TestKitGlue{tk: kit.tk} +} + +func (kit *LogBackupKit) WithChecker(checker func(v error), f func()) { + oldExpected := kit.checkerF + defer func() { + kit.checkerF = oldExpected + }() + kit.checkerF = checker + + f() +} + +func (kit *LogBackupKit) runAndCheck(f func(context.Context) error) { + ctx, cancel := context.WithCancel(context.Background()) + begin := time.Now() + summary.SetSuccessStatus(false) + err := f(ctx) + cancel() + kit.checkerF(err) + log.Info("[TEST.runAndCheck] A task finished.", zap.StackSkip("caller", 1), zap.Stringer("take", time.Since(begin))) +} + +func (kit *LogBackupKit) forceFlush() { + kit.runAndCheck(func(ctx context.Context) error { + cfg := task.DefaultConfig() + cfg.PD = append(cfg.PD, config.GetGlobalConfig().Path) + err := operator.RunForceFlush(ctx, &operator.ForceFlushConfig{ + Config: cfg, + StoresPattern: regexp.MustCompile(".*"), + }) + if err != nil { + log.Warn("[TEST.forceFlush] It seems this version of TiKV doesn't support force flush, the test may be much more slower.", + logutil.ShortError(err)) + } + return nil + }) +} + +func (kit *LogBackupKit) forceFlushAndWait(taskName string) { + ts := kit.TSO() + start := time.Now() + kit.forceFlush() + require.Eventually(kit.t, func() bool { + ckpt := kit.CheckpointTSOf(taskName) + log.Info("[TEST.forceFlushAndWait] checkpoint", zap.Uint64("checkpoint", ckpt), zap.Uint64("ts", ts)) + return ckpt >= ts + }, 300*time.Second, 1*time.Second) + time.Sleep(6 * time.Second) // Wait the storage checkpoint uploaded... 
+ log.Info("[TEST.forceFlushAndWait] done", zap.Stringer("take", time.Since(start))) +} + +func (kit *LogBackupKit) simpleWorkload() simpleWorkload { + return simpleWorkload{ + tbl: kit.t.Name(), + } +} + +type simpleWorkload struct { + tbl string +} + +func (s simpleWorkload) createSimpleTableWithData(kit *LogBackupKit) { + kit.tk.MustExec(fmt.Sprintf("DROP TABLE IF EXISTs test.%s", s.tbl)) + kit.tk.MustExec(fmt.Sprintf("CREATE TABLE test.%s(t text)", s.tbl)) + kit.tk.MustExec(fmt.Sprintf("INSERT INTO test.%s VALUES ('Ear'), ('Eye'), ('Nose')", s.tbl)) +} + +func (s simpleWorkload) insertSimpleIncreaseData(kit *LogBackupKit) { + kit.tk.MustExec(fmt.Sprintf("INSERT INTO test.%s VALUES ('Body')", s.tbl)) + kit.tk.MustExec(fmt.Sprintf("INSERT INTO test.%s VALUES ('Mind')", s.tbl)) +} + +func (s simpleWorkload) verifySimpleData(kit *LogBackupKit) { + kit.tk.MustQuery(fmt.Sprintf("SELECT * FROM test.%s", s.tbl)).Check([][]any{{"Ear"}, {"Eye"}, {"Nose"}, {"Body"}, {"Mind"}}) +} + +func (s simpleWorkload) cleanSimpleData(kit *LogBackupKit) { + kit.tk.MustExec(fmt.Sprintf("DROP TABLE IF EXISTS test.%s", s.tbl)) +} + +func TestPiTRAndBackupInSQL(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + s.insertSimpleIncreaseData(kit) + + taskName := t.Name() + kit.RunFullBackup(func(bc *task.BackupConfig) {}) + s.cleanSimpleData(kit) + + ts := kit.TSO() + kit.RunFullBackup(func(bc *task.BackupConfig) { + bc.Storage = kit.LocalURI("full2") + bc.BackupTS = ts + }) + kit.RunLogStart(taskName, func(sc *task.StreamConfig) { + sc.StartTS = ts + }) + _ = kit.tk.MustQuery(fmt.Sprintf("RESTORE TABLE test.%s FROM '%s'", t.Name(), kit.LocalURI("full"))) + s.verifySimpleData(kit) + kit.forceFlushAndWait(taskName) + + s.cleanSimpleData(kit) + kit.StopTaskIfExists(taskName) + kit.RunStreamRestore(func(rc *task.RestoreConfig) { + rc.FullBackupStorage = kit.LocalURI("full2") + }) + s.verifySimpleData(kit) +} + +func TestPiTRAndRestoreFromMid(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + s.insertSimpleIncreaseData(kit) + + taskName := t.Name() + + kit.RunFullBackup(func(bc *task.BackupConfig) { + kit.SetFilter(&bc.Config, fmt.Sprintf("test.%s", s.tbl)) + bc.Storage = kit.LocalURI("fulla") + }) + s.cleanSimpleData(kit) + + s2 := kit.simpleWorkload() + s2.tbl += "2" + s2.createSimpleTableWithData(kit) + s2.insertSimpleIncreaseData(kit) + kit.RunFullBackup(func(bc *task.BackupConfig) { + kit.SetFilter(&bc.Config, fmt.Sprintf("test.%s", s2.tbl)) + bc.Storage = kit.LocalURI("fullb") + }) + s2.cleanSimpleData(kit) + + kit.RunLogStart(taskName, func(sc *task.StreamConfig) {}) + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.Storage = kit.LocalURI("fulla") + kit.SetFilter(&rc.Config, fmt.Sprintf("test.%s", s.tbl)) + }) + s.cleanSimpleData(kit) + + ts2 := kit.TSO() + kit.RunFullBackup(func(bc *task.BackupConfig) { + bc.Storage = kit.LocalURI("pitr_base_2") + bc.BackupTS = ts2 + }) + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.Storage = kit.LocalURI("fullb") + kit.SetFilter(&rc.Config, fmt.Sprintf("test.%s", s2.tbl)) + }) + + kit.forceFlushAndWait(taskName) + s.cleanSimpleData(kit) + s2.cleanSimpleData(kit) + kit.StopTaskIfExists(taskName) + kit.RunStreamRestore(func(rc *task.RestoreConfig) { + rc.FullBackupStorage = kit.LocalURI("pitr_base_2") + }) + s2.verifySimpleData(kit) + kit.tk.MustQuery("SELECT * FROM information_schema.tables WHERE table_name = ?", s.tbl).Check([][]any{}) +} + +func 
TestPiTRAndManyBackups(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + s.insertSimpleIncreaseData(kit) + + taskName := t.Name() + + kit.RunFullBackup(func(bc *task.BackupConfig) { + kit.SetFilter(&bc.Config, fmt.Sprintf("test.%s", s.tbl)) + bc.Storage = kit.LocalURI("fulla") + }) + s.cleanSimpleData(kit) + + s2 := kit.simpleWorkload() + s2.tbl += "2" + s2.createSimpleTableWithData(kit) + s2.insertSimpleIncreaseData(kit) + kit.RunFullBackup(func(bc *task.BackupConfig) { + kit.SetFilter(&bc.Config, fmt.Sprintf("test.%s", s2.tbl)) + bc.Storage = kit.LocalURI("fullb") + }) + s2.cleanSimpleData(kit) + + ts := kit.TSO() + kit.RunFullBackup(func(bc *task.BackupConfig) { + bc.Storage = kit.LocalURI("pitr_base") + bc.BackupTS = ts + }) + kit.RunLogStart(taskName, func(sc *task.StreamConfig) { + sc.StartTS = ts + }) + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.Storage = kit.LocalURI("fulla") + kit.SetFilter(&rc.Config, fmt.Sprintf("test.%s", s.tbl)) + }) + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.Storage = kit.LocalURI("fullb") + kit.SetFilter(&rc.Config, fmt.Sprintf("test.%s", s2.tbl)) + }) + + kit.forceFlushAndWait(taskName) + s.cleanSimpleData(kit) + s2.cleanSimpleData(kit) + kit.StopTaskIfExists(taskName) + kit.RunStreamRestore(func(rc *task.RestoreConfig) { + rc.FullBackupStorage = kit.LocalURI("pitr_base") + }) + s.verifySimpleData(kit) + s2.verifySimpleData(kit) +} + +func TestPiTRAndEncryptedFullBackup(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + keyContent, err := hex.DecodeString("9d4cf8f268514d2c38836197008eded1050a5806afa632f7ab1e313bb6697da2") + require.NoError(t, err) + + kit.RunFullBackup(func(bc *task.BackupConfig) { + bc.CipherInfo = backup.CipherInfo{ + CipherType: encryptionpb.EncryptionMethod_AES256_CTR, + CipherKey: keyContent, + } + }) + + s.cleanSimpleData(kit) + kit.RunLogStart(t.Name(), func(sc *task.StreamConfig) {}) + chk := func(err error) { require.ErrorContains(t, err, "the data you want to restore is encrypted") } + kit.WithChecker(chk, func() { + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.CipherInfo = backup.CipherInfo{ + CipherType: encryptionpb.EncryptionMethod_AES256_CTR, + CipherKey: keyContent, + } + }) + }) +} + +func TestPiTRAndEncryptedLogBackup(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + + keyContent, err := hex.DecodeString("0ae31c060ff933cabe842430e1716185cc9c6b5cdde8e56976afaff41b92528f") + require.NoError(t, err) + keyFile := kit.tempFile("KEY", keyContent) + + kit.RunFullBackup(func(bc *task.BackupConfig) {}) + s.cleanSimpleData(kit) + + kit.RunLogStart(t.Name(), func(sc *task.StreamConfig) { + sc.MasterKeyConfig.EncryptionType = encryptionpb.EncryptionMethod_AES256_CTR + sc.MasterKeyConfig.MasterKeys = append(sc.MasterKeyConfig.MasterKeys, &encryptionpb.MasterKey{ + Backend: &encryptionpb.MasterKey_File{ + File: &encryptionpb.MasterKeyFile{ + Path: keyFile, + }, + }, + }) + }) + + chk := func(err error) { require.ErrorContains(t, err, "the running log backup task is encrypted") } + kit.WithChecker(chk, func() { + kit.RunFullRestore(func(rc *task.RestoreConfig) {}) + }) +} + +func TestPiTRAndBothEncrypted(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + + keyContent, err := hex.DecodeString("319b4a104651746f1bf1ad67c9ba7d635d8c4769b03f3e5c63f1da93891ce4f9") + 
require.NoError(t, err) + keyFile := kit.tempFile("KEY", keyContent) + + kit.RunFullBackup(func(bc *task.BackupConfig) { + bc.CipherInfo = backup.CipherInfo{ + CipherType: encryptionpb.EncryptionMethod_AES256_CTR, + CipherKey: keyContent, + } + }) + s.cleanSimpleData(kit) + + kit.RunLogStart(t.Name(), func(sc *task.StreamConfig) { + sc.MasterKeyConfig.EncryptionType = encryptionpb.EncryptionMethod_AES256_CTR + sc.MasterKeyConfig.MasterKeys = append(sc.MasterKeyConfig.MasterKeys, &encryptionpb.MasterKey{ + Backend: &encryptionpb.MasterKey_File{ + File: &encryptionpb.MasterKeyFile{ + Path: keyFile, + }, + }, + }) + }) + + chk := func(err error) { require.ErrorContains(t, err, "encrypted") } + kit.WithChecker(chk, func() { + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.CipherInfo = backup.CipherInfo{ + CipherType: encryptionpb.EncryptionMethod_AES256_CTR, + CipherKey: keyContent, + } + }) + }) +} + +func TestPiTRAndFailureRestore(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + s.insertSimpleIncreaseData(kit) + + taskName := t.Name() + kit.RunFullBackup(func(bc *task.BackupConfig) {}) + s.cleanSimpleData(kit) + + ts := kit.TSO() + kit.RunFullBackup(func(bc *task.BackupConfig) { + bc.Storage = kit.LocalURI("full2") + bc.BackupTS = ts + }) + kit.RunLogStart(taskName, func(sc *task.StreamConfig) { + sc.StartTS = ts + }) + require.NoError(t, failpoint.EnableCall("github.com/pingcap/tidb/br/pkg/task/run-snapshot-restore-about-to-finish", func(e *error) { + *e = errors.New("not my fault") + })) + checker := func(e error) { require.Error(t, e) } + kit.WithChecker(checker, func() { + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.UseCheckpoint = false + }) + }) + kit.forceFlushAndWait(taskName) + + s.cleanSimpleData(kit) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/br/pkg/task/run-snapshot-restore-about-to-finish")) + + kit.StopTaskIfExists(taskName) + kit.RunStreamRestore(func(rc *task.RestoreConfig) { + rc.FullBackupStorage = kit.LocalURI("full2") + }) + res := kit.tk.MustQuery(fmt.Sprintf("SELECT COUNT(*) FROM test.%s", t.Name())) + res.Check([][]any{{"0"}}) +} + +func TestPiTRAndIncrementalRestore(t *testing.T) { + kit := NewLogBackupKit(t) + s := kit.simpleWorkload() + s.createSimpleTableWithData(kit) + kit.RunFullBackup(func(bc *task.BackupConfig) { + kit.SetFilter(&bc.Config, fmt.Sprintf("test.%s", s.tbl)) + }) + s.insertSimpleIncreaseData(kit) + ts := kit.TSO() + kit.RunFullBackup(func(bc *task.BackupConfig) { + kit.SetFilter(&bc.Config, fmt.Sprintf("test.%s", s.tbl)) + bc.Storage = kit.LocalURI("incr-legacy") + bc.LastBackupTS = ts + }) + s.cleanSimpleData(kit) + + kit.RunLogStart("dummy", func(sc *task.StreamConfig) {}) + kit.RunFullRestore(func(rc *task.RestoreConfig) {}) + chk := func(err error) { require.ErrorContains(t, err, "BR:Stream:ErrStreamLogTaskExist") } + kit.WithChecker(chk, func() { + kit.RunFullRestore(func(rc *task.RestoreConfig) { + rc.Storage = kit.LocalURI("incr-legacy") + }) + }) +} From 5bf3e381f28580bd82bed2ee8e642fc0bf5e841f Mon Sep 17 00:00:00 2001 From: hillium Date: Thu, 16 Jan 2025 15:43:30 +0800 Subject: [PATCH 13/17] fix build Signed-off-by: hillium --- br/pkg/restore/snap_client/BUILD.bazel | 3 +-- br/pkg/storage/local.go | 1 - br/pkg/stream/BUILD.bazel | 3 ++- br/pkg/streamhelper/basic_lib_for_test.go | 6 ++++++ br/pkg/task/BUILD.bazel | 3 +-- br/pkg/task/operator/BUILD.bazel | 1 - br/pkg/task/operator/force_flush.go | 3 +-- br/pkg/task/restore.go | 1 - go.mod | 2 
+- 9 files changed, 12 insertions(+), 11 deletions(-) diff --git a/br/pkg/restore/snap_client/BUILD.bazel b/br/pkg/restore/snap_client/BUILD.bazel index 8983aa6e671ef..86d66397325b0 100644 --- a/br/pkg/restore/snap_client/BUILD.bazel +++ b/br/pkg/restore/snap_client/BUILD.bazel @@ -41,9 +41,8 @@ go_library( "//pkg/kv", "//pkg/meta", "//pkg/meta/model", - "//pkg/parser/model", "//pkg/metrics", - "//pkg/parser/ast", + "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/tablecodec", "//pkg/util", diff --git a/br/pkg/storage/local.go b/br/pkg/storage/local.go index 2e0c0bf725188..f00d3a199673e 100644 --- a/br/pkg/storage/local.go +++ b/br/pkg/storage/local.go @@ -15,7 +15,6 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" - "github.com/pingcap/tidb/br/pkg/logutil" "go.uber.org/zap" ) diff --git a/br/pkg/stream/BUILD.bazel b/br/pkg/stream/BUILD.bazel index 2e600464b60d1..b6184a3fc7626 100644 --- a/br/pkg/stream/BUILD.bazel +++ b/br/pkg/stream/BUILD.bazel @@ -34,6 +34,7 @@ go_library( "//pkg/util/codec", "//pkg/util/mathutil", "//pkg/util/table-filter", + "//pkg/util/versioninfo", "@com_github_docker_go_units//:go-units", "@com_github_fatih_color//:color", "@com_github_google_uuid//:uuid", @@ -66,7 +67,7 @@ go_test( ], embed = [":stream"], flaky = True, - shard_count = 48, + shard_count = 49, deps = [ "//br/pkg/storage", "//br/pkg/streamhelper", diff --git a/br/pkg/streamhelper/basic_lib_for_test.go b/br/pkg/streamhelper/basic_lib_for_test.go index 22a66c18e27af..62f11869b239b 100644 --- a/br/pkg/streamhelper/basic_lib_for_test.go +++ b/br/pkg/streamhelper/basic_lib_for_test.go @@ -94,6 +94,12 @@ type fakeStore struct { onGetRegionCheckpoint func(*logbackup.GetLastFlushTSOfRegionRequest) error } +// FlushNow implements logbackup.LogBackupClient. 
+func (f *fakeStore) FlushNow(ctx context.Context, in *logbackup.FlushNowRequest, opts ...grpc.CallOption) (*logbackup.FlushNowResponse, error) { + f.flush() + return &logbackup.FlushNowResponse{}, nil +} + type fakeCluster struct { mu sync.Mutex idAlloced uint64 diff --git a/br/pkg/task/BUILD.bazel b/br/pkg/task/BUILD.bazel index 092788e4ea4f6..eae2e8c9499a8 100644 --- a/br/pkg/task/BUILD.bazel +++ b/br/pkg/task/BUILD.bazel @@ -59,9 +59,8 @@ go_library( "//pkg/infoschema/context", "//pkg/kv", "//pkg/meta/model", - "//pkg/parser/model", "//pkg/metrics", - "//pkg/parser/ast", + "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", diff --git a/br/pkg/task/operator/BUILD.bazel b/br/pkg/task/operator/BUILD.bazel index 6d232d6c36bf0..508d29b6fbe52 100644 --- a/br/pkg/task/operator/BUILD.bazel +++ b/br/pkg/task/operator/BUILD.bazel @@ -43,7 +43,6 @@ go_library( "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//util", "@com_github_tikv_pd_client//:client", - "@com_github_tikv_pd_client//opt", "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//keepalive", "@org_golang_x_sync//errgroup", diff --git a/br/pkg/task/operator/force_flush.go b/br/pkg/task/operator/force_flush.go index 00712e8e56a07..68363cd10f632 100644 --- a/br/pkg/task/operator/force_flush.go +++ b/br/pkg/task/operator/force_flush.go @@ -10,14 +10,13 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/util/engine" pd "github.com/tikv/pd/client" - "github.com/tikv/pd/client/opt" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" ) func getAllTiKVs(ctx context.Context, p pd.Client) ([]*metapb.Store, error) { - stores, err := p.GetAllStores(ctx, opt.WithExcludeTombstone()) + stores, err := p.GetAllStores(ctx, pd.WithExcludeTombstone()) if err != nil { return nil, err } diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index e903063ad731d..446e3557c6fd6 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -41,7 +41,6 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/parser/ast" pmodel "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/collate" diff --git a/go.mod b/go.mod index cb41fd518de71..ea2f1f5a35cbe 100644 --- a/go.mod +++ b/go.mod @@ -86,8 +86,8 @@ require ( github.com/pingcap/errors v0.11.5-0.20240318064555-6bd07397691f github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 github.com/pingcap/fn v1.0.0 - github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63 + github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5 github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e github.com/pingcap/tipb v0.0.0-20241022082558-0607513e7fa4 From e72bb585d3e136c9e121f2dc66a9b50890be355b Mon Sep 17 00:00:00 2001 From: hillium Date: Thu, 16 Jan 2025 17:42:10 +0800 Subject: [PATCH 14/17] use kvproto from release-8.5 Signed-off-by: hillium --- br/cmd/br/operator.go | 19 ----- br/pkg/restore/snap_client/pitr_collector.go | 4 - br/pkg/streamhelper/basic_lib_for_test.go | 6 -- br/pkg/task/operator/config.go | 28 ------- br/pkg/task/operator/force_flush.go | 77 -------------------- go.mod | 3 +- go.sum | 3 + tests/realtikvtest/brietest/pitr_test.go | 17 +---- 8 files changed, 7 insertions(+), 150 
deletions(-) delete mode 100644 br/pkg/task/operator/force_flush.go diff --git a/br/cmd/br/operator.go b/br/cmd/br/operator.go index abd0156a5457b..37ec8f29d1fc2 100644 --- a/br/cmd/br/operator.go +++ b/br/cmd/br/operator.go @@ -35,7 +35,6 @@ func newOperatorCommand() *cobra.Command { cmd.AddCommand(newBase64ifyCommand()) cmd.AddCommand(newListMigrationsCommand()) cmd.AddCommand(newMigrateToCommand()) - cmd.AddCommand(newForceFlushCommand()) cmd.AddCommand(newChecksumCommand()) return cmd } @@ -133,21 +132,3 @@ func newChecksumCommand() *cobra.Command { operator.DefineFlagsForChecksumTableConfig(cmd.Flags()) return cmd } - -func newForceFlushCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "force-flush", - Short: "force a log backup task to flush", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - cfg := operator.ForceFlushConfig{} - if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { - return err - } - ctx := GetDefaultContext() - return operator.RunForceFlush(ctx, &cfg) - }, - } - operator.DefineFlagsForForceFlushConfig(cmd.Flags()) - return cmd -} diff --git a/br/pkg/restore/snap_client/pitr_collector.go b/br/pkg/restore/snap_client/pitr_collector.go index 37a04156062cd..9b996c583f2e7 100644 --- a/br/pkg/restore/snap_client/pitr_collector.go +++ b/br/pkg/restore/snap_client/pitr_collector.go @@ -215,10 +215,6 @@ func (c *pitrCollector) verifyCompatibilityFor(fileset *restore.BackupFileSet) e return errors.Annotatef(berrors.ErrUnsupportedOperation, "rewrite rule #%d: rewrite timestamp isn't supported when log backup enabled", i) } - if r.IgnoreAfterTimestamp > 0 || r.IgnoreBeforeTimestamp > 0 { - return errors.Annotatef(berrors.ErrUnsupportedOperation, - "rewrite rule #%d: truncating timestamp isn't supported when log backup enabled", i) - } } return nil } diff --git a/br/pkg/streamhelper/basic_lib_for_test.go b/br/pkg/streamhelper/basic_lib_for_test.go index 62f11869b239b..22a66c18e27af 100644 --- a/br/pkg/streamhelper/basic_lib_for_test.go +++ b/br/pkg/streamhelper/basic_lib_for_test.go @@ -94,12 +94,6 @@ type fakeStore struct { onGetRegionCheckpoint func(*logbackup.GetLastFlushTSOfRegionRequest) error } -// FlushNow implements logbackup.LogBackupClient. -func (f *fakeStore) FlushNow(ctx context.Context, in *logbackup.FlushNowRequest, opts ...grpc.CallOption) (*logbackup.FlushNowResponse, error) { - f.flush() - return &logbackup.FlushNowResponse{}, nil -} - type fakeCluster struct { mu sync.Mutex idAlloced uint64 diff --git a/br/pkg/task/operator/config.go b/br/pkg/task/operator/config.go index 8ccf1ef6266b5..fb676ea68405e 100644 --- a/br/pkg/task/operator/config.go +++ b/br/pkg/task/operator/config.go @@ -3,7 +3,6 @@ package operator import ( - "regexp" "time" "github.com/pingcap/errors" @@ -16,7 +15,6 @@ import ( const ( flagTableConcurrency = "table-concurrency" - flagStorePatterns = "stores" flagTTL = "ttl" flagSafePoint = "safepoint" flagStorage = "storage" @@ -189,32 +187,6 @@ func (cfg *MigrateToConfig) Verify() error { return nil } -type ForceFlushConfig struct { - task.Config - - // StoresPattern matches the address of TiKV. - // The address usually looks like ":20160". - // You may list the store by `pd-ctl stores`. 
- StoresPattern *regexp.Regexp -} - -func DefineFlagsForForceFlushConfig(f *pflag.FlagSet) { - f.String(flagStorePatterns, ".*", "The regexp to match the store peer address to be force flushed.") -} - -func (cfg *ForceFlushConfig) ParseFromFlags(flags *pflag.FlagSet) (err error) { - storePat, err := flags.GetString(flagStorePatterns) - if err != nil { - return err - } - cfg.StoresPattern, err = regexp.Compile(storePat) - if err != nil { - return errors.Annotatef(err, "invalid expression in --%s", flagStorePatterns) - } - - return cfg.Config.ParseFromFlags(flags) -} - type ChecksumWithRewriteRulesConfig struct { task.Config } diff --git a/br/pkg/task/operator/force_flush.go b/br/pkg/task/operator/force_flush.go deleted file mode 100644 index 68363cd10f632..0000000000000 --- a/br/pkg/task/operator/force_flush.go +++ /dev/null @@ -1,77 +0,0 @@ -package operator - -import ( - "context" - "slices" - - "github.com/pingcap/errors" - logbackup "github.com/pingcap/kvproto/pkg/logbackuppb" - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/log" - "github.com/pingcap/tidb/pkg/util/engine" - pd "github.com/tikv/pd/client" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" -) - -func getAllTiKVs(ctx context.Context, p pd.Client) ([]*metapb.Store, error) { - stores, err := p.GetAllStores(ctx, pd.WithExcludeTombstone()) - if err != nil { - return nil, err - } - withoutTiFlash := slices.DeleteFunc(stores, engine.IsTiFlash) - return withoutTiFlash, err -} - -func RunForceFlush(ctx context.Context, cfg *ForceFlushConfig) error { - pdMgr, err := dialPD(ctx, &cfg.Config) - if err != nil { - return err - } - defer pdMgr.Close() - - stores, err := createStoreManager(pdMgr.GetPDClient(), &cfg.Config) - if err != nil { - return err - } - defer stores.Close() - - tikvs, err := getAllTiKVs(ctx, pdMgr.GetPDClient()) - if err != nil { - return err - } - eg, ectx := errgroup.WithContext(ctx) - log.Info("About to start force flushing.", zap.Stringer("stores-pattern", cfg.StoresPattern)) - for _, s := range tikvs { - if !cfg.StoresPattern.MatchString(s.Address) || engine.IsTiFlash(s) { - log.Info("Skipping TiFlash or not matched TiKV.", - zap.Uint64("store", s.GetId()), zap.String("addr", s.Address), zap.Bool("tiflash?", engine.IsTiFlash(s))) - continue - } - - log.Info("Starting force flush TiKV.", zap.Uint64("store", s.GetId()), zap.String("addr", s.Address)) - eg.Go(func() error { - var logBackupCli logbackup.LogBackupClient - err := stores.WithConn(ectx, s.GetId(), func(cc *grpc.ClientConn) { - logBackupCli = logbackup.NewLogBackupClient(cc) - }) - if err != nil { - return err - } - - resp, err := logBackupCli.FlushNow(ectx, &logbackup.FlushNowRequest{}) - if err != nil { - return errors.Annotatef(err, "failed to flush store %d", s.GetId()) - } - for _, res := range resp.Results { - if !res.Success { - return errors.Errorf("failed to flush task %s at store %d: %s", res.TaskName, s.GetId(), res.ErrorMessage) - } - log.Info("Force flushed task of TiKV store.", zap.Uint64("store", s.Id), zap.String("task", res.TaskName)) - } - return nil - }) - } - return eg.Wait() -} diff --git a/go.mod b/go.mod index ea2f1f5a35cbe..530987d648479 100644 --- a/go.mod +++ b/go.mod @@ -86,7 +86,7 @@ require ( github.com/pingcap/errors v0.11.5-0.20240318064555-6bd07397691f github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 github.com/pingcap/fn v1.0.0 - github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63 + github.com/pingcap/kvproto v0.0.0-20250116085028-ef010e9196a4 
github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5 github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e @@ -156,6 +156,7 @@ require ( github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect github.com/goccy/go-reflect v1.2.0 // indirect github.com/google/flatbuffers v2.0.8+incompatible // indirect github.com/jinzhu/inflection v1.0.0 // indirect diff --git a/go.sum b/go.sum index 36c4cf5c83bf1..69367d9eadc5e 100644 --- a/go.sum +++ b/go.sum @@ -263,6 +263,7 @@ github.com/fsouza/fake-gcs-server v1.44.0/go.mod h1:M02aKoTv9Tnlf+gmWnTok1PWVCUH github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= @@ -672,6 +673,8 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63 h1:ThJ7ddLJVk96Iai2HDeyJGuuhrcBtc3HwYKJfuKPLsI= github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= +github.com/pingcap/kvproto v0.0.0-20250116085028-ef010e9196a4 h1:boKz7Hm3IB2eSqRf+/XvXsJjE6J197iDtxasULHM8mw= +github.com/pingcap/kvproto v0.0.0-20250116085028-ef010e9196a4/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d h1:y3EueKVfVykdpTyfUnQGqft0ud+xVFuCdp1XkVL0X1E= diff --git a/tests/realtikvtest/brietest/pitr_test.go b/tests/realtikvtest/brietest/pitr_test.go index 2ad485d13c1b6..9949ae46bd3c6 100644 --- a/tests/realtikvtest/brietest/pitr_test.go +++ b/tests/realtikvtest/brietest/pitr_test.go @@ -22,7 +22,6 @@ import ( "math" "os" "path/filepath" - "regexp" "strings" "sync/atomic" "testing" @@ -34,12 +33,9 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/gluetidb" - "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/streamhelper" "github.com/pingcap/tidb/br/pkg/summary" "github.com/pingcap/tidb/br/pkg/task" - "github.com/pingcap/tidb/br/pkg/task/operator" - "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/session" @@ -136,7 +132,7 @@ func NewLogBackupKit(t *testing.T) *LogBackupKit { tk := initTestKit(t) metaCli := streamhelper.NewMetaDataClient(domain.GetDomain(tk.Session()).EtcdClient()) begin := time.Now() - // So the cases can finish faster... + // So the cases can finish faster... If force flush not supported... 
tk.MustExec("set config tikv `log-backup.max-flush-interval` = '30s';") t.Cleanup(func() { if !t.Failed() { @@ -284,16 +280,7 @@ func (kit *LogBackupKit) runAndCheck(f func(context.Context) error) { func (kit *LogBackupKit) forceFlush() { kit.runAndCheck(func(ctx context.Context) error { - cfg := task.DefaultConfig() - cfg.PD = append(cfg.PD, config.GetGlobalConfig().Path) - err := operator.RunForceFlush(ctx, &operator.ForceFlushConfig{ - Config: cfg, - StoresPattern: regexp.MustCompile(".*"), - }) - if err != nil { - log.Warn("[TEST.forceFlush] It seems this version of TiKV doesn't support force flush, the test may be much more slower.", - logutil.ShortError(err)) - } + log.Warn("[TEST.forceFlush] This version doesn't support force flush, the test may be much more slower.") return nil }) } From 7ce086b1e327e2823ec0af03e9ea312b2b791635 Mon Sep 17 00:00:00 2001 From: hillium Date: Thu, 16 Jan 2025 21:32:28 +0800 Subject: [PATCH 15/17] update DEPS Signed-off-by: hillium --- DEPS.bzl | 12 ++++++------ br/pkg/task/operator/BUILD.bazel | 5 ----- tests/realtikvtest/brietest/BUILD.bazel | 1 - 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/DEPS.bzl b/DEPS.bzl index b223666edae78..ddfcfc4506012 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -5776,13 +5776,13 @@ def go_deps(): name = "com_github_pingcap_kvproto", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/kvproto", - sha256 = "db08607b0c90f3909b66577e9c568d0cbd6b2825d287d7b5caab86ea6e4b60ad", - strip_prefix = "github.com/pingcap/kvproto@v0.0.0-20250108041715-3b77f2c65c63", + sha256 = "f032df69d754f19adf7ac245dade0fdaeb12323e314c462feae8d095d777a681", + strip_prefix = "github.com/pingcap/kvproto@v0.0.0-20250116085028-ef010e9196a4", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", - "http://ats.apps.svc/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", - "https://cache.hawkingrei.com/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250108041715-3b77f2c65c63.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250116085028-ef010e9196a4.zip", + "http://ats.apps.svc/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250116085028-ef010e9196a4.zip", + "https://cache.hawkingrei.com/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250116085028-ef010e9196a4.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20250116085028-ef010e9196a4.zip", ], ) go_repository( diff --git a/br/pkg/task/operator/BUILD.bazel b/br/pkg/task/operator/BUILD.bazel index 508d29b6fbe52..a91d46d461380 100644 --- a/br/pkg/task/operator/BUILD.bazel +++ b/br/pkg/task/operator/BUILD.bazel @@ -6,7 +6,6 @@ go_library( "base64ify.go", "checksum_table.go", "config.go", - "force_flush.go", "list_migration.go", "migrate_to.go", "prepare_snap.go", @@ -30,20 +29,16 @@ go_library( "//pkg/domain", "//pkg/meta/model", "//pkg/util", - "//pkg/util/engine", "@com_github_fatih_color//:color", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/brpb", - "@com_github_pingcap_kvproto//pkg/logbackuppb", - 
"@com_github_pingcap_kvproto//pkg/metapb", "@com_github_pingcap_log//:log", "@com_github_spf13_pflag//:pflag", "@com_github_tikv_client_go_v2//oracle", "@com_github_tikv_client_go_v2//tikv", "@com_github_tikv_client_go_v2//util", "@com_github_tikv_pd_client//:client", - "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//keepalive", "@org_golang_x_sync//errgroup", "@org_uber_go_multierr//:multierr", diff --git a/tests/realtikvtest/brietest/BUILD.bazel b/tests/realtikvtest/brietest/BUILD.bazel index 7efeafa282f19..da60d0a35e13d 100644 --- a/tests/realtikvtest/brietest/BUILD.bazel +++ b/tests/realtikvtest/brietest/BUILD.bazel @@ -16,7 +16,6 @@ go_test( deps = [ "//br/pkg/glue", "//br/pkg/gluetidb", - "//br/pkg/logutil", "//br/pkg/streamhelper", "//br/pkg/summary", "//br/pkg/task", From b5ee1c5ab1165fb57c211c6c1a064573dc2b8b7b Mon Sep 17 00:00:00 2001 From: hillium Date: Thu, 16 Jan 2025 23:36:25 +0800 Subject: [PATCH 16/17] make tidy Signed-off-by: hillium --- go.mod | 1 - go.sum | 3 --- 2 files changed, 4 deletions(-) diff --git a/go.mod b/go.mod index 530987d648479..a04c86128d7ce 100644 --- a/go.mod +++ b/go.mod @@ -156,7 +156,6 @@ require ( github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect github.com/goccy/go-reflect v1.2.0 // indirect github.com/google/flatbuffers v2.0.8+incompatible // indirect github.com/jinzhu/inflection v1.0.0 // indirect diff --git a/go.sum b/go.sum index 69367d9eadc5e..558dde90515c0 100644 --- a/go.sum +++ b/go.sum @@ -263,7 +263,6 @@ github.com/fsouza/fake-gcs-server v1.44.0/go.mod h1:M02aKoTv9Tnlf+gmWnTok1PWVCUH github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= @@ -671,8 +670,6 @@ github.com/pingcap/fn v1.0.0/go.mod h1:u9WZ1ZiOD1RpNhcI42RucFh/lBuzTu6rw88a+oF2Z github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63 h1:ThJ7ddLJVk96Iai2HDeyJGuuhrcBtc3HwYKJfuKPLsI= -github.com/pingcap/kvproto v0.0.0-20250108041715-3b77f2c65c63/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/kvproto v0.0.0-20250116085028-ef010e9196a4 h1:boKz7Hm3IB2eSqRf+/XvXsJjE6J197iDtxasULHM8mw= github.com/pingcap/kvproto v0.0.0-20250116085028-ef010e9196a4/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= From 8934c8fe30843a78fffe51ed832aa4dbe70667ad Mon Sep 17 00:00:00 2001 From: hillium Date: Thu, 16 Jan 2025 23:55:07 +0800 Subject: [PATCH 17/17] fix test Signed-off-by: 
hillium --- br/pkg/stream/stream_metas.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/br/pkg/stream/stream_metas.go b/br/pkg/stream/stream_metas.go index 9ac730bcbb920..7263aeadc7e26 100644 --- a/br/pkg/stream/stream_metas.go +++ b/br/pkg/stream/stream_metas.go @@ -705,6 +705,11 @@ func (m MigrationExt) Load(ctx context.Context, opts ...LoadOptions) (Migrations if err != nil { return errors.Trace(err) } + err = t.unmarshalContent(b) + if err != nil { + return err + } + if t.SeqNum == baseMigrationSN { // NOTE: the legacy truncating isn't implemented by appending a migration. // We load their checkpoint here to be compatible with them. @@ -715,7 +720,7 @@ func (m MigrationExt) Load(ctx context.Context, opts ...LoadOptions) (Migrations } t.Content.TruncatedTo = max(truncatedTs, t.Content.TruncatedTo) } - return t.Content.Unmarshal(b) + return nil }) collected := iter.CollectAll(ctx, items) if collected.Err != nil {