From b32224c4146268635997286a126bbe3b7276edc6 Mon Sep 17 00:00:00 2001
From: Stefan Hengl
Date: Tue, 19 Jan 2021 10:22:05 +0100
Subject: [PATCH] search: extract progress from graphqlbackend (#17373)

This is a pure refactor to extract code related to progress updates
from graphqlbackend.
---
 cmd/frontend/graphqlbackend/search.go         |   5 +-
 cmd/frontend/graphqlbackend/search_commits.go |  11 +-
 .../graphqlbackend/search_pagination.go       |  49 +++---
 .../graphqlbackend/search_pagination_test.go  |  61 ++++----
 .../graphqlbackend/search_progress_test.go    |  56 -------
 .../graphqlbackend/search_repositories.go     |   7 +-
 .../search_repositories_test.go               |  35 ++---
 cmd/frontend/graphqlbackend/search_results.go | 131 +++-------------
 .../graphqlbackend/search_results_test.go     |  39 ++---
 .../graphqlbackend/search_suggestions_test.go |  19 +--
 cmd/frontend/graphqlbackend/search_symbols.go |  11 +-
 cmd/frontend/graphqlbackend/textsearch.go     |  11 +-
 cmd/frontend/graphqlbackend/zoekt.go          |  13 +-
 cmd/frontend/graphqlbackend/zoekt_test.go     |  19 +--
 cmd/frontend/internal/search/search.go        |  22 ++-
 .../search/streaming/api/progress.go          | 142 ++++++++++--------
 .../search/streaming/api/progress_test.go     |  60 ++++++++
 .../golden/TestSearchProgress/all.json        |   0
 .../golden/TestSearchProgress/empty.json      |   0
 .../TestSearchProgress/timedout100.json       |   2 +-
 .../{progress.go => streaming/api/types.go}   |   2 +-
 internal/search/streaming/progress.go         |  95 ++++++++++++
 22 files changed, 427 insertions(+), 363 deletions(-)
 delete mode 100644 cmd/frontend/graphqlbackend/search_progress_test.go
 rename cmd/frontend/graphqlbackend/search_progress.go => internal/search/streaming/api/progress.go (55%)
 rename {cmd/frontend/graphqlbackend => internal/search/streaming/api}/testdata/golden/TestSearchProgress/all.json (100%)
 rename {cmd/frontend/graphqlbackend => internal/search/streaming/api}/testdata/golden/TestSearchProgress/empty.json (100%)
 rename {cmd/frontend/graphqlbackend => internal/search/streaming/api}/testdata/golden/TestSearchProgress/timedout100.json (60%)
 rename internal/search/{progress.go => streaming/api/types.go} (99%)
 create mode 100644 internal/search/streaming/progress.go

diff --git a/cmd/frontend/graphqlbackend/search.go b/cmd/frontend/graphqlbackend/search.go
index af290b7ca0cf..aac8f41ceca1 100644
--- a/cmd/frontend/graphqlbackend/search.go
+++ b/cmd/frontend/graphqlbackend/search.go
@@ -22,6 +22,7 @@ import (
 	searchbackend "github.com/sourcegraph/sourcegraph/internal/search/backend"
 	"github.com/sourcegraph/sourcegraph/internal/search/query"
 	querytypes "github.com/sourcegraph/sourcegraph/internal/search/query/types"
+	"github.com/sourcegraph/sourcegraph/internal/search/streaming"
 	"github.com/sourcegraph/sourcegraph/internal/trace"
 	"github.com/sourcegraph/sourcegraph/internal/vcs"
 	"github.com/sourcegraph/sourcegraph/schema"
@@ -577,7 +578,7 @@ func sortSearchSuggestions(s []*searchSuggestionResolver) {
 // returning common as to reflect that new information. If searchErr is a fatal error,
 // it returns a non-nil error; otherwise, if searchErr == nil or a non-fatal error, it returns a
 // nil error.
-func handleRepoSearchResult(repoRev *search.RepositoryRevisions, limitHit, timedOut bool, searchErr error) (_ SearchResultsCommon, fatalErr error) { +func handleRepoSearchResult(repoRev *search.RepositoryRevisions, limitHit, timedOut bool, searchErr error) (_ streaming.Stats, fatalErr error) { var status search.RepoStatus if limitHit { status |= search.RepoStatusLimitHit @@ -604,7 +605,7 @@ func handleRepoSearchResult(repoRev *search.RepositoryRevisions, limitHit, timed } else { status |= search.RepoStatusSearched } - return SearchResultsCommon{ + return streaming.Stats{ Status: search.RepoStatusSingleton(repoRev.Repo.ID, status), IsLimitHit: limitHit, }, fatalErr diff --git a/cmd/frontend/graphqlbackend/search_commits.go b/cmd/frontend/graphqlbackend/search_commits.go index 757605b70452..8341a8f72175 100644 --- a/cmd/frontend/graphqlbackend/search_commits.go +++ b/cmd/frontend/graphqlbackend/search_commits.go @@ -20,6 +20,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/errcode" "github.com/sourcegraph/sourcegraph/internal/search" "github.com/sourcegraph/sourcegraph/internal/search/query" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/vcs/git" ) @@ -525,7 +526,7 @@ type searchCommitsInReposParameters struct { ResultChannel chan<- []SearchResultResolver } -func searchCommitsInRepos(ctx context.Context, args *search.TextParametersForCommitParameters, params searchCommitsInReposParameters) ([]SearchResultResolver, *SearchResultsCommon, error) { +func searchCommitsInRepos(ctx context.Context, args *search.TextParametersForCommitParameters, params searchCommitsInReposParameters) ([]SearchResultResolver, *streaming.Stats, error) { var err error tr, ctx := trace.New(ctx, params.TraceName, fmt.Sprintf("query: %+v, numRepoRevs: %d", args.PatternInfo, len(args.Repos))) defer func() { @@ -559,7 +560,7 @@ func searchCommitsInRepos(ctx context.Context, args *search.TextParametersForCom wg sync.WaitGroup mu sync.Mutex unflattened [][]*CommitSearchResultResolver - common = &SearchResultsCommon{} + common = &streaming.Stats{} ) for _, repoRev := range args.Repos { // Skip the repo if no revisions were resolved for it @@ -587,7 +588,7 @@ func searchCommitsInRepos(ctx context.Context, args *search.TextParametersForCom err = errors.Wrapf(searchErr, "failed to search commit %s %s", params.ErrorName, repoRev.String()) cancel() } - common.update(&repoCommon) + common.Update(&repoCommon) if len(results) > 0 { unflattened = append(unflattened, results) } @@ -606,7 +607,7 @@ func searchCommitsInRepos(ctx context.Context, args *search.TextParametersForCom } // searchCommitDiffsInRepos searches a set of repos for matching commit diffs. -func searchCommitDiffsInRepos(ctx context.Context, args *search.TextParametersForCommitParameters, resultChannel chan<- []SearchResultResolver) ([]SearchResultResolver, *SearchResultsCommon, error) { +func searchCommitDiffsInRepos(ctx context.Context, args *search.TextParametersForCommitParameters, resultChannel chan<- []SearchResultResolver) ([]SearchResultResolver, *streaming.Stats, error) { return searchCommitsInRepos(ctx, args, searchCommitsInReposParameters{ TraceName: "searchCommitDiffsInRepos", ErrorName: "diffs", @@ -620,7 +621,7 @@ func searchCommitDiffsInRepos(ctx context.Context, args *search.TextParametersFo } // searchCommitLogInRepos searches a set of repos for matching commits. 
-func searchCommitLogInRepos(ctx context.Context, args *search.TextParametersForCommitParameters, resultChannel chan<- []SearchResultResolver) ([]SearchResultResolver, *SearchResultsCommon, error) { +func searchCommitLogInRepos(ctx context.Context, args *search.TextParametersForCommitParameters, resultChannel chan<- []SearchResultResolver) ([]SearchResultResolver, *streaming.Stats, error) { var terms []string if args.PatternInfo.Pattern != "" { terms = append(terms, args.PatternInfo.Pattern) diff --git a/cmd/frontend/graphqlbackend/search_pagination.go b/cmd/frontend/graphqlbackend/search_pagination.go index bf4631bafc50..9c6880ef0618 100644 --- a/cmd/frontend/graphqlbackend/search_pagination.go +++ b/cmd/frontend/graphqlbackend/search_pagination.go @@ -16,6 +16,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/db" "github.com/sourcegraph/sourcegraph/internal/search" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/types" ) @@ -175,12 +176,12 @@ func (r *searchResolver) paginatedResults(ctx context.Context) (result *SearchRe return repoIsLess(resolved.RepoRevs[i].Repo, resolved.RepoRevs[j].Repo) }) - common := SearchResultsCommon{} + common := streaming.Stats{} cursor, results, fileCommon, err := paginatedSearchFilesInRepos(ctx, &args, r.pagination) if err != nil { return nil, err } - common.update(fileCommon) + common.Update(fileCommon) tr.LazyPrintf("results=%d %s", len(results), &common) @@ -199,11 +200,11 @@ func (r *searchResolver) paginatedResults(ctx context.Context) (result *SearchRe ) return &SearchResultsResolver{ - start: start, - SearchResultsCommon: common, - SearchResults: results, - alert: alert, - cursor: cursor, + start: start, + Stats: common, + SearchResults: results, + alert: alert, + cursor: cursor, }, nil } @@ -239,7 +240,7 @@ func repoIsLess(i, j *types.RepoName) bool { // top of the penalty we incur from the larger `count:` mentioned in point // 2 above (in the worst case scenario). // -func paginatedSearchFilesInRepos(ctx context.Context, args *search.TextParameters, pagination *searchPaginationInfo) (*searchCursor, []SearchResultResolver, *SearchResultsCommon, error) { +func paginatedSearchFilesInRepos(ctx context.Context, args *search.TextParameters, pagination *searchPaginationInfo) (*searchCursor, []SearchResultResolver, *streaming.Stats, error) { repos, err := getRepos(ctx, args.RepoPromise) if err != nil { return nil, nil, nil, err @@ -252,18 +253,18 @@ func paginatedSearchFilesInRepos(ctx context.Context, args *search.TextParameter searchBucketMin: 10, searchBucketMax: 1000, } - return plan.execute(ctx, func(batch []*search.RepositoryRevisions) ([]SearchResultResolver, *SearchResultsCommon, error) { + return plan.execute(ctx, func(batch []*search.RepositoryRevisions) ([]SearchResultResolver, *streaming.Stats, error) { batchArgs := *args batchArgs.RepoPromise = (&search.Promise{}).Resolve(batch) fileResults, fileCommon, err := searchFilesInRepos(ctx, &batchArgs, nil) - // Timeouts are reported through SearchResultsCommon so don't report an error for them + // Timeouts are reported through Stats so don't report an error for them if err != nil && !(err == context.DeadlineExceeded || err == context.Canceled) { return nil, nil, err } if fileCommon == nil { // searchFilesInRepos can return a nil structure, but the executor // requires a non-nil one always (which is more sane). 
- fileCommon = &SearchResultsCommon{} + fileCommon = &streaming.Stats{} } // fileResults is not sorted so we must sort it now. fileCommon may or // may not be sorted, but we do not rely on its order. @@ -310,9 +311,9 @@ type repoPaginationPlan struct { // executor is a function which searches a batch of repositories. // -// A non-nil SearchResultsCommon must always be returned, even if an error is +// A non-nil Stats must always be returned, even if an error is // returned. -type executor func(batch []*search.RepositoryRevisions) ([]SearchResultResolver, *SearchResultsCommon, error) +type executor func(batch []*search.RepositoryRevisions) ([]SearchResultResolver, *streaming.Stats, error) // repoOfResult is a helper function to resolve the repo associated with a result type. func repoOfResult(result SearchResultResolver) string { @@ -337,7 +338,7 @@ func repoOfResult(result SearchResultResolver) string { // // If the executor returns any error, the search will be cancelled and the error // returned. -func (p *repoPaginationPlan) execute(ctx context.Context, exec executor) (c *searchCursor, results []SearchResultResolver, common *SearchResultsCommon, err error) { +func (p *repoPaginationPlan) execute(ctx context.Context, exec executor) (c *searchCursor, results []SearchResultResolver, common *streaming.Stats, err error) { // Determine how large the batches of repositories we will search over will be. var totalRepos int if p.mockNumTotalRepos != nil { @@ -363,7 +364,7 @@ func (p *repoPaginationPlan) execute(ctx context.Context, exec executor) (c *sea repos = repos[repositoryOffset:] } - // Search backends don't populate SearchResultsCommon.repos, the + // Search backends don't populate Stats.repos, the // repository searcher does. We need to do that here. commonRepos := make(map[api.RepoID]*types.RepoName, len(repos)) for _, r := range repos { @@ -371,7 +372,7 @@ func (p *repoPaginationPlan) execute(ctx context.Context, exec executor) (c *sea } // Search over the repos list in batches. - common = &SearchResultsCommon{ + common = &streaming.Stats{ Repos: commonRepos, } for start := 0; start <= len(repos); start += batchSize { @@ -382,7 +383,7 @@ func (p *repoPaginationPlan) execute(ctx context.Context, exec executor) (c *sea batch := repos[start:clamp(start+batchSize, 0, len(repos))] batchResults, batchCommon, err := exec(batch) if batchCommon == nil { - panic("never here: repoPaginationPlan.executor illegally returned nil SearchResultsCommon structure") + panic("never here: repoPaginationPlan.executor illegally returned nil Stats structure") } if err != nil { return nil, nil, nil, err @@ -390,7 +391,7 @@ func (p *repoPaginationPlan) execute(ctx context.Context, exec executor) (c *sea // Accumulate the results and stop if we have enough for the user. results = append(results, batchResults...) - common.update(batchCommon) + common.Update(batchCommon) if len(results) >= resultOffset+int(p.pagination.limit) { break @@ -447,7 +448,7 @@ type slicedSearchResults struct { results []SearchResultResolver // common is the new common results structure, updated to reflect the sliced results only. - common *SearchResultsCommon + common *streaming.Stats // resultOffset indicates where the search would continue within the last // repository whose results were consumed. 
For example: @@ -463,9 +464,9 @@ type slicedSearchResults struct { } // sliceSearchResults effectively slices results[offset:offset+limit] and -// returns an updated SearchResultsCommon structure to reflect that, as well as +// returns an updated Stats structure to reflect that, as well as // information about the slicing that was performed. -func sliceSearchResults(results []SearchResultResolver, common *SearchResultsCommon, offset, limit int) (final slicedSearchResults) { +func sliceSearchResults(results []SearchResultResolver, common *streaming.Stats, offset, limit int) (final slicedSearchResults) { firstRepo := "" if len(results[:offset]) > 0 { firstRepo = repoOfResult(results[offset]) @@ -526,7 +527,7 @@ func sliceSearchResults(results []SearchResultResolver, common *SearchResultsCom final.resultOffset++ } - // Construct the new SearchResultsCommon structure for just the results + // Construct the new Stats structure for just the results // we're returning. seenRepos := map[string]struct{}{} finalResults := make([]SearchResultResolver, 0, limit) @@ -548,11 +549,11 @@ func sliceSearchResults(results []SearchResultResolver, common *SearchResultsCom return } -func sliceSearchResultsCommon(common *SearchResultsCommon, firstResultRepo, lastResultRepo string) *SearchResultsCommon { +func sliceSearchResultsCommon(common *streaming.Stats, firstResultRepo, lastResultRepo string) *streaming.Stats { if common.Status.Any(search.RepoStatusLimitHit) { panic("never here: partial results should never be present in paginated search") } - final := &SearchResultsCommon{ + final := &streaming.Stats{ IsLimitHit: false, // irrelevant in paginated search IsIndexUnavailable: common.IsIndexUnavailable, Repos: make(map[api.RepoID]*types.RepoName), diff --git a/cmd/frontend/graphqlbackend/search_pagination_test.go b/cmd/frontend/graphqlbackend/search_pagination_test.go index 5ae3c18fa652..d5c8101e7048 100644 --- a/cmd/frontend/graphqlbackend/search_pagination_test.go +++ b/cmd/frontend/graphqlbackend/search_pagination_test.go @@ -13,6 +13,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/search" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/types" ) @@ -82,7 +83,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { result(repoName("org/repo5"), "d.go"), result(repoName("org/repo5"), "e.go"), } - sharedCommon := &SearchResultsCommon{ + sharedCommon := &streaming.Stats{ // Note: this is an intentionally unordered list to ensure we do not // rely on the order of lists in common (which is not guaranteed by // tests). 
@@ -91,19 +92,19 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { tests := []struct { name string results []SearchResultResolver - common *SearchResultsCommon + common *streaming.Stats offset, limit int want slicedSearchResults }{ { name: "empty result set", results: []SearchResultResolver{}, - common: &SearchResultsCommon{}, + common: &streaming.Stats{}, offset: 0, limit: 3, want: slicedSearchResults{ results: []SearchResultResolver{}, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: nil, }, resultOffset: 0, @@ -122,7 +123,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { result(repoName("org/repo1"), "b.go"), result(repoName("org/repo1"), "c.go"), }, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: reposMap(repoName("org/repo1")), }, resultOffset: 0, @@ -140,7 +141,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { result(repoName("org/repo1"), "a.go"), result(repoName("org/repo1"), "b.go"), }, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: reposMap(repoName("org/repo1")), }, resultOffset: 2, @@ -159,7 +160,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { result(repoName("org/repo2"), "b.go"), result(repoName("org/repo3"), "a.go"), }, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: reposMap(repoName("org/repo2"), repoName("org/repo3")), }, resultOffset: 0, @@ -178,7 +179,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { result(repoName("org/repo2"), "a.go"), result(repoName("org/repo2"), "b.go"), }, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: reposMap(repoName("org/repo1"), repoName("org/repo2")), }, resultOffset: 0, @@ -195,7 +196,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { result(repoName("org/repo2"), "b.go"), result(repoName("org/repo2"), "c.go"), }, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: reposMap(repoName("org/repo1"), repoName("org/repo2")), }, offset: 3, @@ -206,7 +207,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { result(repoName("org/repo2"), "b.go"), result(repoName("org/repo2"), "c.go"), }, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: reposMap(repoName("org/repo2")), }, resultOffset: 0, @@ -223,7 +224,7 @@ func TestSearchPagination_sliceSearchResults(t *testing.T) { results: []SearchResultResolver{ result(repoName("org/repo1"), "b.go"), }, - common: &SearchResultsCommon{ + common: &streaming.Stats{ Repos: reposMap(repoName("org/repo1")), }, resultOffset: 2, @@ -272,9 +273,9 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { repoRevs("5", "master"), } var searchedBatches [][]*search.RepositoryRevisions - resultsExecutor := func(batch []*search.RepositoryRevisions) (results []SearchResultResolver, common *SearchResultsCommon, err error) { + resultsExecutor := func(batch []*search.RepositoryRevisions) (results []SearchResultResolver, common *streaming.Stats, err error) { searchedBatches = append(searchedBatches, batch) - common = &SearchResultsCommon{Repos: reposMap()} + common = &streaming.Stats{Repos: reposMap()} for _, repoRev := range batch { for _, rev := range repoRev.Revs { rev := rev.RevSpec @@ -286,8 +287,8 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { } return } - noResultsExecutor := func(batch []*search.RepositoryRevisions) (results []SearchResultResolver, common *SearchResultsCommon, err error) { - return nil, &SearchResultsCommon{}, nil + noResultsExecutor := func(batch 
[]*search.RepositoryRevisions) (results []SearchResultResolver, common *streaming.Stats, err error) { + return nil, &streaming.Stats{}, nil } ctx := context.Background() @@ -298,7 +299,7 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { wantSearchedBatches [][]*search.RepositoryRevisions wantCursor *searchCursor wantResults []SearchResultResolver - wantCommon *SearchResultsCommon + wantCommon *streaming.Stats wantErr error }{ { @@ -328,7 +329,7 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { result(repoName("3"), "some/file2.go", "master"), result(repoName("3"), "some/file0.go", "feature"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("1"), repoName("2"), repoName("3")), }, }, @@ -356,7 +357,7 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { result(repoName("5"), "some/file1.go", "master"), result(repoName("5"), "some/file2.go", "master"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("3"), repoName("4"), repoName("5")), }, }, @@ -378,7 +379,7 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { wantResults: []SearchResultResolver{ result(repoName("1"), "some/file0.go", "master"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("1")), }, }, @@ -400,7 +401,7 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { wantResults: []SearchResultResolver{ result(repoName("1"), "some/file1.go", "master"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("1")), }, }, @@ -412,7 +413,7 @@ func TestSearchPagination_repoPaginationPlan(t *testing.T) { limit: 1, }, wantCursor: &searchCursor{RepositoryOffset: 1, ResultOffset: 0, Finished: true}, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(), }, }, @@ -488,8 +489,8 @@ func TestSearchPagination_issue_6287(t *testing.T) { repoRevs("1", "master"), repoRevs("2", "master"), } - executor := func(batch []*search.RepositoryRevisions) (results []SearchResultResolver, common *SearchResultsCommon, err error) { - common = &SearchResultsCommon{Repos: reposMap()} + executor := func(batch []*search.RepositoryRevisions) (results []SearchResultResolver, common *streaming.Stats, err error) { + common = &streaming.Stats{Repos: reposMap()} for _, repoRev := range batch { results = append(results, repoResults[string(repoRev.Repo.Name)]...) common.Repos[repoRev.Repo.ID] = repoRev.Repo @@ -620,8 +621,8 @@ func TestSearchPagination_cloning_missing(t *testing.T) { repoRevs("e", "master"), repoRevs("f", "master"), } - executor := func(batch []*search.RepositoryRevisions) (results []SearchResultResolver, common *SearchResultsCommon, err error) { - common = &SearchResultsCommon{Repos: reposMap()} + executor := func(batch []*search.RepositoryRevisions) (results []SearchResultResolver, common *streaming.Stats, err error) { + common = &streaming.Stats{Repos: reposMap()} for _, repoRev := range batch { if res, ok := repoResults[string(repoRev.Repo.Name)]; ok { results = append(results, res...) 
@@ -640,7 +641,7 @@ func TestSearchPagination_cloning_missing(t *testing.T) { searchRepos []*search.RepositoryRevisions wantCursor *searchCursor wantResults []SearchResultResolver - wantCommon *SearchResultsCommon + wantCommon *streaming.Stats wantErr error }{ { @@ -653,7 +654,7 @@ func TestSearchPagination_cloning_missing(t *testing.T) { wantResults: []SearchResultResolver{ result(repoName("a"), "a.go"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("a")), }, }, @@ -667,7 +668,7 @@ func TestSearchPagination_cloning_missing(t *testing.T) { wantResults: []SearchResultResolver{ result(repoName("c"), "a.go"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("b"), repoName("c")), Status: reposStatus(map[string]search.RepoStatus{ @@ -686,7 +687,7 @@ func TestSearchPagination_cloning_missing(t *testing.T) { result(repoName("a"), "a.go"), result(repoName("c"), "a.go"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("a"), repoName("b"), repoName("c")), Status: reposStatus(map[string]search.RepoStatus{ "b": search.RepoStatusMissing, @@ -705,7 +706,7 @@ func TestSearchPagination_cloning_missing(t *testing.T) { result(repoName("c"), "a.go"), result(repoName("f"), "a.go"), }, - wantCommon: &SearchResultsCommon{ + wantCommon: &streaming.Stats{ Repos: reposMap(repoName("a"), repoName("b"), repoName("c"), repoName("d"), repoName("e"), repoName("f")), Status: reposStatus(map[string]search.RepoStatus{ "b": search.RepoStatusMissing, diff --git a/cmd/frontend/graphqlbackend/search_progress_test.go b/cmd/frontend/graphqlbackend/search_progress_test.go deleted file mode 100644 index aaa8d178ef4a..000000000000 --- a/cmd/frontend/graphqlbackend/search_progress_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package graphqlbackend - -import ( - "flag" - "fmt" - "testing" - - "github.com/sourcegraph/sourcegraph/internal/search" - "github.com/sourcegraph/sourcegraph/internal/testutil" - "github.com/sourcegraph/sourcegraph/internal/types" -) - -var updateGolden = flag.Bool("update", false, "Update testdata goldens") - -func TestSearchProgress(t *testing.T) { - var timedout100 []*types.RepoName - var timedout100Status search.RepoStatusMap - for i := 0; i < 100; i++ { - r := mkRepos(fmt.Sprintf("timedout-%d", i))[0] - timedout100 = append(timedout100, r) - timedout100Status.Update(r.ID, search.RepoStatusTimedout) - } - - cases := map[string]*SearchResultsResolver{ - "empty": {}, - "timedout100": { - SearchResultsCommon: SearchResultsCommon{ - Repos: reposMap(timedout100...), - Status: timedout100Status, - }, - }, - "all": { - SearchResults: []SearchResultResolver{mkFileMatch(&types.RepoName{Name: "found-1"}, "dir/file", 123)}, - SearchResultsCommon: SearchResultsCommon{ - IsLimitHit: true, - Repos: reposMap(mkRepos("found-1", "missing-1", "missing-2", "cloning-1", "timedout-1")...), - Status: mkStatusMap(map[string]search.RepoStatus{ - "missing-1": search.RepoStatusMissing, - "missing-2": search.RepoStatusMissing, - "cloning-1": search.RepoStatusCloning, - "timedout-1": search.RepoStatusTimedout, - }), - ExcludedForks: 5, - ExcludedArchived: 1, - }, - }, - } - - for name, sr := range cases { - t.Run(name, func(t *testing.T) { - got := sr.Progress() - got.DurationMs = 0 // clear out non-deterministic field - testutil.AssertGolden(t, "testdata/golden/"+t.Name()+".json", *updateGolden, got) - }) - } -} diff --git a/cmd/frontend/graphqlbackend/search_repositories.go 
b/cmd/frontend/graphqlbackend/search_repositories.go index e6f6eb35583e..4c1e48473300 100644 --- a/cmd/frontend/graphqlbackend/search_repositories.go +++ b/cmd/frontend/graphqlbackend/search_repositories.go @@ -6,19 +6,20 @@ import ( "regexp" "github.com/sourcegraph/sourcegraph/internal/api" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/search" "github.com/sourcegraph/sourcegraph/internal/search/query" ) -var mockSearchRepositories func(args *search.TextParameters) ([]SearchResultResolver, *SearchResultsCommon, error) +var mockSearchRepositories func(args *search.TextParameters) ([]SearchResultResolver, *streaming.Stats, error) var repoIcon = "data:image/svg+xml;base64,PHN2ZyB2ZXJzaW9uPSIxLjEiIGlkPSJMYXllcl8xIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB4PSIwcHgiIHk9IjBweCIKCSB2aWV3Qm94PSIwIDAgNjQgNjQiIHN0eWxlPSJlbmFibGUtYmFja2dyb3VuZDpuZXcgMCAwIDY0IDY0OyIgeG1sOnNwYWNlPSJwcmVzZXJ2ZSI+Cjx0aXRsZT5JY29ucyA0MDA8L3RpdGxlPgo8Zz4KCTxwYXRoIGQ9Ik0yMywyMi40YzEuMywwLDIuNC0xLjEsMi40LTIuNHMtMS4xLTIuNC0yLjQtMi40Yy0xLjMsMC0yLjQsMS4xLTIuNCwyLjRTMjEuNywyMi40LDIzLDIyLjR6Ii8+Cgk8cGF0aCBkPSJNMzUsMjYuNGMxLjMsMCwyLjQtMS4xLDIuNC0yLjRzLTEuMS0yLjQtMi40LTIuNHMtMi40LDEuMS0yLjQsMi40UzMzLjcsMjYuNCwzNSwyNi40eiIvPgoJPHBhdGggZD0iTTIzLDQyLjRjMS4zLDAsMi40LTEuMSwyLjQtMi40cy0xLjEtMi40LTIuNC0yLjRzLTIuNCwxLjEtMi40LDIuNFMyMS43LDQyLjQsMjMsNDIuNHoiLz4KCTxwYXRoIGQ9Ik01MCwxNmgtMS41Yy0wLjMsMC0wLjUsMC4yLTAuNSwwLjV2MzVjMCwwLjMtMC4yLDAuNS0wLjUsMC41aC0yN2MtMC41LDAtMS0wLjItMS40LTAuNmwtMC42LTAuNmMtMC4xLTAuMS0wLjEtMC4yLTAuMS0wLjQKCQljMC0wLjMsMC4yLTAuNSwwLjUtMC41SDQ0YzEuMSwwLDItMC45LDItMlYxMmMwLTEuMS0wLjktMi0yLTJIMTRjLTEuMSwwLTIsMC45LTIsMnYzNi4zYzAsMS4xLDAuNCwyLjEsMS4yLDIuOGwzLjEsMy4xCgkJYzEuMSwxLjEsMi43LDEuOCw0LjIsMS44SDUwYzEuMSwwLDItMC45LDItMlYxOEM1MiwxNi45LDUxLjEsMTYsNTAsMTZ6IE0xOSwyMGMwLTIuMiwxLjgtNCw0LTRjMS40LDAsMi44LDAuOCwzLjUsMgoJCWMxLjEsMS45LDAuNCw0LjMtMS41LDUuNFYzM2MxLTAuNiwyLjMtMC45LDQtMC45YzEsMCwyLTAuNSwyLjgtMS4zQzMyLjUsMzAsMzMsMjkuMSwzMywyOHYtMC42Yy0xLjItMC43LTItMi0yLTMuNQoJCWMwLTIuMiwxLjgtNCw0LTRjMi4yLDAsNCwxLjgsNCw0YzAsMS41LTAuOCwyLjctMiwzLjVoMGMtMC4xLDIuMS0wLjksNC40LTIuNSw2Yy0xLjYsMS42LTMuNCwyLjQtNS41LDIuNWMtMC44LDAtMS40LDAuMS0xLjksMC4zCgkJYy0wLjIsMC4xLTEsMC44LTEuMiwwLjlDMjYuNiwzOCwyNywzOC45LDI3LDQwYzAsMi4yLTEuOCw0LTQsNHMtNC0xLjgtNC00YzAtMS41LDAuOC0yLjcsMi0zLjRWMjMuNEMxOS44LDIyLjcsMTksMjEuNCwxOSwyMHoiLz4KPC9nPgo8L3N2Zz4K" // searchRepositories searches for repositories by name. // // For a repository to match a query, the repository's name must match all of the repo: patterns AND the // default patterns (i.e., the patterns that are not prefixed with any search field). 
-func searchRepositories(ctx context.Context, args *search.TextParameters, limit int32) (res []SearchResultResolver, common *SearchResultsCommon, err error) { +func searchRepositories(ctx context.Context, args *search.TextParameters, limit int32) (res []SearchResultResolver, common *streaming.Stats, err error) { if mockSearchRepositories != nil { return mockSearchRepositories(args) } @@ -94,7 +95,7 @@ func searchRepositories(ctx context.Context, args *search.TextParameters, limit } } - return results, &SearchResultsCommon{ + return results, &streaming.Stats{ IsLimitHit: limitHit, }, nil } diff --git a/cmd/frontend/graphqlbackend/search_repositories_test.go b/cmd/frontend/graphqlbackend/search_repositories_test.go index b3832daa7952..716627fae30b 100644 --- a/cmd/frontend/graphqlbackend/search_repositories_test.go +++ b/cmd/frontend/graphqlbackend/search_repositories_test.go @@ -16,6 +16,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/search" searchbackend "github.com/sourcegraph/sourcegraph/internal/search/backend" "github.com/sourcegraph/sourcegraph/internal/search/query" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/types" ) @@ -28,7 +29,7 @@ func TestSearchRepositories(t *testing.T) { zoekt := &searchbackend.Zoekt{Client: &searchbackend.FakeSearcher{}} - mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *SearchResultsCommon, err error) { + mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *streaming.Stats, err error) { repos, err := getRepos(context.Background(), args.RepoPromise) if err != nil { return nil, nil, err @@ -41,18 +42,18 @@ func TestSearchRepositories(t *testing.T) { uri: "git://" + string(repoName) + "?1a2b3c#" + "f.go", Repo: &RepositoryResolver{innerRepo: &types.Repo{ID: 123}}, }, - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil case "bar/one": return []*FileMatchResolver{ { uri: "git://" + string(repoName) + "?1a2b3c#" + "f.go", Repo: &RepositoryResolver{innerRepo: &types.Repo{ID: 789}}, }, - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil case "foo/no-match": - return []*FileMatchResolver{}, &SearchResultsCommon{}, nil + return []*FileMatchResolver{}, &streaming.Stats{}, nil default: - return nil, &SearchResultsCommon{}, errors.New("Unexpected repo") + return nil, &streaming.Stats{}, errors.New("Unexpected repo") } } @@ -126,7 +127,7 @@ func TestSearchRepositories(t *testing.T) { } func TestRepoShouldBeAdded(t *testing.T) { - mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *SearchResultsCommon, err error) { + mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *streaming.Stats, err error) { repos, err := getRepos(context.Background(), args.RepoPromise) if err != nil { return nil, nil, err @@ -139,11 +140,11 @@ func TestRepoShouldBeAdded(t *testing.T) { uri: "git://" + string(repoName) + "?1a2b3c#" + "foo.go", Repo: &RepositoryResolver{innerRepo: &types.Repo{ID: 123}}, }, - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil case "foo/no-match": - return []*FileMatchResolver{}, &SearchResultsCommon{}, nil + return []*FileMatchResolver{}, &streaming.Stats{}, nil default: - return nil, &SearchResultsCommon{}, errors.New("Unexpected repo") + return nil, &streaming.Stats{}, errors.New("Unexpected repo") } } @@ -151,13 +152,13 @@ func TestRepoShouldBeAdded(t *testing.T) { t.Run("repo 
should be included in results, query has repoHasFile filter", func(t *testing.T) { repo := &search.RepositoryRevisions{Repo: &types.RepoName{ID: 123, Name: "foo/one"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}} - mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *SearchResultsCommon, err error) { + mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *streaming.Stats, err error) { return []*FileMatchResolver{ { uri: "git://" + string(repo.Repo.Name) + "?1a2b3c#" + "foo.go", Repo: &RepositoryResolver{innerRepo: &types.Repo{ID: 123}}, }, - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil } pat := &search.TextPatternInfo{Pattern: "", FilePatternsReposMustInclude: []string{"foo"}, IsRegExp: true, FileMatchLimit: 1, PathPatternsAreCaseSensitive: false, PatternMatchesContent: true, PatternMatchesPath: true} shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat) @@ -171,8 +172,8 @@ func TestRepoShouldBeAdded(t *testing.T) { t.Run("repo shouldn't be included in results, query has repoHasFile filter ", func(t *testing.T) { repo := &search.RepositoryRevisions{Repo: &types.RepoName{Name: "foo/no-match"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}} - mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *SearchResultsCommon, err error) { - return []*FileMatchResolver{}, &SearchResultsCommon{}, nil + mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *streaming.Stats, err error) { + return []*FileMatchResolver{}, &streaming.Stats{}, nil } pat := &search.TextPatternInfo{Pattern: "", FilePatternsReposMustInclude: []string{"foo"}, IsRegExp: true, FileMatchLimit: 1, PathPatternsAreCaseSensitive: false, PatternMatchesContent: true, PatternMatchesPath: true} shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat) @@ -186,13 +187,13 @@ func TestRepoShouldBeAdded(t *testing.T) { t.Run("repo shouldn't be included in results, query has -repoHasFile filter", func(t *testing.T) { repo := &search.RepositoryRevisions{Repo: &types.RepoName{ID: 123, Name: "foo/one"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}} - mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *SearchResultsCommon, err error) { + mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *streaming.Stats, err error) { return []*FileMatchResolver{ { uri: "git://" + string(repo.Repo.Name) + "?1a2b3c#" + "foo.go", Repo: &RepositoryResolver{innerRepo: &types.Repo{ID: 123}}, }, - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil } pat := &search.TextPatternInfo{Pattern: "", FilePatternsReposMustExclude: []string{"foo"}, IsRegExp: true, FileMatchLimit: 1, PathPatternsAreCaseSensitive: false, PatternMatchesContent: true, PatternMatchesPath: true} shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat) @@ -206,8 +207,8 @@ func TestRepoShouldBeAdded(t *testing.T) { t.Run("repo should be included in results, query has -repoHasFile filter", func(t *testing.T) { repo := &search.RepositoryRevisions{Repo: &types.RepoName{Name: "foo/no-match"}, Revs: []search.RevisionSpecifier{{RevSpec: ""}}} - mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *SearchResultsCommon, err error) { - return []*FileMatchResolver{}, &SearchResultsCommon{}, nil + 
mockSearchFilesInRepos = func(args *search.TextParameters) (matches []*FileMatchResolver, common *streaming.Stats, err error) { + return []*FileMatchResolver{}, &streaming.Stats{}, nil } pat := &search.TextPatternInfo{Pattern: "", FilePatternsReposMustExclude: []string{"foo"}, IsRegExp: true, FileMatchLimit: 1, PathPatternsAreCaseSensitive: false, PatternMatchesContent: true, PatternMatchesPath: true} shouldBeAdded, err := repoShouldBeAdded(context.Background(), zoekt, repo, pat) diff --git a/cmd/frontend/graphqlbackend/search_results.go b/cmd/frontend/graphqlbackend/search_results.go index ea1c7b8838c0..d1aeb350f9c4 100644 --- a/cmd/frontend/graphqlbackend/search_results.go +++ b/cmd/frontend/graphqlbackend/search_results.go @@ -39,6 +39,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/rcache" "github.com/sourcegraph/sourcegraph/internal/search" "github.com/sourcegraph/sourcegraph/internal/search/query" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/trace/ot" "github.com/sourcegraph/sourcegraph/internal/types" @@ -47,90 +48,6 @@ import ( "github.com/sourcegraph/sourcegraph/schema" ) -// SearchResultsCommon contains fields that should be returned by all funcs -// that contribute to the overall search result set. -type SearchResultsCommon struct { - // IsLimitHit is true if we do not have all results that match the query. - IsLimitHit bool - - // Repos that were matched by the repo-related filters. This should only - // be set once by search, when we have resolved Repos. - Repos map[api.RepoID]*types.RepoName - - // Status is a RepoStatusMap of repository search statuses. - Status search.RepoStatusMap - - // ExcludedForks is the count of excluded forked repos because the search - // query doesn't apply to them, but that we want to know about. - ExcludedForks int - - // ExcludedArchived is the count of excluded archived repos because the - // search query doesn't apply to them, but that we want to know about. - ExcludedArchived int - - // IsIndexUnavailable is true if indexed search was unavailable. - IsIndexUnavailable bool -} - -// update updates c with the other data, deduping as necessary. It modifies c but -// does not modify other. 
-func (c *SearchResultsCommon) update(other *SearchResultsCommon) { - if other == nil { - return - } - - c.IsLimitHit = c.IsLimitHit || other.IsLimitHit - c.IsIndexUnavailable = c.IsIndexUnavailable || other.IsIndexUnavailable - - if c.Repos == nil { - c.Repos = other.Repos - } else { - for id, r := range other.Repos { - c.Repos[id] = r - } - } - - c.Status.Union(&other.Status) - - c.ExcludedForks = c.ExcludedForks + other.ExcludedForks - c.ExcludedArchived = c.ExcludedArchived + other.ExcludedArchived -} - -func (c *SearchResultsCommon) String() string { - if c == nil { - return "SearchResultsCommon{}" - } - - parts := []string{ - fmt.Sprintf("status=%s", c.Status.String()), - } - nums := []struct { - name string - n int - }{ - {"repos", len(c.Repos)}, - {"excludedForks", c.ExcludedForks}, - {"excludedArchived", c.ExcludedArchived}, - } - for _, p := range nums { - if p.n != 0 { - parts = append(parts, fmt.Sprintf("%s=%d", p.name, p.n)) - } - } - if c.IsLimitHit { - parts = append(parts, "limitHit") - } - if c.IsIndexUnavailable { - parts = append(parts, "indexUnavailable") - } - - return "SearchResultsCommon{" + strings.Join(parts, " ") + "}" -} - -func (c *SearchResultsCommon) Equal(other *SearchResultsCommon) bool { - return reflect.DeepEqual(c, other) -} - func (c *SearchResultsResolver) LimitHit() bool { return c.IsLimitHit } @@ -221,7 +138,7 @@ func dedupSort(repos *types.RepoNames) { // SearchResultsResolver is a resolver for the GraphQL type `SearchResults` type SearchResultsResolver struct { SearchResults []SearchResultResolver - SearchResultsCommon + streaming.Stats alert *searchAlert start time.Time // when the results started being computed @@ -346,7 +263,7 @@ func (sr *SearchResultsResolver) DynamicFilters(ctx context.Context) []*searchFi // are @ and :, both of which are disallowed in git refs filter = filter + fmt.Sprintf(`@%s`, rev) } - limitHit := sr.SearchResultsCommon.Status.Get(repo.IDInt32())&search.RepoStatusLimitHit != 0 + limitHit := sr.Stats.Status.Get(repo.IDInt32())&search.RepoStatusLimitHit != 0 // Increment number of matches per repo. Add will override previous entry for uri repoToMatchCount[uri] += lineMatchCount add(filter, uri, repoToMatchCount[uri], limitHit, "repo", scoreDefault) @@ -383,11 +300,11 @@ func (sr *SearchResultsResolver) DynamicFilters(ctx context.Context) []*searchFi } } - if sr.SearchResultsCommon.ExcludedForks > 0 { - add("fork:yes", "fork:yes", int32(sr.SearchResultsCommon.ExcludedForks), sr.IsLimitHit, "repo", scoreImportant) + if sr.Stats.ExcludedForks > 0 { + add("fork:yes", "fork:yes", int32(sr.Stats.ExcludedForks), sr.IsLimitHit, "repo", scoreImportant) } - if sr.SearchResultsCommon.ExcludedArchived > 0 { - add("archived:yes", "archived:yes", int32(sr.SearchResultsCommon.ExcludedArchived), sr.IsLimitHit, "repo", scoreImportant) + if sr.Stats.ExcludedArchived > 0 { + add("archived:yes", "archived:yes", int32(sr.Stats.ExcludedArchived), sr.IsLimitHit, "repo", scoreImportant) } for _, result := range sr.SearchResults { if fm, ok := result.ToFileMatch(); ok { @@ -711,7 +628,7 @@ func (r *searchResolver) evaluateLeaf(ctx context.Context) (_ *SearchResultsReso if !result.cursor.Finished { // For stable result queries limitHit = true implies // there is a next cursor, and more results may exist. 
- result.SearchResultsCommon.IsLimitHit = true + result.Stats.IsLimitHit = true } return result, err } @@ -732,7 +649,7 @@ func (r *searchResolver) evaluateLeaf(ctx context.Context) (_ *SearchResultsReso switch { case err == context.DeadlineExceeded || (err == nil && rr.allReposTimedout()): status = "timeout" - case err == nil && rr.SearchResultsCommon.Status.Any(search.RepoStatusTimedout): + case err == nil && rr.Stats.Status.Any(search.RepoStatusTimedout): status = "partial_timeout" case err == nil && rr.alert != nil: status = "alert" @@ -872,7 +789,7 @@ func unionMerge(left, right *SearchResultsResolver) *SearchResultsResolver { } left.SearchResults = merged - left.SearchResultsCommon.update(&right.SearchResultsCommon) + left.Stats.Update(&right.Stats) return left } @@ -919,7 +836,7 @@ func intersectMerge(left, right *SearchResultsResolver) *SearchResultsResolver { merged = append(merged, leftMatch) } left.SearchResults = merged - left.SearchResultsCommon.update(&right.SearchResultsCommon) + left.Stats.Update(&right.Stats) return left } @@ -1096,7 +1013,7 @@ func (r *searchResolver) evaluateOr(ctx context.Context, scopeParameters []query if result == nil { return nil, nil } - // Do not rely on result.SearchResultsCommon.resultCount because it may + // Do not rely on result.Stats.resultCount because it may // count non-content matches and there's no easy way to know. if len(result.SearchResults) > wantCount { result.SearchResults = result.SearchResults[:wantCount] @@ -1110,7 +1027,7 @@ func (r *searchResolver) evaluateOr(ctx context.Context, scopeParameters []query } if new != nil { result = union(result, new) - // Do not rely on result.SearchResultsCommon.resultCount because it may + // Do not rely on result.Stats.resultCount because it may // count non-content matches and there's no easy way to know. if len(result.SearchResults) > wantCount { result.SearchResults = result.SearchResults[:wantCount] @@ -1744,21 +1661,21 @@ type aggregator struct { mu sync.Mutex results []SearchResultResolver - common SearchResultsCommon + common streaming.Stats multiErr *multierror.Error // fileMatches is a map from git:// URI of the file to FileMatch resolver // to merge multiple results of different types for the same file fileMatches map[string]*FileMatchResolver } -func (a *aggregator) get() ([]SearchResultResolver, SearchResultsCommon, *multierror.Error) { +func (a *aggregator) get() ([]SearchResultResolver, streaming.Stats, *multierror.Error) { a.mu.Lock() defer a.mu.Unlock() return a.results, a.common, a.multiErr } // report sends results down resultChannel and collects the results. -func (a *aggregator) report(ctx context.Context, results []SearchResultResolver, common *SearchResultsCommon, err error) { +func (a *aggregator) report(ctx context.Context, results []SearchResultResolver, common *streaming.Stats, err error) { if a.resultChannel != nil { a.resultChannel <- results } @@ -1768,10 +1685,10 @@ func (a *aggregator) report(ctx context.Context, results []SearchResultResolver, // collect the results. This doesn't send down resultChannel. Should only be // used by streaming supported backends. 
-func (a *aggregator) collect(ctx context.Context, results []SearchResultResolver, common *SearchResultsCommon, err error) { +func (a *aggregator) collect(ctx context.Context, results []SearchResultResolver, common *streaming.Stats, err error) { a.mu.Lock() defer a.mu.Unlock() - // Timeouts are reported through SearchResultsCommon so don't report an error for them + // Timeouts are reported through Stats so don't report an error for them if err != nil && !isContextError(ctx, err) { a.multiErr = multierror.Append(a.multiErr, errors.Wrap(err, "repository search failed")) } @@ -1792,7 +1709,7 @@ func (a *aggregator) collect(ctx context.Context, results []SearchResultResolver } } if common != nil { - a.common.update(common) + a.common.Update(common) } } @@ -2030,7 +1947,7 @@ func (r *searchResolver) doResults(ctx context.Context, forceOnlyResultType stri repos[repoRev.Repo.ID] = repoRev.Repo } - agg.report(ctx, nil, &SearchResultsCommon{ + agg.report(ctx, nil, &streaming.Stats{ Repos: repos, ExcludedForks: resolved.ExcludedRepos.Forks, ExcludedArchived: resolved.ExcludedRepos.Archived, @@ -2141,10 +2058,10 @@ func (r *searchResolver) doResults(ctx context.Context, forceOnlyResultType stri r.sortResults(ctx, results) resultsResolver := SearchResultsResolver{ - start: start, - SearchResultsCommon: common, - SearchResults: results, - alert: alert, + start: start, + Stats: common, + SearchResults: results, + alert: alert, } return &resultsResolver, multiErr.ErrorOrNil() diff --git a/cmd/frontend/graphqlbackend/search_results_test.go b/cmd/frontend/graphqlbackend/search_results_test.go index 7f365af9200d..26ec9bd01adb 100644 --- a/cmd/frontend/graphqlbackend/search_results_test.go +++ b/cmd/frontend/graphqlbackend/search_results_test.go @@ -22,6 +22,7 @@ import ( searchbackend "github.com/sourcegraph/sourcegraph/internal/search/backend" "github.com/sourcegraph/sourcegraph/internal/search/query" searchquerytypes "github.com/sourcegraph/sourcegraph/internal/search/query/types" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/types" "github.com/sourcegraph/sourcegraph/schema" ) @@ -92,8 +93,8 @@ func TestSearchResults(t *testing.T) { db.Mocks.Repos.MockGet(t, 1) db.Mocks.Repos.Count = mockCount - mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) { - return nil, &SearchResultsCommon{}, nil + mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) { + return nil, &streaming.Stats{}, nil } defer func() { mockSearchFilesInRepos = nil }() @@ -127,14 +128,14 @@ func TestSearchResults(t *testing.T) { db.Mocks.Repos.Count = mockCount calledSearchRepositories := false - mockSearchRepositories = func(args *search.TextParameters) ([]SearchResultResolver, *SearchResultsCommon, error) { + mockSearchRepositories = func(args *search.TextParameters) ([]SearchResultResolver, *streaming.Stats, error) { calledSearchRepositories = true - return nil, &SearchResultsCommon{}, nil + return nil, &streaming.Stats{}, nil } defer func() { mockSearchRepositories = nil }() calledSearchSymbols := false - mockSearchSymbols = func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *SearchResultsCommon, err error) { + mockSearchSymbols = func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *streaming.Stats, err error) { calledSearchSymbols = true if want := 
`(foo\d).*?(bar\*)`; args.PatternInfo.Pattern != want { t.Errorf("got %q, want %q", args.PatternInfo.Pattern, want) @@ -145,14 +146,14 @@ func TestSearchResults(t *testing.T) { defer func() { mockSearchSymbols = nil }() calledSearchFilesInRepos := atomic.NewBool(false) - mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) { + mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) { calledSearchFilesInRepos.Store(true) if want := `(foo\d).*?(bar\*)`; args.PatternInfo.Pattern != want { t.Errorf("got %q, want %q", args.PatternInfo.Pattern, want) } repo := &types.RepoName{ID: 1, Name: "repo"} fm := mkFileMatch(repo, "dir/file", 123) - return []*FileMatchResolver{fm}, &SearchResultsCommon{}, nil + return []*FileMatchResolver{fm}, &streaming.Stats{}, nil } defer func() { mockSearchFilesInRepos = nil }() @@ -192,14 +193,14 @@ func TestSearchResults(t *testing.T) { db.Mocks.Repos.Count = mockCount calledSearchRepositories := false - mockSearchRepositories = func(args *search.TextParameters) ([]SearchResultResolver, *SearchResultsCommon, error) { + mockSearchRepositories = func(args *search.TextParameters) ([]SearchResultResolver, *streaming.Stats, error) { calledSearchRepositories = true - return nil, &SearchResultsCommon{}, nil + return nil, &streaming.Stats{}, nil } defer func() { mockSearchRepositories = nil }() calledSearchSymbols := false - mockSearchSymbols = func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *SearchResultsCommon, err error) { + mockSearchSymbols = func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *streaming.Stats, err error) { calledSearchSymbols = true if want := `"foo\\d \"bar*\""`; args.PatternInfo.Pattern != want { t.Errorf("got %q, want %q", args.PatternInfo.Pattern, want) @@ -210,14 +211,14 @@ func TestSearchResults(t *testing.T) { defer func() { mockSearchSymbols = nil }() calledSearchFilesInRepos := atomic.NewBool(false) - mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) { + mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) { calledSearchFilesInRepos.Store(true) if want := `foo\\d "bar\*"`; args.PatternInfo.Pattern != want { t.Errorf("got %q, want %q", args.PatternInfo.Pattern, want) } repo := &types.RepoName{ID: 1, Name: "repo"} fm := mkFileMatch(repo, "dir/file", 123) - return []*FileMatchResolver{fm}, &SearchResultsCommon{}, nil + return []*FileMatchResolver{fm}, &streaming.Stats{}, nil } defer func() { mockSearchFilesInRepos = nil }() @@ -936,7 +937,7 @@ func TestCheckDiffCommitSearchLimits(t *testing.T) { func Test_SearchResultsResolver_ApproximateResultCount(t *testing.T) { type fields struct { results []SearchResultResolver - searchResultsCommon SearchResultsCommon + searchResultsCommon streaming.Stats alert *searchAlert start time.Time } @@ -963,7 +964,7 @@ func Test_SearchResultsResolver_ApproximateResultCount(t *testing.T) { name: "file matches limit hit", fields: fields{ results: []SearchResultResolver{&FileMatchResolver{}}, - searchResultsCommon: SearchResultsCommon{IsLimitHit: true}, + searchResultsCommon: streaming.Stats{IsLimitHit: true}, }, want: "1+", }, @@ -998,7 +999,7 @@ func Test_SearchResultsResolver_ApproximateResultCount(t *testing.T) { }, }, }, - searchResultsCommon: SearchResultsCommon{IsLimitHit: true}, + 
searchResultsCommon: streaming.Stats{IsLimitHit: true}, }, want: "2+", }, @@ -1006,10 +1007,10 @@ func Test_SearchResultsResolver_ApproximateResultCount(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sr := &SearchResultsResolver{ - SearchResults: tt.fields.results, - SearchResultsCommon: tt.fields.searchResultsCommon, - alert: tt.fields.alert, - start: tt.fields.start, + SearchResults: tt.fields.results, + Stats: tt.fields.searchResultsCommon, + alert: tt.fields.alert, + start: tt.fields.start, } if got := sr.ApproximateResultCount(); got != tt.want { t.Errorf("searchResultsResolver.ApproximateResultCount() = %v, want %v", got, tt.want) diff --git a/cmd/frontend/graphqlbackend/search_suggestions_test.go b/cmd/frontend/graphqlbackend/search_suggestions_test.go index 8a88fcdb0f38..a4ddd789460a 100644 --- a/cmd/frontend/graphqlbackend/search_suggestions_test.go +++ b/cmd/frontend/graphqlbackend/search_suggestions_test.go @@ -15,6 +15,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/api" "github.com/sourcegraph/sourcegraph/internal/db" "github.com/sourcegraph/sourcegraph/internal/search" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/types" "github.com/sourcegraph/sourcegraph/internal/vcs/git" "github.com/sourcegraph/sourcegraph/schema" @@ -47,7 +48,7 @@ func TestSearchSuggestions(t *testing.T) { } } - mockSearchSymbols = func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *SearchResultsCommon, err error) { + mockSearchSymbols = func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *streaming.Stats, err error) { // TODO test symbol suggestions return nil, nil, nil } @@ -103,7 +104,7 @@ func TestSearchSuggestions(t *testing.T) { defer git.ResetMocks() calledSearchFilesInRepos := atomic.NewBool(false) - mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) { + mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) { calledSearchFilesInRepos.Store(true) if want := "foo"; args.PatternInfo.Pattern != want { t.Errorf("got %q, want %q", args.PatternInfo.Pattern, want) @@ -111,7 +112,7 @@ func TestSearchSuggestions(t *testing.T) { fm := mkFileMatch(&types.RepoName{Name: "repo"}, "dir/file") fm.uri = "git://repo?rev#dir/file" fm.CommitID = "rev" - return []*FileMatchResolver{fm}, &SearchResultsCommon{}, nil + return []*FileMatchResolver{fm}, &streaming.Stats{}, nil } defer func() { mockSearchFilesInRepos = nil }() for _, v := range searchVersions { @@ -160,7 +161,7 @@ func TestSearchSuggestions(t *testing.T) { backend.Mocks.Repos.MockResolveRev_NoCheck(t, api.CommitID("deadbeef")) calledSearchFilesInRepos := atomic.NewBool(false) - mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) { + mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) { mu.Lock() defer mu.Unlock() calledSearchFilesInRepos.Store(true) @@ -177,7 +178,7 @@ func TestSearchSuggestions(t *testing.T) { mk("repo3", "dir/foo-repo3-file-name-match"), mk("repo1", "dir/foo-repo1-file-name-match"), mk("repo", "dir/file-content-match"), - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil } defer func() { mockSearchFilesInRepos = nil }() @@ -240,7 +241,7 @@ func TestSearchSuggestions(t *testing.T) { defer func() { 
mockShowLangSuggestions = nil }() calledSearchFilesInRepos := atomic.NewBool(false) - mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) { + mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) { mu.Lock() defer mu.Unlock() calledSearchFilesInRepos.Store(true) @@ -253,7 +254,7 @@ func TestSearchSuggestions(t *testing.T) { } return []*FileMatchResolver{ mkFileMatch(&types.RepoName{Name: "foo-repo"}, "dir/file"), - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil } defer func() { mockSearchFilesInRepos = nil }() @@ -361,7 +362,7 @@ func TestSearchSuggestions(t *testing.T) { defer func() { mockShowLangSuggestions = nil }() calledSearchFilesInRepos := atomic.NewBool(false) - mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) { + mockSearchFilesInRepos = func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) { mu.Lock() defer mu.Unlock() calledSearchFilesInRepos.Store(true) @@ -374,7 +375,7 @@ func TestSearchSuggestions(t *testing.T) { } return []*FileMatchResolver{ mkFileMatch(&types.RepoName{Name: "foo-repo"}, "dir/bar-file"), - }, &SearchResultsCommon{}, nil + }, &streaming.Stats{}, nil } defer func() { mockSearchFilesInRepos = nil }() diff --git a/cmd/frontend/graphqlbackend/search_symbols.go b/cmd/frontend/graphqlbackend/search_symbols.go index 8529a24c89da..4e544f4528c3 100644 --- a/cmd/frontend/graphqlbackend/search_symbols.go +++ b/cmd/frontend/graphqlbackend/search_symbols.go @@ -23,6 +23,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/gituri" "github.com/sourcegraph/sourcegraph/internal/goroutine" "github.com/sourcegraph/sourcegraph/internal/search" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/symbols/protocol" "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/trace/ot" @@ -42,13 +43,13 @@ func (s *searchSymbolResult) uri() *gituri.URI { return s.baseURI.WithFilePath(s.symbol.Path) } -var mockSearchSymbols func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *SearchResultsCommon, err error) +var mockSearchSymbols func(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *streaming.Stats, err error) // searchSymbols searches the given repos in parallel for symbols matching the given search query // it can be used for both search suggestions and search results // // May return partial results and an error -func searchSymbols(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *SearchResultsCommon, err error) { +func searchSymbols(ctx context.Context, args *search.TextParameters, limit int) (res []*FileMatchResolver, common *streaming.Stats, err error) { if mockSearchSymbols != nil { return mockSearchSymbols(ctx, args, limit) } @@ -71,7 +72,7 @@ func searchSymbols(ctx context.Context, args *search.TextParameters, limit int) ctx, cancelAll := context.WithCancel(ctx) defer cancelAll() - common = &SearchResultsCommon{} + common = &streaming.Stats{} indexed, err := newIndexedSearchRequest(ctx, args, symbolRequest) if err != nil { @@ -134,7 +135,7 @@ func searchSymbols(ctx context.Context, args *search.TextParameters, limit int) func() { mu.Lock() defer mu.Unlock() - common.update(&event.common) + common.Update(&event.common) 
tr.LogFields(otlog.Object("searchErr", event.err), otlog.Error(err), otlog.Bool("overLimitCanceled", overLimitCanceled)) if event.err != nil && err == nil && !overLimitCanceled { err = event.err @@ -171,7 +172,7 @@ func searchSymbols(ctx context.Context, args *search.TextParameters, limit int) run.Error(repoErr) } } - common.update(&repoCommon) + common.Update(&repoCommon) if repoSymbols != nil { addMatches(repoSymbols) } diff --git a/cmd/frontend/graphqlbackend/textsearch.go b/cmd/frontend/graphqlbackend/textsearch.go index 87d02ac990cb..0891a3716dba 100644 --- a/cmd/frontend/graphqlbackend/textsearch.go +++ b/cmd/frontend/graphqlbackend/textsearch.go @@ -12,6 +12,7 @@ import ( "github.com/pkg/errors" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" "github.com/sourcegraph/sourcegraph/internal/trace" otlog "github.com/opentracing/opentracing-go/log" @@ -283,7 +284,7 @@ func fileMatchURI(name api.RepoName, ref, path string) string { return b.String() } -var mockSearchFilesInRepos func(args *search.TextParameters) ([]*FileMatchResolver, *SearchResultsCommon, error) +var mockSearchFilesInRepos func(args *search.TextParameters) ([]*FileMatchResolver, *streaming.Stats, error) func fileMatchResultsToSearchResults(results []*FileMatchResolver) []SearchResultResolver { results2 := make([]SearchResultResolver, len(results)) @@ -295,7 +296,7 @@ func fileMatchResultsToSearchResults(results []*FileMatchResolver) []SearchResul // searchFilesInRepos searches a set of repos for a pattern. // For c != nil searchFilesInRepos will send results down c. -func searchFilesInRepos(ctx context.Context, args *search.TextParameters, c chan<- []SearchResultResolver) (res []*FileMatchResolver, common *SearchResultsCommon, finalErr error) { +func searchFilesInRepos(ctx context.Context, args *search.TextParameters, c chan<- []SearchResultResolver) (res []*FileMatchResolver, common *streaming.Stats, finalErr error) { if mockSearchFilesInRepos != nil { return mockSearchFilesInRepos(args) } @@ -314,7 +315,7 @@ func searchFilesInRepos(ctx context.Context, args *search.TextParameters, c chan ctx, cancel := context.WithCancel(ctx) defer cancel() - common = &SearchResultsCommon{} + common = &streaming.Stats{} indexedTyp := textRequest if args.PatternInfo.IsStructuralPat { @@ -508,7 +509,7 @@ func searchFilesInRepos(ctx context.Context, args *search.TextParameters, c chan cancel() } } - common.update(&repoCommon) + common.Update(&repoCommon) addMatches(matches) }(limitCtx, limitDone) // ends the Go routine for a call to searcher for a repo } // ends the for loop iterating over repo's revs @@ -526,7 +527,7 @@ func searchFilesInRepos(ctx context.Context, args *search.TextParameters, c chan func() { mu.Lock() defer mu.Unlock() - common.update(&event.common) + common.Update(&event.common) tr.LogFields(otlog.Int("matches.len", len(event.results)), otlog.Error(event.err), otlog.Bool("overLimitCanceled", overLimitCanceled)) if event.err != nil && searchErr == nil && !overLimitCanceled { diff --git a/cmd/frontend/graphqlbackend/zoekt.go b/cmd/frontend/graphqlbackend/zoekt.go index 70edc17bafe2..145ce45ef90a 100644 --- a/cmd/frontend/graphqlbackend/zoekt.go +++ b/cmd/frontend/graphqlbackend/zoekt.go @@ -19,6 +19,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/gituri" "github.com/sourcegraph/sourcegraph/internal/search" "github.com/sourcegraph/sourcegraph/internal/search/query" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" zoektutil "github.com/sourcegraph/sourcegraph/internal/search/zoekt" 
"github.com/sourcegraph/sourcegraph/internal/symbols/protocol" "github.com/sourcegraph/sourcegraph/internal/trace" @@ -201,7 +202,7 @@ func (s *indexedSearchRequest) Repos() map[string]*search.RepositoryRevisions { type streamFunc func(context.Context, *search.TextParameters, *indexedRepoRevs, indexedRequestType, func(t time.Time) time.Duration) <-chan zoektSearchStreamEvent type indexedSearchEvent struct { - common SearchResultsCommon + common streaming.Stats results []*FileMatchResolver err error } @@ -238,7 +239,7 @@ func (s *indexedSearchRequest) doSearch(ctx context.Context, c chan<- indexedSea zoektStream = zoektSearchHEADOnlyFilesStream default: err := fmt.Errorf("unexpected indexedSearchRequest type: %q", s.typ) - c <- indexedSearchEvent{common: SearchResultsCommon{}, results: nil, err: err} + c <- indexedSearchEvent{common: streaming.Stats{}, results: nil, err: err} return } @@ -265,12 +266,12 @@ func (s *indexedSearchRequest) doSearch(ctx context.Context, c chan<- indexedSea if err == errNoResultsInTimeout { err = nil - c <- indexedSearchEvent{common: SearchResultsCommon{Status: mkStatusMap(search.RepoStatusTimedout | search.RepoStatusIndexed)}} + c <- indexedSearchEvent{common: streaming.Stats{Status: mkStatusMap(search.RepoStatusTimedout | search.RepoStatusIndexed)}} return } if err != nil { - c <- indexedSearchEvent{common: SearchResultsCommon{}, results: nil, err: err} + c <- indexedSearchEvent{common: streaming.Stats{}, results: nil, err: err} return } @@ -292,7 +293,7 @@ func (s *indexedSearchRequest) doSearch(ctx context.Context, c chan<- indexedSea } c <- indexedSearchEvent{ - common: SearchResultsCommon{ + common: streaming.Stats{ Status: statusMap, IsLimitHit: event.limitHit, }, @@ -303,7 +304,7 @@ func (s *indexedSearchRequest) doSearch(ctx context.Context, c chan<- indexedSea // Successfully searched everything. Communicate every indexed repo was // searched in case it didn't have a result. 
- c <- indexedSearchEvent{common: SearchResultsCommon{Status: mkStatusMap(search.RepoStatusSearched | search.RepoStatusIndexed)}} + c <- indexedSearchEvent{common: streaming.Stats{Status: mkStatusMap(search.RepoStatusSearched | search.RepoStatusIndexed)}} } var errNoResultsInTimeout = errors.New("no results found in specified timeout") diff --git a/cmd/frontend/graphqlbackend/zoekt_test.go b/cmd/frontend/graphqlbackend/zoekt_test.go index 49f163236842..0a9b25fe2aa0 100644 --- a/cmd/frontend/graphqlbackend/zoekt_test.go +++ b/cmd/frontend/graphqlbackend/zoekt_test.go @@ -27,6 +27,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/search" searchbackend "github.com/sourcegraph/sourcegraph/internal/search/backend" "github.com/sourcegraph/sourcegraph/internal/search/query" + "github.com/sourcegraph/sourcegraph/internal/search/streaming" zoektutil "github.com/sourcegraph/sourcegraph/internal/search/zoekt" "github.com/sourcegraph/sourcegraph/internal/symbols/protocol" "github.com/sourcegraph/sourcegraph/internal/trace" @@ -67,7 +68,7 @@ func TestIndexedSearch(t *testing.T) { wantMatchURLs []string wantMatchInputRevs []string wantUnindexed []*search.RepositoryRevisions - wantCommon SearchResultsCommon + wantCommon streaming.Stats wantErr bool }{ { @@ -79,7 +80,7 @@ func TestIndexedSearch(t *testing.T) { useFullDeadline: false, since: func(time.Time) time.Duration { return time.Second - time.Millisecond }, }, - wantCommon: SearchResultsCommon{ + wantCommon: streaming.Stats{ Status: mkStatusMap(map[string]search.RepoStatus{ "foo/bar": search.RepoStatusSearched | search.RepoStatusIndexed, "foo/foobar": search.RepoStatusSearched | search.RepoStatusIndexed, @@ -96,7 +97,7 @@ func TestIndexedSearch(t *testing.T) { useFullDeadline: false, since: func(time.Time) time.Duration { return time.Minute }, }, - wantCommon: SearchResultsCommon{ + wantCommon: streaming.Stats{ Status: mkStatusMap(map[string]search.RepoStatus{ "foo/bar": search.RepoStatusIndexed | search.RepoStatusTimedout, "foo/foobar": search.RepoStatusIndexed | search.RepoStatusTimedout, @@ -112,7 +113,7 @@ func TestIndexedSearch(t *testing.T) { useFullDeadline: true, since: func(time.Time) time.Duration { return 0 }, }, - wantCommon: SearchResultsCommon{ + wantCommon: streaming.Stats{ Status: mkStatusMap(map[string]search.RepoStatus{ "foo/bar": search.RepoStatusIndexed | search.RepoStatusTimedout, "foo/foobar": search.RepoStatusIndexed | search.RepoStatusTimedout, @@ -173,7 +174,7 @@ func TestIndexedSearch(t *testing.T) { "", "", }, - wantCommon: SearchResultsCommon{ + wantCommon: streaming.Stats{ Status: mkStatusMap(map[string]search.RepoStatus{ "foo/bar": search.RepoStatusSearched | search.RepoStatusIndexed, "foo/foobar": search.RepoStatusSearched | search.RepoStatusIndexed, @@ -203,7 +204,7 @@ func TestIndexedSearch(t *testing.T) { }, since: func(time.Time) time.Duration { return 0 }, }, - wantCommon: SearchResultsCommon{ + wantCommon: streaming.Stats{ Status: mkStatusMap(map[string]search.RepoStatus{ "foo/bar": search.RepoStatusSearched | search.RepoStatusIndexed, }), @@ -238,7 +239,7 @@ func TestIndexedSearch(t *testing.T) { }, }, }, - wantCommon: SearchResultsCommon{ + wantCommon: streaming.Stats{ Status: mkStatusMap(map[string]search.RepoStatus{ "foo/bar": search.RepoStatusSearched | search.RepoStatusIndexed, }), @@ -315,7 +316,7 @@ func TestIndexedSearch(t *testing.T) { // Once we return more than one event we have to account for the proper order of results // in the tests. 
 			var (
-				gotCommon SearchResultsCommon
+				gotCommon streaming.Stats
 				gotFm     []*FileMatchResolver
 			)
 			for event := range indexed.Search(tt.args.ctx) {
@@ -323,7 +324,7 @@ func TestIndexedSearch(t *testing.T) {
 					t.Errorf("zoektSearchHEAD() error = %v, wantErr = %v", err, tt.wantErr)
 					return
 				}
-				gotCommon.update(&event.common)
+				gotCommon.Update(&event.common)
 				gotFm = append(gotFm, event.results...)
 			}
diff --git a/cmd/frontend/internal/search/search.go b/cmd/frontend/internal/search/search.go
index d95e7f9a5592..bb65832c0278 100644
--- a/cmd/frontend/internal/search/search.go
+++ b/cmd/frontend/internal/search/search.go
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
+	"github.com/sourcegraph/sourcegraph/internal/search/streaming/api"
 	"github.com/sourcegraph/sourcegraph/internal/trace"
 )
 
@@ -175,7 +176,17 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		})
 	}
 
-	pr := resultsResolver.Progress()
+	pr := api.BuildProgressEvent(api.ProgressStats{
+		MatchCount:          int(resultsResolver.MatchCount()),
+		ElapsedMilliseconds: int(resultsResolver.ElapsedMilliseconds()),
+		RepositoriesCount:   int(resultsResolver.RepositoriesCount()),
+		ExcludedArchived:    resultsResolver.ExcludedArchived,
+		ExcludedForks:       resultsResolver.ExcludedForks,
+		Timedout:            toNamer(resultsResolver.Timedout),
+		Missing:             toNamer(resultsResolver.Missing),
+		Cloning:             toNamer(resultsResolver.Cloning),
+		LimitHit:            resultsResolver.IsLimitHit,
+	})
 	pr.Done = true
 
 	_ = eventWriter.Event("progress", pr)
@@ -183,6 +194,15 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	_ = eventWriter.Event("done", map[string]interface{}{})
 }
 
+func toNamer(f func() []*graphqlbackend.RepositoryResolver) []api.Namer {
+	rs := f()
+	n := make([]api.Namer, 0, len(rs))
+	for _, r := range rs {
+		n = append(n, r)
+	}
+	return n
+}
+
 type searchResolver interface {
 	Results(context.Context) (*graphqlbackend.SearchResultsResolver, error)
 	SetResultChannel(c chan<- []graphqlbackend.SearchResultResolver)
diff --git a/cmd/frontend/graphqlbackend/search_progress.go b/internal/search/streaming/api/progress.go
similarity index 55%
rename from cmd/frontend/graphqlbackend/search_progress.go
rename to internal/search/streaming/api/progress.go
index d0847efee025..e7fed1d7fecf 100644
--- a/cmd/frontend/graphqlbackend/search_progress.go
+++ b/internal/search/streaming/api/progress.go
@@ -1,33 +1,50 @@
-package graphqlbackend
+package api
 
 import (
 	"fmt"
 	"strconv"
 	"strings"
-
-	"github.com/sourcegraph/sourcegraph/internal/search"
 )
 
-func number(i int) string {
-	if i < 1000 {
-		return strconv.Itoa(i)
+// BuildProgressEvent builds a progress event from the final aggregated ProgressStats.
+func BuildProgressEvent(stats ProgressStats) Progress { + skipped := []Skipped{} + + for _, handler := range skippedHandlers { + if sk, ok := handler(stats); ok { + skipped = append(skipped, sk) + } } - if i < 10000 { - return fmt.Sprintf("%d,%d", i/1000, i%1000) + + return Progress{ + RepositoriesCount: intPtr(stats.RepositoriesCount), + MatchCount: stats.MatchCount, + DurationMs: stats.ElapsedMilliseconds, + Skipped: skipped, } - return fmt.Sprintf("%dk", i/1000) } -func plural(one, many string, n int) string { - if n == 1 { - return one - } - return many +type Namer interface { + Name() string +} + +type ProgressStats struct { + MatchCount int + ElapsedMilliseconds int + RepositoriesCount int + ExcludedArchived int + ExcludedForks int + + Timedout []Namer + Missing []Namer + Cloning []Namer + + LimitHit bool } -func skippedReposHandler(repos []*RepositoryResolver, titleVerb, messageReason string, base search.Skipped) (search.Skipped, bool) { +func skippedReposHandler(repos []Namer, titleVerb, messageReason string, base Skipped) (Skipped, bool) { if len(repos) == 0 { - return search.Skipped{}, false + return Skipped{}, false } amount := number(len(repos)) @@ -55,78 +72,78 @@ func skippedReposHandler(repos []*RepositoryResolver, titleVerb, messageReason s return base, true } -func repositoryCloningHandler(resultsResolver *SearchResultsResolver) (search.Skipped, bool) { - repos := resultsResolver.Cloning() +func repositoryCloningHandler(resultsResolver ProgressStats) (Skipped, bool) { + repos := resultsResolver.Cloning messageReason := fmt.Sprintf("could not be searched since %s still cloning", plural("it is", "they are", len(repos))) - return skippedReposHandler(repos, "cloning", messageReason, search.Skipped{ - Reason: search.RepositoryCloning, - Severity: search.SeverityInfo, + return skippedReposHandler(repos, "cloning", messageReason, Skipped{ + Reason: RepositoryCloning, + Severity: SeverityInfo, }) } -func repositoryMissingHandler(resultsResolver *SearchResultsResolver) (search.Skipped, bool) { - return skippedReposHandler(resultsResolver.Missing(), "missing", "could not be searched", search.Skipped{ - Reason: search.RepositoryMissing, - Severity: search.SeverityInfo, +func repositoryMissingHandler(resultsResolver ProgressStats) (Skipped, bool) { + return skippedReposHandler(resultsResolver.Missing, "missing", "could not be searched", Skipped{ + Reason: RepositoryMissing, + Severity: SeverityInfo, }) } -func shardTimeoutHandler(resultsResolver *SearchResultsResolver) (search.Skipped, bool) { +func shardTimeoutHandler(resultsResolver ProgressStats) (Skipped, bool) { // This is not the same, but once we expose this more granular details // from our backend it will be shard specific. - return skippedReposHandler(resultsResolver.Timedout(), "timed out", "could not be searched in time", search.Skipped{ - Reason: search.ShardTimeout, - Severity: search.SeverityWarn, + return skippedReposHandler(resultsResolver.Timedout, "timed out", "could not be searched in time", Skipped{ + Reason: ShardTimeout, + Severity: SeverityWarn, }) } -func shardMatchLimitHandler(resultsResolver *SearchResultsResolver) (search.Skipped, bool) { +func shardMatchLimitHandler(resultsResolver ProgressStats) (Skipped, bool) { // We don't have the details of repo vs shard vs document limits yet. So // we just pretend all our shard limits. 
- if !resultsResolver.LimitHit() { - return search.Skipped{}, false + if !resultsResolver.LimitHit { + return Skipped{}, false } - return search.Skipped{ - Reason: search.ShardMatchLimit, + return Skipped{ + Reason: ShardMatchLimit, Title: "result limit hit", Message: "Not all results have been returned due to hitting a match limit. Sourcegraph has limits for the number of results returned from a line, document and repository.", - Severity: search.SeverityInfo, + Severity: SeverityInfo, }, true } -func excludedForkHandler(resultsResolver *SearchResultsResolver) (search.Skipped, bool) { +func excludedForkHandler(resultsResolver ProgressStats) (Skipped, bool) { forks := resultsResolver.ExcludedForks if forks == 0 { - return search.Skipped{}, false + return Skipped{}, false } amount := number(forks) - return search.Skipped{ - Reason: search.ExcludedFork, + return Skipped{ + Reason: ExcludedFork, Title: fmt.Sprintf("%s forked", amount), Message: "By default we exclude forked repositories. Include them with `fork:yes` in your query.", - Severity: search.SeverityInfo, - Suggested: &search.SkippedSuggested{ + Severity: SeverityInfo, + Suggested: &SkippedSuggested{ Title: "include forked", QueryExpression: "fork:yes", }, }, true } -func excludedArchiveHandler(resultsResolver *SearchResultsResolver) (search.Skipped, bool) { +func excludedArchiveHandler(resultsResolver ProgressStats) (Skipped, bool) { archived := resultsResolver.ExcludedArchived if archived == 0 { - return search.Skipped{}, false + return Skipped{}, false } amount := number(archived) - return search.Skipped{ - Reason: search.ExcludedArchive, + return Skipped{ + Reason: ExcludedArchive, Title: fmt.Sprintf("%s archived", amount), Message: "By default we exclude archived repositories. Include them with `archived:yes` in your query.", - Severity: search.SeverityInfo, - Suggested: &search.SkippedSuggested{ + Severity: SeverityInfo, + Suggested: &SkippedSuggested{ Title: "include archived", QueryExpression: "archived:yes", }, @@ -134,7 +151,7 @@ func excludedArchiveHandler(resultsResolver *SearchResultsResolver) (search.Skip } // TODO implement all skipped reasons -var skippedHandlers = []func(*SearchResultsResolver) (search.Skipped, bool){ +var skippedHandlers = []func(stats ProgressStats) (Skipped, bool){ repositoryMissingHandler, repositoryCloningHandler, // documentMatchLimitHandler, @@ -145,24 +162,23 @@ var skippedHandlers = []func(*SearchResultsResolver) (search.Skipped, bool){ excludedArchiveHandler, } -// Progress builds a progress event from a final results resolver. 
-func (sr *SearchResultsResolver) Progress() search.Progress {
-	skipped := []search.Skipped{}
+func intPtr(i int) *int {
+	return &i
+}
 
-	for _, handler := range skippedHandlers {
-		if sk, ok := handler(sr); ok {
-			skipped = append(skipped, sk)
-		}
+func number(i int) string {
+	if i < 1000 {
+		return strconv.Itoa(i)
 	}
-
-	return search.Progress{
-		RepositoriesCount: intPtr(int(sr.RepositoriesCount())),
-		MatchCount:        int(sr.MatchCount()),
-		DurationMs:        int(sr.ElapsedMilliseconds()),
-		Skipped:           skipped,
+	if i < 10000 {
+		return fmt.Sprintf("%d,%d", i/1000, i%1000)
 	}
+	return fmt.Sprintf("%dk", i/1000)
 }
 
-func intPtr(i int) *int {
-	return &i
+func plural(one, many string, n int) string {
+	if n == 1 {
+		return one
+	}
+	return many
 }
diff --git a/internal/search/streaming/api/progress_test.go b/internal/search/streaming/api/progress_test.go
new file mode 100644
index 000000000000..461f77cdcbd6
--- /dev/null
+++ b/internal/search/streaming/api/progress_test.go
@@ -0,0 +1,60 @@
+package api
+
+import (
+	"flag"
+	"fmt"
+	"testing"
+
+	"github.com/sourcegraph/sourcegraph/internal/testutil"
+)
+
+var updateGolden = flag.Bool("update", false, "Update testdata goldens")
+
+func TestSearchProgress(t *testing.T) {
+	var timedout100 []Namer
+	for i := 0; i < 100; i++ {
+		r := repo{fmt.Sprintf("timedout-%d", i)}
+		timedout100 = append(timedout100, r)
+	}
+	cases := map[string]ProgressStats{
+		"empty": {},
+		"timedout100": {
+			MatchCount:          0,
+			ElapsedMilliseconds: 0,
+			RepositoriesCount:   100,
+			ExcludedArchived:    0,
+			ExcludedForks:       0,
+			Timedout:            timedout100,
+			Missing:             nil,
+			Cloning:             nil,
+			LimitHit:            false,
+		},
+		"all": {
+			MatchCount:          1,
+			ElapsedMilliseconds: 0,
+			RepositoriesCount:   5,
+			ExcludedArchived:    1,
+			ExcludedForks:       5,
+			Timedout:            []Namer{repo{"timedout-1"}},
+			Missing:             []Namer{repo{"missing-1"}, repo{"missing-2"}},
+			Cloning:             []Namer{repo{"cloning-1"}},
+			LimitHit:            true,
+		},
+	}
+
+	for name, c := range cases {
+		t.Run(name, func(t *testing.T) {
+			got := BuildProgressEvent(c)
+			got.DurationMs = 0 // clear out non-deterministic field
+			testutil.AssertGolden(t, "testdata/golden/"+t.Name()+".json", *updateGolden, got)
+		})
+	}
+}
+
+type repo struct {
+	name string
+}
+
+func (r repo) Name() string {
+	return r.name
+}
diff --git a/cmd/frontend/graphqlbackend/testdata/golden/TestSearchProgress/all.json b/internal/search/streaming/api/testdata/golden/TestSearchProgress/all.json
similarity index 100%
rename from cmd/frontend/graphqlbackend/testdata/golden/TestSearchProgress/all.json
rename to internal/search/streaming/api/testdata/golden/TestSearchProgress/all.json
diff --git a/cmd/frontend/graphqlbackend/testdata/golden/TestSearchProgress/empty.json b/internal/search/streaming/api/testdata/golden/TestSearchProgress/empty.json
similarity index 100%
rename from cmd/frontend/graphqlbackend/testdata/golden/TestSearchProgress/empty.json
rename to internal/search/streaming/api/testdata/golden/TestSearchProgress/empty.json
diff --git a/cmd/frontend/graphqlbackend/testdata/golden/TestSearchProgress/timedout100.json b/internal/search/streaming/api/testdata/golden/TestSearchProgress/timedout100.json
similarity index 60%
rename from cmd/frontend/graphqlbackend/testdata/golden/TestSearchProgress/timedout100.json
rename to internal/search/streaming/api/testdata/golden/TestSearchProgress/timedout100.json
index a4a68c247242..347edf651ba6 100644
--- a/cmd/frontend/graphqlbackend/testdata/golden/TestSearchProgress/timedout100.json
+++ 
b/internal/search/streaming/api/testdata/golden/TestSearchProgress/timedout100.json
@@ -7,7 +7,7 @@
     {
       "reason": "shard-timeout",
       "title": "100 timed out",
-      "message": "100 repositories could not be searched in time. Try searching again or reducing the scope of your query with `repo:`, `repogroup:` or other filters.\n* `timedout-33`\n* `timedout-19`\n* `timedout-80`\n* `timedout-34`\n* `timedout-69`\n* `timedout-28`\n* `timedout-0`\n* `timedout-86`\n* `timedout-78`\n* `timedout-52`\n* ...",
+      "message": "100 repositories could not be searched in time. Try searching again or reducing the scope of your query with `repo:`, `repogroup:` or other filters.\n* `timedout-0`\n* `timedout-1`\n* `timedout-2`\n* `timedout-3`\n* `timedout-4`\n* `timedout-5`\n* `timedout-6`\n* `timedout-7`\n* `timedout-8`\n* `timedout-9`\n* ...",
       "severity": "warn"
     }
   ]
diff --git a/internal/search/progress.go b/internal/search/streaming/api/types.go
similarity index 99%
rename from internal/search/progress.go
rename to internal/search/streaming/api/types.go
index 7cb54589c76b..96a2fcb646b4 100644
--- a/internal/search/progress.go
+++ b/internal/search/streaming/api/types.go
@@ -1,4 +1,4 @@
-package search
+package api
 
 // Progress is an aggregate type representing a progress update.
 type Progress struct {
diff --git a/internal/search/streaming/progress.go b/internal/search/streaming/progress.go
new file mode 100644
index 000000000000..683c53be4815
--- /dev/null
+++ b/internal/search/streaming/progress.go
@@ -0,0 +1,95 @@
+package streaming
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/sourcegraph/sourcegraph/internal/api"
+	"github.com/sourcegraph/sourcegraph/internal/search"
+	"github.com/sourcegraph/sourcegraph/internal/types"
+)
+
+// Stats contains fields that should be returned by all funcs
+// that contribute to the overall search result set.
+type Stats struct {
+	// IsLimitHit is true if we do not have all results that match the query.
+	IsLimitHit bool
+
+	// Repos that were matched by the repo-related filters. This should only
+	// be set once by search, when we have resolved Repos.
+	Repos map[api.RepoID]*types.RepoName
+
+	// Status is a RepoStatusMap of repository search statuses.
+	Status search.RepoStatusMap
+
+	// ExcludedForks is the count of excluded forked repos because the search
+	// query doesn't apply to them, but that we want to know about.
+	ExcludedForks int
+
+	// ExcludedArchived is the count of excluded archived repos because the
+	// search query doesn't apply to them, but that we want to know about.
+	ExcludedArchived int
+
+	// IsIndexUnavailable is true if indexed search was unavailable.
+	IsIndexUnavailable bool
+}
+
+// Update updates c with the other data, deduping as necessary. It modifies c but
+// does not modify other.
+func (c *Stats) Update(other *Stats) { + if other == nil { + return + } + + c.IsLimitHit = c.IsLimitHit || other.IsLimitHit + c.IsIndexUnavailable = c.IsIndexUnavailable || other.IsIndexUnavailable + + if c.Repos == nil { + c.Repos = other.Repos + } else { + for id, r := range other.Repos { + c.Repos[id] = r + } + } + + c.Status.Union(&other.Status) + + c.ExcludedForks = c.ExcludedForks + other.ExcludedForks + c.ExcludedArchived = c.ExcludedArchived + other.ExcludedArchived +} + +func (c *Stats) String() string { + if c == nil { + return "Stats{}" + } + + parts := []string{ + fmt.Sprintf("status=%s", c.Status.String()), + } + nums := []struct { + name string + n int + }{ + {"repos", len(c.Repos)}, + {"excludedForks", c.ExcludedForks}, + {"excludedArchived", c.ExcludedArchived}, + } + for _, p := range nums { + if p.n != 0 { + parts = append(parts, fmt.Sprintf("%s=%d", p.name, p.n)) + } + } + if c.IsLimitHit { + parts = append(parts, "limitHit") + } + if c.IsIndexUnavailable { + parts = append(parts, "indexUnavailable") + } + + return "Stats{" + strings.Join(parts, " ") + "}" +} + +func (c *Stats) Equal(other *Stats) bool { + return reflect.DeepEqual(c, other) +}
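
Reviewer note, not part of the patch: a minimal sketch of how the two extracted pieces compose after this refactor. streaming.Stats is the running aggregate that each searcher folds per-repo stats into via Update, and api.BuildProgressEvent turns the final numbers into the progress event the stream handler emits. The repoName type and all counts below are illustrative assumptions; the real caller in cmd/frontend/internal/search/search.go passes resolver-backed api.Namer values instead.

package main

import (
	"fmt"

	"github.com/sourcegraph/sourcegraph/internal/search/streaming"
	"github.com/sourcegraph/sourcegraph/internal/search/streaming/api"
)

// repoName is a hypothetical stand-in for the resolver types that
// implement api.Namer in the frontend.
type repoName string

func (r repoName) Name() string { return string(r) }

func main() {
	// Fold per-backend stats into a running aggregate, as the searchers in
	// this patch do with common.Update(&event.common).
	var agg streaming.Stats
	agg.Update(&streaming.Stats{IsLimitHit: true, ExcludedForks: 2})
	agg.Update(&streaming.Stats{ExcludedArchived: 1})

	// Translate the aggregate into the API layer's ProgressStats and build
	// the event; the counts here are made up for the example.
	pr := api.BuildProgressEvent(api.ProgressStats{
		MatchCount:        123,
		RepositoriesCount: 4,
		ExcludedForks:     agg.ExcludedForks,
		ExcludedArchived:  agg.ExcludedArchived,
		Timedout:          []api.Namer{repoName("github.com/foo/bar")},
		LimitHit:          agg.IsLimitHit,
	})
	pr.Done = true // marks the final progress event on the stream

	fmt.Println(agg.String())    // e.g. Stats{... excludedForks=2 excludedArchived=1 limitHit}
	fmt.Println(len(pr.Skipped)) // skipped reasons collected by the handlers above
}

Keeping ProgressStats a plain value type with a small Namer interface is what lets progress.go live under internal/search/streaming/api without importing graphqlbackend, which is the point of the extraction.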