From a5131a09eb459e605b978a7b67186a090309b5f2 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 30 Jan 2025 11:03:00 -0500 Subject: [PATCH 01/28] Add Audit Log tab to tctl top (#51620) The new tab consumes `audit_failed_emit_events`, `teleport_audit_stored_trimmed_events`, and `teleport_audit_emitted_event_sizes` to help troubleshoot issues related to audit log backends. The metrics are displayed in graphs which illustrate the change in metrics over the sampling period. --- tool/tctl/common/top/model.go | 59 ++++++++++++++++- tool/tctl/common/top/report.go | 118 ++++++++++++++++++++++++++++++++- tool/tsh/common/latency.go | 1 + 3 files changed, 176 insertions(+), 2 deletions(-) diff --git a/tool/tctl/common/top/model.go b/tool/tctl/common/top/model.go index be7debcd6105e..dd4668a197d64 100644 --- a/tool/tctl/common/top/model.go +++ b/tool/tctl/common/top/model.go @@ -103,6 +103,8 @@ func (m *topModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { m.selected = 2 case "4": m.selected = 3 + case "5": + m.selected = 4 case "right": m.selected = min(m.selected+1, len(tabs)-1) case "left": @@ -218,6 +220,8 @@ func (m *topModel) contentView() string { return renderCache(m.report, m.height, m.width) case 3: return renderWatcher(m.report, m.height, m.width) + case 4: + return renderAudit(m.report, m.height, m.width) default: return "" } @@ -412,6 +416,7 @@ func renderWatcher(report *Report, height, width int) string { eventData, asciigraph.Height(graphHeight), asciigraph.Width(graphWidth-15), + asciigraph.UpperBound(1), ) eventCountContent := boxedView("Events/Sec", countPlot, graphWidth) @@ -423,6 +428,7 @@ func renderWatcher(report *Report, height, width int) string { sizeData, asciigraph.Height(graphHeight), asciigraph.Width(graphWidth-15), + asciigraph.UpperBound(1), ) eventSizeContent := boxedView("Bytes/Sec", sizePlot, graphWidth) @@ -449,6 +455,54 @@ func renderWatcher(report *Report, height, width int) string { ) } 
+// renderAudit generates the view for the audit stats tab. +func renderAudit(report *Report, height, width int) string { + graphHeight := height / 3 + graphWidth := width + + eventsLegend := lipgloss.JoinHorizontal( + lipgloss.Left, + "- Emitted", + failedEventStyle.Render(" - Failed"), + trimmedEventStyle.Render(" - Trimmed"), + ) + + eventsPlot := asciigraph.PlotMany( + [][]float64{ + report.Audit.EmittedEventsBuffer.Data(graphWidth - 15), + report.Audit.FailedEventsBuffer.Data(graphWidth - 15), + report.Audit.TrimmedEventsBuffer.Data(graphWidth - 15), + }, + asciigraph.Height(graphHeight), + asciigraph.Width(graphWidth-15), + asciigraph.UpperBound(1), + asciigraph.SeriesColors(asciigraph.Default, asciigraph.Red, asciigraph.Goldenrod), + asciigraph.Caption(eventsLegend), + ) + eventGraph := boxedView("Events Emitted", eventsPlot, graphWidth) + + eventSizePlot := asciigraph.Plot( + report.Audit.EventSizeBuffer.Data(graphWidth-15), + asciigraph.Height(graphHeight), + asciigraph.Width(graphWidth-15), + asciigraph.UpperBound(1), + ) + sizeGraph := boxedView("Event Sizes", eventSizePlot, graphWidth) + + graphStyle := lipgloss.NewStyle(). + Width(graphWidth). + Padding(0). + Margin(0). + Align(lipgloss.Left) + + return lipgloss.JoinVertical(lipgloss.Left, + graphStyle.Render( + eventGraph, + sizeGraph, + ), + ) +} + // tabView renders the tabbed content in the header. func tabView(selectedTab int) string { output := lipgloss.NewStyle(). 
@@ -520,5 +574,8 @@ var ( selectedColor = lipgloss.Color("4") - tabs = []string{"Common", "Backend", "Cache", "Watcher"} + failedEventStyle = lipgloss.NewStyle().Foreground(lipgloss.Color(fmt.Sprintf("%d", asciigraph.Red))) + trimmedEventStyle = lipgloss.NewStyle().Foreground(lipgloss.Color(fmt.Sprintf("%d", asciigraph.Goldenrod))) + + tabs = []string{"Common", "Backend", "Cache", "Watcher", "Audit"} ) diff --git a/tool/tctl/common/top/report.go b/tool/tctl/common/top/report.go index 95ff68382804f..b185daf9958e9 100644 --- a/tool/tctl/common/top/report.go +++ b/tool/tctl/common/top/report.go @@ -59,6 +59,32 @@ type Report struct { Cluster ClusterStats // Watcher is watcher stats Watcher *WatcherStats + // Audit contains stats for audit event backends. + Audit *AuditStats +} + +// AuditStats contains metrics related to the audit log. +type AuditStats struct { + // FailedEventsCounter tallies the frequency of failed events. + FailedEventsCounter *Counter + // FailedEventsBuffer contains the historical frequencies of + // the FailedEventsCounter. + FailedEventsBuffer *utils.CircularBuffer + // EmittedEventsCounter tallies the frequency of all emitted events. + EmittedEventsCounter *Counter + // EmittedEventsBuffer contains the historical frequencies of + // the EmittedEventsCounter. + EmittedEventsBuffer *utils.CircularBuffer + // EventSizeCounter tallies the frequency of all events. + EventSizeCounter *Counter + // EventSizeBuffer contains the historical sizes of + // the EventSizeCounter. + EventSizeBuffer *utils.CircularBuffer + // EventsCounter tallies the frequency of trimmed events. + TrimmedEventsCounter *Counter + // TrimmedEventsBuffer contains the historical sizes of + // the TrimmedEventsCounter. + TrimmedEventsBuffer *utils.CircularBuffer } // WatcherStats contains watcher stats @@ -256,7 +282,8 @@ type Counter struct { } // SetFreq sets counter frequency based on the previous value -// and the time period +// and the time period. 
SetFreq should be preferred over UpdateFreq +// when initializing a Counter from previous statistics. func (c *Counter) SetFreq(prevCount Counter, period time.Duration) { if period == 0 { return @@ -265,6 +292,25 @@ func (c *Counter) SetFreq(prevCount Counter, period time.Duration) { c.Freq = &freq } +// UpdateFreq sets counter frequency based on the previous value +// and the time period. UpdateFreq should be preferred over SetFreq +// if the Counter is reused. +func (c *Counter) UpdateFreq(currentCount int64, period time.Duration) { + if period == 0 { + return + } + + // Do not calculate the frequency until there are two data points. + if c.Count == 0 && c.Freq == nil { + c.Count = currentCount + return + } + + freq := float64(currentCount-c.Count) / float64(period/time.Second) + c.Freq = &freq + c.Count = currentCount +} + // GetFreq returns frequency of the request func (c Counter) GetFreq() float64 { if c.Freq == nil { @@ -424,6 +470,13 @@ func generateReport(metrics map[string]*dto.MetricFamily, prev *Report, period t Roles: getGaugeValue(metrics[prometheus.BuildFQName(teleport.MetricNamespace, "", "roles_total")]), } + var auditStats *AuditStats + if prev != nil { + auditStats = prev.Audit + } + + re.Audit = getAuditStats(metrics, auditStats, period) + if prev != nil { re.Cluster.GenerateRequestsCount.SetFreq(prev.Cluster.GenerateRequestsCount, period) re.Cluster.GenerateRequestsThrottledCount.SetFreq(prev.Cluster.GenerateRequestsThrottledCount, period) @@ -548,6 +601,69 @@ func getWatcherStats(metrics map[string]*dto.MetricFamily, prev *WatcherStats, p return stats } +func getAuditStats(metrics map[string]*dto.MetricFamily, prev *AuditStats, period time.Duration) *AuditStats { + if prev == nil { + failed, err := utils.NewCircularBuffer(150) + if err != nil { + return nil + } + + events, err := utils.NewCircularBuffer(150) + if err != nil { + return nil + } + + trimmed, err := utils.NewCircularBuffer(150) + if err != nil { + return nil + } + + sizes, err := 
utils.NewCircularBuffer(150) + if err != nil { + return nil + } + + prev = &AuditStats{ + FailedEventsBuffer: failed, + FailedEventsCounter: &Counter{}, + EmittedEventsBuffer: events, + EmittedEventsCounter: &Counter{}, + TrimmedEventsBuffer: trimmed, + TrimmedEventsCounter: &Counter{}, + EventSizeBuffer: sizes, + EventSizeCounter: &Counter{}, + } + } + + updateCounter := func(metrics map[string]*dto.MetricFamily, metric string, counter *Counter, buf *utils.CircularBuffer) { + current := getCounterValue(metrics[metric]) + counter.UpdateFreq(current, period) + buf.Add(counter.GetFreq()) + } + + updateCounter(metrics, prometheus.BuildFQName("", "audit", "failed_emit_events"), prev.FailedEventsCounter, prev.FailedEventsBuffer) + updateCounter(metrics, prometheus.BuildFQName(teleport.MetricNamespace, "audit", "stored_trimmed_events"), prev.TrimmedEventsCounter, prev.TrimmedEventsBuffer) + + histogram := getHistogram(metrics[prometheus.BuildFQName(teleport.MetricNamespace, "", "audit_emitted_event_sizes")], atIndex(0)) + + prev.EmittedEventsCounter.UpdateFreq(histogram.Count, period) + prev.EmittedEventsBuffer.Add(prev.EmittedEventsCounter.GetFreq()) + + prev.EventSizeCounter.UpdateFreq(int64(histogram.Sum), period) + prev.EventSizeBuffer.Add(prev.EventSizeCounter.GetFreq()) + + return &AuditStats{ + FailedEventsBuffer: prev.FailedEventsBuffer, + FailedEventsCounter: prev.FailedEventsCounter, + EmittedEventsBuffer: prev.EmittedEventsBuffer, + EmittedEventsCounter: prev.EmittedEventsCounter, + TrimmedEventsBuffer: prev.TrimmedEventsBuffer, + TrimmedEventsCounter: prev.TrimmedEventsCounter, + EventSizeBuffer: prev.EventSizeBuffer, + EventSizeCounter: prev.EventSizeCounter, + } +} + func getRemoteClusters(metric *dto.MetricFamily) []RemoteCluster { if metric == nil || metric.GetType() != dto.MetricType_GAUGE || len(metric.Metric) == 0 { return nil diff --git a/tool/tsh/common/latency.go b/tool/tsh/common/latency.go index 6089f58c7bd44..6c73bd73d623b 100644 --- 
a/tool/tsh/common/latency.go +++ b/tool/tsh/common/latency.go @@ -151,6 +151,7 @@ func (m *latencyModel) View() string { [][]float64{clientData, serverData}, asciigraph.Height(m.h-4), asciigraph.Width(m.w), + asciigraph.UpperBound(1), asciigraph.SeriesColors(clientColor, serverColor), asciigraph.Caption(legend), ) From 476c9e40f1fd428009fab9869c6ec6e99276c7e6 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Thu, 30 Jan 2025 16:27:14 +0000 Subject: [PATCH 02/28] DiscoveryConfigStatus: update counters once per iteration (#51647) DiscoveryService can process DiscoveryConfig matchers and will update its Status field depending on the number of found resources. Currently, it was updating them in real-time-ish: every time it found a new resource, it would update the counter in memory and propagate that change into the cluster shared storage. However, this resulted in counters fluctuating a lot and causing confusion from the user's point of view (being at 0 every couple of minutes). Instead, this PR ensures we update the values only once per iteration cycle (happens at discovery_service.poll_interval - defaults to 5 minutes). This means we lose real-time observation of the counters, but we gain stability on the values which should help UX. 
--- lib/srv/discovery/database_watcher.go | 14 ++--- lib/srv/discovery/discovery.go | 29 ++-------- lib/srv/discovery/discovery_test.go | 55 +++++++++++++------ lib/srv/discovery/kube_integration_watcher.go | 16 ++---- lib/srv/discovery/status.go | 2 - lib/srv/server/azure_watcher.go | 2 + lib/srv/server/ec2_watcher.go | 2 + lib/srv/server/gcp_watcher.go | 2 + lib/srv/server/watcher.go | 15 ++++- 9 files changed, 71 insertions(+), 66 deletions(-) diff --git a/lib/srv/discovery/database_watcher.go b/lib/srv/discovery/database_watcher.go index 75c306c9a6306..d9b52d3671081 100644 --- a/lib/srv/discovery/database_watcher.go +++ b/lib/srv/discovery/database_watcher.go @@ -85,7 +85,6 @@ func (s *Server) startDatabaseWatchers() error { go func() { for { - discoveryConfigsChanged := map[string]struct{}{} resourcesFoundByGroup := make(map[awsResourceGroup]int) select { @@ -99,7 +98,6 @@ func (s *Server) startDatabaseWatchers() error { resourceGroup := awsResourceGroupFromLabels(db.GetStaticLabels()) resourcesFoundByGroup[resourceGroup] += 1 - discoveryConfigsChanged[resourceGroup.discoveryConfigName] = struct{}{} dbs = append(dbs, db) @@ -136,9 +134,6 @@ func (s *Server) startDatabaseWatchers() error { return } - for dc := range discoveryConfigsChanged { - s.updateDiscoveryConfigStatus(dc) - } s.upsertTasksForAWSRDSFailedEnrollments() } }() @@ -203,16 +198,15 @@ func (s *Server) databaseWatcherIterationStarted() { }, ) - for _, g := range awsResultGroups { - s.awsRDSResourcesStatus.iterationStarted(g) - } - discoveryConfigs := slices.FilterMapUnique(awsResultGroups, func(g awsResourceGroup) (s string, include bool) { return g.discoveryConfigName, true }) s.updateDiscoveryConfigStatus(discoveryConfigs...) 
- s.awsRDSResourcesStatus.reset() + for _, g := range awsResultGroups { + s.awsRDSResourcesStatus.iterationStarted(g) + } + s.awsRDSTasks.reset() } diff --git a/lib/srv/discovery/discovery.go b/lib/srv/discovery/discovery.go index 0f2617fb3f140..b5c9b14df95ec 100644 --- a/lib/srv/discovery/discovery.go +++ b/lib/srv/discovery/discovery.go @@ -584,6 +584,7 @@ func (s *Server) initAWSWatchers(matchers []types.AWSMatcher) error { server.WithPollInterval(s.PollInterval), server.WithTriggerFetchC(s.newDiscoveryConfigChangedSub()), server.WithPreFetchHookFn(s.ec2WatcherIterationStarted), + server.WithClock(s.clock), ) if err != nil { return trace.Wrap(err) @@ -643,15 +644,14 @@ func (s *Server) ec2WatcherIterationStarted() { return resourceGroup, include }, ) - for _, g := range awsResultGroups { - s.awsEC2ResourcesStatus.iterationStarted(g) - } - discoveryConfigs := libslices.FilterMapUnique(awsResultGroups, func(g awsResourceGroup) (s string, include bool) { return g.discoveryConfigName, true }) s.updateDiscoveryConfigStatus(discoveryConfigs...) s.awsEC2ResourcesStatus.reset() + for _, g := range awsResultGroups { + s.awsEC2ResourcesStatus.iterationStarted(g) + } s.awsEC2Tasks.reset() } @@ -799,15 +799,7 @@ func (s *Server) initAzureWatchers(ctx context.Context, matchers []types.AzureMa s.ctx, s.getAllAzureServerFetchers, server.WithPollInterval(s.PollInterval), server.WithTriggerFetchC(s.newDiscoveryConfigChangedSub()), - server.WithPreFetchHookFn(func() { - discoveryConfigs := libslices.FilterMapUnique( - s.getAllAzureServerFetchers(), - func(f server.Fetcher) (s string, include bool) { - return f.GetDiscoveryConfigName(), f.GetDiscoveryConfigName() != "" - }, - ) - s.updateDiscoveryConfigStatus(discoveryConfigs...) 
- }), + server.WithClock(s.clock), ) if err != nil { return trace.Wrap(err) @@ -866,15 +858,7 @@ func (s *Server) initGCPServerWatcher(ctx context.Context, vmMatchers []types.GC s.ctx, s.getAllGCPServerFetchers, server.WithPollInterval(s.PollInterval), server.WithTriggerFetchC(s.newDiscoveryConfigChangedSub()), - server.WithPreFetchHookFn(func() { - discoveryConfigs := libslices.FilterMapUnique( - s.getAllGCPServerFetchers(), - func(f server.Fetcher) (s string, include bool) { - return f.GetDiscoveryConfigName(), f.GetDiscoveryConfigName() != "" - }, - ) - s.updateDiscoveryConfigStatus(discoveryConfigs...) - }), + server.WithClock(s.clock), ) if err != nil { return trace.Wrap(err) @@ -1302,7 +1286,6 @@ func (s *Server) handleEC2Discovery() { s.logHandleInstancesErr(err) } - s.updateDiscoveryConfigStatus(instances.EC2.DiscoveryConfigName) s.upsertTasksForAWSEC2FailedEnrollments() case <-s.ctx.Done(): s.ec2Watcher.Stop() diff --git a/lib/srv/discovery/discovery_test.go b/lib/srv/discovery/discovery_test.go index 40c3ed6615e0e..dcde4aa6d386b 100644 --- a/lib/srv/discovery/discovery_test.go +++ b/lib/srv/discovery/discovery_test.go @@ -976,12 +976,22 @@ func TestDiscoveryServer(t *testing.T) { if tc.wantDiscoveryConfigStatus != nil { // It can take a while for the status to be updated. 
require.Eventually(t, func() bool { + fakeClock.Advance(server.PollInterval) storedDiscoveryConfig, err := tlsServer.Auth().DiscoveryConfigs.GetDiscoveryConfig(ctx, tc.discoveryConfig.GetName()) require.NoError(t, err) if len(storedDiscoveryConfig.Status.IntegrationDiscoveredResources) == 0 { return false } - require.Equal(t, *tc.wantDiscoveryConfigStatus, storedDiscoveryConfig.Status) + want := *tc.wantDiscoveryConfigStatus + got := storedDiscoveryConfig.Status + + require.Equal(t, want.State, got.State) + require.Equal(t, want.DiscoveredResources, got.DiscoveredResources) + require.Equal(t, want.ErrorMessage, got.ErrorMessage) + for expectedKey, expectedValue := range want.IntegrationDiscoveredResources { + require.Contains(t, got.IntegrationDiscoveredResources, expectedKey) + require.Equal(t, expectedValue, got.IntegrationDiscoveredResources[expectedKey]) + } return true }, 500*time.Millisecond, 50*time.Millisecond) } @@ -2175,16 +2185,17 @@ func TestDiscoveryDatabase(t *testing.T) { } tcs := []struct { - name string - existingDatabases []types.Database - integrationsOnlyCredentials bool - awsMatchers []types.AWSMatcher - azureMatchers []types.AzureMatcher - expectDatabases []types.Database - discoveryConfigs func(*testing.T) []*discoveryconfig.DiscoveryConfig - discoveryConfigStatusCheck func(*testing.T, discoveryconfig.Status) - userTasksCheck func(*testing.T, []*usertasksv1.UserTask) - wantEvents int + name string + existingDatabases []types.Database + integrationsOnlyCredentials bool + awsMatchers []types.AWSMatcher + azureMatchers []types.AzureMatcher + expectDatabases []types.Database + discoveryConfigs func(*testing.T) []*discoveryconfig.DiscoveryConfig + discoveryConfigStatusCheck func(*testing.T, discoveryconfig.Status) + discoveryConfigStatusExpectedResources int + userTasksCheck func(*testing.T, []*usertasksv1.UserTask) + wantEvents int }{ { name: "discover AWS database", @@ -2383,11 +2394,11 @@ func TestDiscoveryDatabase(t *testing.T) { }, 
wantEvents: 1, discoveryConfigStatusCheck: func(t *testing.T, s discoveryconfig.Status) { - require.Equal(t, uint64(1), s.DiscoveredResources) require.Equal(t, uint64(1), s.IntegrationDiscoveredResources[integrationName].AwsRds.Enrolled) require.Equal(t, uint64(1), s.IntegrationDiscoveredResources[integrationName].AwsRds.Found) require.Zero(t, s.IntegrationDiscoveredResources[integrationName].AwsRds.Failed) }, + discoveryConfigStatusExpectedResources: 1, }, { name: "running in integrations-only-mode with a matcher without an integration, must find 1 database", @@ -2417,10 +2428,10 @@ func TestDiscoveryDatabase(t *testing.T) { expectDatabases: []types.Database{}, wantEvents: 0, discoveryConfigStatusCheck: func(t *testing.T, s discoveryconfig.Status) { - require.Equal(t, uint64(1), s.DiscoveredResources) require.Equal(t, uint64(1), s.IntegrationDiscoveredResources[integrationName].AwsEks.Found) require.Zero(t, s.IntegrationDiscoveredResources[integrationName].AwsEks.Enrolled) }, + discoveryConfigStatusExpectedResources: 1, }, { name: "discovery config status must be updated even when there are no resources", @@ -2439,9 +2450,9 @@ func TestDiscoveryDatabase(t *testing.T) { expectDatabases: []types.Database{}, wantEvents: 0, discoveryConfigStatusCheck: func(t *testing.T, s discoveryconfig.Status) { - require.Equal(t, uint64(0), s.DiscoveredResources) require.Equal(t, "DISCOVERY_CONFIG_STATE_SYNCING", s.State) }, + discoveryConfigStatusExpectedResources: 0, }, { name: "discover-rds user task must be created when database is not configured to allow IAM DB Authentication", @@ -2485,6 +2496,7 @@ func TestDiscoveryDatabase(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { t.Parallel() + fakeClock := clockwork.NewFakeClock() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -2578,6 +2590,7 @@ func TestDiscoveryDatabase(t *testing.T) { waitForReconcile <- struct{}{} }, DiscoveryGroup: mainDiscoveryGroup, + clock: 
fakeClock, }) require.NoError(t, err) @@ -2627,10 +2640,18 @@ func TestDiscoveryDatabase(t *testing.T) { } if tc.discoveryConfigStatusCheck != nil { - dc, err := tlsServer.Auth().GetDiscoveryConfig(ctx, discoveryConfigName) - require.NoError(t, err) + require.Eventually(t, func() bool { + fakeClock.Advance(srv.PollInterval * 2) + dc, err := tlsServer.Auth().GetDiscoveryConfig(ctx, discoveryConfigName) + require.NoError(t, err) + if tc.discoveryConfigStatusExpectedResources != int(dc.Status.DiscoveredResources) { + return false + } + + tc.discoveryConfigStatusCheck(t, dc.Status) + return true + }, time.Second, 100*time.Millisecond) - tc.discoveryConfigStatusCheck(t, dc.Status) } if tc.userTasksCheck != nil { var userTasks []*usertasksv1.UserTask diff --git a/lib/srv/discovery/kube_integration_watcher.go b/lib/srv/discovery/kube_integration_watcher.go index 88d89f258f8c4..c1332af68c81a 100644 --- a/lib/srv/discovery/kube_integration_watcher.go +++ b/lib/srv/discovery/kube_integration_watcher.go @@ -78,6 +78,7 @@ func (s *Server) startKubeIntegrationWatchers() error { Origin: types.OriginCloud, TriggerFetchC: s.newDiscoveryConfigChangedSub(), PreFetchHookFn: s.kubernetesIntegrationWatcherIterationStarted, + Clock: s.clock, }) if err != nil { return trace.Wrap(err) @@ -86,7 +87,6 @@ func (s *Server) startKubeIntegrationWatchers() error { go func() { for { - discoveryConfigsChanged := map[string]struct{}{} resourcesFoundByGroup := make(map[awsResourceGroup]int) resourcesEnrolledByGroup := make(map[awsResourceGroup]int) @@ -124,7 +124,6 @@ func (s *Server) startKubeIntegrationWatchers() error { resourceGroup := awsResourceGroupFromLabels(newCluster.GetStaticLabels()) resourcesFoundByGroup[resourceGroup] += 1 - discoveryConfigsChanged[resourceGroup.discoveryConfigName] = struct{}{} if enrollingClusters[newCluster.GetAWSConfig().Name] || slices.ContainsFunc(existingServers, func(c types.KubeServer) bool { return c.GetName() == newCluster.GetName() }) || @@ -175,10 +174,6 
@@ func (s *Server) startKubeIntegrationWatchers() error { for group, count := range resourcesEnrolledByGroup { s.awsEKSResourcesStatus.incrementEnrolled(group, count) } - - for dc := range discoveryConfigsChanged { - s.updateDiscoveryConfigStatus(dc) - } } }() return nil @@ -203,16 +198,16 @@ func (s *Server) kubernetesIntegrationWatcherIterationStarted() { return resourceGroup, include }, ) - for _, g := range awsResultGroups { - s.awsEKSResourcesStatus.iterationStarted(g) - } discoveryConfigs := libslices.FilterMapUnique(awsResultGroups, func(g awsResourceGroup) (s string, include bool) { return g.discoveryConfigName, true }) s.updateDiscoveryConfigStatus(discoveryConfigs...) - s.awsEKSResourcesStatus.reset() + for _, g := range awsResultGroups { + s.awsEKSResourcesStatus.iterationStarted(g) + } + s.awsEKSTasks.reset() } @@ -232,7 +227,6 @@ func (s *Server) enrollEKSClusters(region, integration, discoveryConfigName stri } mu.Unlock() - s.updateDiscoveryConfigStatus(discoveryConfigName) s.upsertTasksForAWSEKSFailedEnrollments() }() diff --git a/lib/srv/discovery/status.go b/lib/srv/discovery/status.go index 4b25bff187540..642deb7244c85 100644 --- a/lib/srv/discovery/status.go +++ b/lib/srv/discovery/status.go @@ -355,8 +355,6 @@ func (s *Server) ReportEC2SSMInstallationResult(ctx context.Context, result *ser integration: result.IntegrationName, }, 1) - s.updateDiscoveryConfigStatus(result.DiscoveryConfigName) - s.awsEC2Tasks.addFailedEnrollment( awsEC2TaskKey{ integration: result.IntegrationName, diff --git a/lib/srv/server/azure_watcher.go b/lib/srv/server/azure_watcher.go index fb1110247dc0f..3ebca31e2710f 100644 --- a/lib/srv/server/azure_watcher.go +++ b/lib/srv/server/azure_watcher.go @@ -25,6 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" usageeventsv1 "github.com/gravitational/teleport/api/gen/proto/go/usageevents/v1" 
"github.com/gravitational/teleport/api/types" @@ -86,6 +87,7 @@ func NewAzureWatcher(ctx context.Context, fetchersFn func() []Fetcher, opts ...O ctx: cancelCtx, cancel: cancelFn, pollInterval: time.Minute, + clock: clockwork.NewRealClock(), triggerFetchC: make(<-chan struct{}), InstancesC: make(chan Instances), } diff --git a/lib/srv/server/ec2_watcher.go b/lib/srv/server/ec2_watcher.go index 1f81fb3d6952a..d189f628aa11c 100644 --- a/lib/srv/server/ec2_watcher.go +++ b/lib/srv/server/ec2_watcher.go @@ -28,6 +28,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" usageeventsv1 "github.com/gravitational/teleport/api/gen/proto/go/usageevents/v1" "github.com/gravitational/teleport/api/types" @@ -177,6 +178,7 @@ func NewEC2Watcher(ctx context.Context, fetchersFn func() []Fetcher, missedRotat fetchersFn: fetchersFn, ctx: cancelCtx, cancel: cancelFn, + clock: clockwork.NewRealClock(), pollInterval: time.Minute, triggerFetchC: make(<-chan struct{}), InstancesC: make(chan Instances), diff --git a/lib/srv/server/gcp_watcher.go b/lib/srv/server/gcp_watcher.go index e3cf33c591d49..8c4396c856610 100644 --- a/lib/srv/server/gcp_watcher.go +++ b/lib/srv/server/gcp_watcher.go @@ -25,6 +25,7 @@ import ( "time" "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" usageeventsv1 "github.com/gravitational/teleport/api/gen/proto/go/usageevents/v1" "github.com/gravitational/teleport/api/types" @@ -78,6 +79,7 @@ func NewGCPWatcher(ctx context.Context, fetchersFn func() []Fetcher, opts ...Opt fetchersFn: fetchersFn, ctx: cancelCtx, cancel: cancelFn, + clock: clockwork.NewRealClock(), pollInterval: time.Minute, triggerFetchC: make(<-chan struct{}), InstancesC: make(chan Instances), diff --git a/lib/srv/server/watcher.go b/lib/srv/server/watcher.go index 5b10097b0e045..53409171c2ba0 100644 --- a/lib/srv/server/watcher.go +++ 
b/lib/srv/server/watcher.go @@ -24,6 +24,7 @@ import ( "time" "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" "github.com/gravitational/teleport/api/types" ) @@ -64,6 +65,13 @@ func WithPreFetchHookFn(f func()) Option { } } +// WithClock sets a clock that is used to periodically fetch new resources. +func WithClock(clock clockwork.Clock) Option { + return func(w *Watcher) { + w.clock = clock + } +} + // Watcher allows callers to discover cloud instances matching specified filters. type Watcher struct { // InstancesC can be used to consume newly discovered instances. @@ -72,6 +80,7 @@ type Watcher struct { fetchersFn func() []Fetcher pollInterval time.Duration + clock clockwork.Clock triggerFetchC <-chan struct{} ctx context.Context cancel context.CancelFunc @@ -107,7 +116,7 @@ func (w *Watcher) fetchAndSubmit() { // Run starts the watcher's main watch loop. func (w *Watcher) Run() { - pollTimer := time.NewTimer(w.pollInterval) + pollTimer := w.clock.NewTimer(w.pollInterval) defer pollTimer.Stop() if w.triggerFetchC == nil { @@ -123,7 +132,7 @@ func (w *Watcher) Run() { w.sendInstancesOrLogError(fetcher.GetMatchingInstances(insts, true)) } - case <-pollTimer.C: + case <-pollTimer.Chan(): w.fetchAndSubmit() pollTimer.Reset(w.pollInterval) @@ -132,7 +141,7 @@ func (w *Watcher) Run() { // stop and drain timer if !pollTimer.Stop() { - <-pollTimer.C + <-pollTimer.Chan() } pollTimer.Reset(w.pollInterval) From 5fb00a4aeed215225505e23f2d812157af4976f2 Mon Sep 17 00:00:00 2001 From: Gavin Frazar Date: Thu, 30 Jan 2025 08:51:33 -0800 Subject: [PATCH 03/28] Migrate AWS session to SDK v2 (#51626) Only DynamoDB and AWS MongoDB Atlas depended on GetAWSSession, and the migration for these packages was trivial. Since this was the last AWS method in lib/cloud/clients, the vast majority of the changes are to remove dead code. 
--- lib/auth/auth.go | 7 - lib/auth/init.go | 4 - lib/cloud/clients.go | 388 +----------------- lib/cloud/clients_test.go | 114 ----- lib/cloud/mocks/aws_config.go | 5 +- lib/service/service.go | 6 - lib/srv/db/access_test.go | 2 - lib/srv/db/cloud/aws.go | 6 - lib/srv/db/cloud/iam.go | 11 - lib/srv/db/cloud/iam_test.go | 36 +- lib/srv/db/cloud/meta.go | 10 - lib/srv/db/cloud/meta_test.go | 7 - lib/srv/db/cloud/resource_checker.go | 8 +- .../db/cloud/resource_checker_credentials.go | 2 +- lib/srv/db/cloud/resource_checker_url.go | 3 - .../db/cloud/resource_checker_url_aws_test.go | 13 - lib/srv/db/cloud/users/users.go | 10 - lib/srv/db/cloud/users/users_test.go | 2 - lib/srv/db/common/auth.go | 22 +- lib/srv/db/common/auth_test.go | 10 +- lib/srv/db/common/engines.go | 8 +- lib/srv/db/common/engines_test.go | 2 +- lib/srv/db/dynamodb/engine.go | 28 +- lib/srv/db/dynamodb/test.go | 4 +- lib/srv/db/dynamodb_test.go | 38 +- lib/srv/db/mysql/engine.go | 2 +- lib/srv/db/objects/fetcher.go | 8 +- lib/srv/db/objects/importer.go | 8 +- lib/srv/db/objects/objects.go | 6 +- lib/srv/db/postgres/connector.go | 8 +- lib/srv/db/postgres/engine.go | 9 +- lib/srv/db/postgres/objects.go | 6 +- lib/srv/db/postgres/users.go | 8 +- lib/srv/db/server.go | 10 +- lib/srv/db/watcher_test.go | 8 - lib/srv/discovery/access_graph_aws.go | 1 - lib/srv/discovery/access_graph_test.go | 4 +- lib/srv/discovery/discovery.go | 10 +- lib/srv/discovery/discovery_test.go | 9 - .../discovery/fetchers/aws-sync/aws-sync.go | 3 - lib/srv/discovery/fetchers/db/aws.go | 6 - .../db/aws_redshift_serverless_test.go | 4 +- lib/srv/discovery/fetchers/db/db.go | 6 - lib/srv/discovery/fetchers/db/helpers_test.go | 6 - lib/srv/server/azure_watcher_test.go | 2 +- 45 files changed, 99 insertions(+), 771 deletions(-) delete mode 100644 lib/cloud/clients_test.go diff --git a/lib/auth/auth.go b/lib/auth/auth.go index 5a1dfcbf796b2..ef4ad7b892e17 100644 --- a/lib/auth/auth.go +++ b/lib/auth/auth.go @@ -92,7 +92,6 @@ 
import ( "github.com/gravitational/teleport/lib/bitbucket" "github.com/gravitational/teleport/lib/cache" "github.com/gravitational/teleport/lib/circleci" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/devicetrust/assertserver" @@ -373,12 +372,6 @@ func NewServer(cfg *InitConfig, opts ...ServerOption) (*Server, error) { return nil, trace.Wrap(err) } } - if cfg.CloudClients == nil { - cfg.CloudClients, err = cloud.NewClients() - if err != nil { - return nil, trace.Wrap(err) - } - } if cfg.Notifications == nil { cfg.Notifications, err = local.NewNotificationsService(cfg.Backend, cfg.Clock) if err != nil { diff --git a/lib/auth/init.go b/lib/auth/init.go index 2e78c94f4a91c..97a82ac403434 100644 --- a/lib/auth/init.go +++ b/lib/auth/init.go @@ -57,7 +57,6 @@ import ( "github.com/gravitational/teleport/lib/auth/migration" "github.com/gravitational/teleport/lib/auth/state" "github.com/gravitational/teleport/lib/backend" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cryptosuites" "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/modules" @@ -302,9 +301,6 @@ type InitConfig struct { // AccessMonitoringRules is a service that manages access monitoring rules. AccessMonitoringRules services.AccessMonitoringRules - // CloudClients provides clients for various cloud providers. - CloudClients cloud.Clients - // KubeWaitingContainers is a service that manages // Kubernetes ephemeral containers that are waiting // to be created until moderated session conditions are met. 
diff --git a/lib/cloud/clients.go b/lib/cloud/clients.go index 0c8fe0306a63c..1dbe9539acefe 100644 --- a/lib/cloud/clients.go +++ b/lib/cloud/clients.go @@ -21,9 +21,7 @@ package cloud import ( "context" "io" - "log/slog" "sync" - "time" gcpcredentials "cloud.google.com/go/iam/credentials/apiv1" "github.com/Azure/azure-sdk-for-go/sdk/azcore" @@ -33,29 +31,17 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/mysql/armmysql" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresql" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/subscription/armsubscription" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - awssession "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/gravitational/trace" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - "github.com/gravitational/teleport/api/types" - libcloudaws "github.com/gravitational/teleport/lib/cloud/aws" "github.com/gravitational/teleport/lib/cloud/azure" "github.com/gravitational/teleport/lib/cloud/gcp" "github.com/gravitational/teleport/lib/cloud/imds" awsimds "github.com/gravitational/teleport/lib/cloud/imds/aws" azureimds "github.com/gravitational/teleport/lib/cloud/imds/azure" gcpimds "github.com/gravitational/teleport/lib/cloud/imds/gcp" - "github.com/gravitational/teleport/lib/modules" - "github.com/gravitational/teleport/lib/utils" ) // Clients provides interface for obtaining cloud provider clients. @@ -65,8 +51,6 @@ type Clients interface { GetInstanceMetadataClient(ctx context.Context) (imds.Client, error) // GCPClients is an interface for providing GCP API clients. GCPClients - // AWSClients is an interface for providing AWS API clients. 
- AWSClients // AzureClients is an interface for Azure-specific API clients AzureClients // Closer closes all initialized clients. @@ -87,12 +71,6 @@ type GCPClients interface { GetGCPInstancesClient(context.Context) (gcp.InstancesClient, error) } -// AWSClients is an interface for providing AWS API clients. -type AWSClients interface { - // GetAWSSession returns AWS session for the specified region and any role(s). - GetAWSSession(ctx context.Context, region string, opts ...AWSOptionsFn) (*awssession.Session, error) -} - // AzureClients is an interface for Azure-specific API clients type AzureClients interface { // GetAzureCredential returns Azure default token credential chain. @@ -199,23 +177,16 @@ type ClientsOption func(cfg *cloudClients) // NewClients returns a new instance of cloud clients retriever. func NewClients(opts ...ClientsOption) (Clients, error) { - awsSessionsCache, err := utils.NewFnCache(utils.FnCacheConfig{ - TTL: 15 * time.Minute, - }) - if err != nil { - return nil, trace.Wrap(err) - } azClients, err := newAzureClients() if err != nil { return nil, trace.Wrap(err) } cloudClients := &cloudClients{ - awsSessionsCache: awsSessionsCache, gcpClients: gcpClients{ - gcpSQLAdmin: newClientCache[gcp.SQLAdminClient](gcp.NewSQLAdminClient), - gcpGKE: newClientCache[gcp.GKEClient](gcp.NewGKEClient), - gcpProjects: newClientCache[gcp.ProjectsClient](gcp.NewProjectsClient), - gcpInstances: newClientCache[gcp.InstancesClient](gcp.NewInstancesClient), + gcpSQLAdmin: newClientCache(gcp.NewSQLAdminClient), + gcpGKE: newClientCache(gcp.NewGKEClient), + gcpProjects: newClientCache(gcp.NewProjectsClient), + gcpInstances: newClientCache(gcp.NewInstancesClient), }, azureClients: azClients, } @@ -230,31 +201,7 @@ func NewClients(opts ...ClientsOption) (Clients, error) { // cloudClients implements Clients var _ Clients = (*cloudClients)(nil) -// WithAWSIntegrationSessionProvider sets an integration session generator for AWS apis. 
-// If a client is requested for a specific Integration, instead of using the ambient credentials, this generator is used to fetch the AWS Session. -func WithAWSIntegrationSessionProvider(sessionProvider AWSIntegrationSessionProvider) func(*cloudClients) { - return func(cc *cloudClients) { - cc.awsIntegrationSessionProviderFn = sessionProvider - } -} - -// AWSIntegrationSessionProvider defines a function that creates an [awssession.Session] from a Region and an Integration. -// This is used to generate aws sessions for clients that must use an Integration instead of ambient credentials. -type AWSIntegrationSessionProvider func(ctx context.Context, region string, integration string) (*awssession.Session, error) - -type awsSessionCacheKey struct { - region string - integration string - roleARN string - externalID string -} - type cloudClients struct { - // awsSessionsCache is a cache of AWS sessions, where the cache key is - // an instance of awsSessionCacheKey. - awsSessionsCache *utils.FnCache - // awsIntegrationSessionProviderFn is a AWS Session Generator that uses an Integration to generate an AWS Session. - awsIntegrationSessionProviderFn AWSIntegrationSessionProvider // instanceMetadata is the cached instance metadata client. instanceMetadata imds.Client // gcpClients contains GCP-specific clients. @@ -316,156 +263,6 @@ type azureClients struct { azureRoleAssignmentsClients azure.ClientMap[azure.RoleAssignmentsClient] } -// credentialsSource defines where the credentials must come from. -type credentialsSource int - -const ( - // credentialsSourceAmbient uses the default Cloud SDK method to load the credentials. - credentialsSourceAmbient = iota + 1 - // credentialsSourceIntegration uses an Integration to load the credentials. - credentialsSourceIntegration -) - -// awsOptions a struct of additional options for assuming an AWS role -// when construction an underlying AWS session. 
-type awsOptions struct { - // baseSession is a session to use instead of the default session for an - // AWS region, which is used to enable role chaining. - baseSession *awssession.Session - // assumeRoleARN is the AWS IAM Role ARN to assume. - assumeRoleARN string - // assumeRoleExternalID is used to assume an external AWS IAM Role. - assumeRoleExternalID string - - // credentialsSource describes which source to use to fetch credentials. - credentialsSource credentialsSource - - // integration is the name of the integration to be used to fetch the credentials. - integration string - - // customRetryer is a custom retryer to use for the session. - customRetryer request.Retryer - - // maxRetries is the maximum number of retries to use for the session. - maxRetries *int - - // withoutSessionCache disables the session cache for the AWS session. - withoutSessionCache bool -} - -func (a *awsOptions) checkAndSetDefaults() error { - switch a.credentialsSource { - case credentialsSourceAmbient: - if a.integration != "" { - return trace.BadParameter("integration and ambient credentials cannot be used at the same time") - } - case credentialsSourceIntegration: - if a.integration == "" { - return trace.BadParameter("missing integration name") - } - default: - return trace.BadParameter("missing credentials source (ambient or integration)") - } - - return nil -} - -// AWSOptionsFn is an option function for setting additional options -// when getting an AWS session. -type AWSOptionsFn func(*awsOptions) - -// WithAssumeRole configures options needed for assuming an AWS role. -func WithAssumeRole(roleARN, externalID string) AWSOptionsFn { - return func(options *awsOptions) { - options.assumeRoleARN = roleARN - options.assumeRoleExternalID = externalID - } -} - -// WithoutSessionCache disables the session cache for the AWS session. 
-func WithoutSessionCache() AWSOptionsFn { - return func(options *awsOptions) { - options.withoutSessionCache = true - } -} - -// WithAssumeRoleFromAWSMeta extracts options needed from AWS metadata for -// assuming an AWS role. -func WithAssumeRoleFromAWSMeta(meta types.AWS) AWSOptionsFn { - return WithAssumeRole(meta.AssumeRoleARN, meta.ExternalID) -} - -// WithChainedAssumeRole sets a role to assume with a base session to use -// for assuming the role, which enables role chaining. -func WithChainedAssumeRole(session *awssession.Session, roleARN, externalID string) AWSOptionsFn { - return func(options *awsOptions) { - options.baseSession = session - options.assumeRoleARN = roleARN - options.assumeRoleExternalID = externalID - } -} - -// WithRetryer sets a custom retryer for the session. -func WithRetryer(retryer request.Retryer) AWSOptionsFn { - return func(options *awsOptions) { - options.customRetryer = retryer - } -} - -// WithMaxRetries sets the maximum allowed value for the sdk to keep retrying. -func WithMaxRetries(maxRetries int) AWSOptionsFn { - return func(options *awsOptions) { - options.maxRetries = &maxRetries - } -} - -// WithCredentialsMaybeIntegration sets the credential source to be -// - ambient if the integration is an empty string -// - integration, otherwise -func WithCredentialsMaybeIntegration(integration string) AWSOptionsFn { - if integration != "" { - return withIntegrationCredentials(integration) - } - - return WithAmbientCredentials() -} - -// withIntegrationCredentials configures options with an Integration that must be used to fetch Credentials to assume a role. -// This prevents the usage of AWS environment credentials. -func withIntegrationCredentials(integration string) AWSOptionsFn { - return func(options *awsOptions) { - options.credentialsSource = credentialsSourceIntegration - options.integration = integration - } -} - -// WithAmbientCredentials configures options to use the ambient credentials. 
-func WithAmbientCredentials() AWSOptionsFn { - return func(options *awsOptions) { - options.credentialsSource = credentialsSourceAmbient - } -} - -// GetAWSSession returns AWS session for the specified region, optionally -// assuming AWS IAM Roles. -func (c *cloudClients) GetAWSSession(ctx context.Context, region string, opts ...AWSOptionsFn) (*awssession.Session, error) { - var options awsOptions - for _, opt := range opts { - opt(&options) - } - var err error - if options.baseSession == nil { - options.baseSession, err = c.getAWSSessionForRegion(ctx, region, options) - if err != nil { - return nil, trace.Wrap(err) - } - } - if options.assumeRoleARN == "" { - return options.baseSession, nil - } - return c.getAWSSessionForRole(ctx, region, options) -} - // GetGCPIAMClient returns GCP IAM client. func (c *cloudClients) GetGCPIAMClient(ctx context.Context) (*gcpcredentials.IamCredentialsClient, error) { c.mtx.RLock() @@ -627,105 +424,6 @@ func (c *cloudClients) Close() (err error) { return trace.Wrap(err) } -// awsAmbientSessionProvider loads a new session using the environment variables. -// Describe in detail here: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials -func awsAmbientSessionProvider(ctx context.Context, region string) (*awssession.Session, error) { - awsSessionOptions := buildAWSSessionOptions(region, nil /* credentials */) - - session, err := awssession.NewSessionWithOptions(awsSessionOptions) - return session, trace.Wrap(err) -} - -// getAWSSessionForRegion returns AWS session for the specified region. 
-func (c *cloudClients) getAWSSessionForRegion(ctx context.Context, region string, opts awsOptions) (*awssession.Session, error) { - if err := opts.checkAndSetDefaults(); err != nil { - return nil, trace.Wrap(err) - } - - createSession := func(ctx context.Context) (*awssession.Session, error) { - if opts.credentialsSource == credentialsSourceIntegration { - if c.awsIntegrationSessionProviderFn == nil { - return nil, trace.BadParameter("missing aws integration session provider") - } - - slog.DebugContext(ctx, "Initializing AWS session", - "region", region, - "integration", opts.integration, - ) - session, err := c.awsIntegrationSessionProviderFn(ctx, region, opts.integration) - return session, trace.Wrap(err) - } - - slog.DebugContext(ctx, "Initializing AWS session using environment credentials", - "region", region, - ) - session, err := awsAmbientSessionProvider(ctx, region) - return session, trace.Wrap(err) - } - - if opts.withoutSessionCache { - sess, err := createSession(ctx) - if err != nil { - return nil, trace.Wrap(err) - } - if opts.customRetryer != nil || opts.maxRetries != nil { - return sess.Copy(&aws.Config{ - Retryer: opts.customRetryer, - MaxRetries: opts.maxRetries, - }), nil - } - return sess, trace.Wrap(err) - } - - cacheKey := awsSessionCacheKey{ - region: region, - integration: opts.integration, - } - - sess, err := utils.FnCacheGet(ctx, c.awsSessionsCache, cacheKey, func(ctx context.Context) (*awssession.Session, error) { - session, err := createSession(ctx) - return session, trace.Wrap(err) - }) - if err != nil { - return nil, trace.Wrap(err) - } - if opts.customRetryer != nil || opts.maxRetries != nil { - return sess.Copy(&aws.Config{ - Retryer: opts.customRetryer, - MaxRetries: opts.maxRetries, - }), nil - } - return sess, err -} - -// getAWSSessionForRole returns AWS session for the specified region and role. 
-func (c *cloudClients) getAWSSessionForRole(ctx context.Context, region string, options awsOptions) (*awssession.Session, error) { - if err := options.checkAndSetDefaults(); err != nil { - return nil, trace.Wrap(err) - } - - createSession := func(ctx context.Context) (*awssession.Session, error) { - stsClient := sts.New(options.baseSession) - return newSessionWithRole(ctx, stsClient, region, options.assumeRoleARN, options.assumeRoleExternalID) - } - - if options.withoutSessionCache { - session, err := createSession(ctx) - return session, trace.Wrap(err) - } - - cacheKey := awsSessionCacheKey{ - region: region, - integration: options.integration, - roleARN: options.assumeRoleARN, - externalID: options.assumeRoleExternalID, - } - return utils.FnCacheGet(ctx, c.awsSessionsCache, cacheKey, func(ctx context.Context) (*awssession.Session, error) { - session, err := createSession(ctx) - return session, trace.Wrap(err) - }) -} - func (c *cloudClients) initGCPIAMClient(ctx context.Context) (*gcpcredentials.IamCredentialsClient, error) { c.mtx.Lock() defer c.mtx.Unlock() @@ -891,7 +589,6 @@ var _ Clients = (*TestCloudClients)(nil) // TestCloudClients are used in tests. type TestCloudClients struct { - STS stsiface.STSAPI GCPSQL gcp.SQLAdminClient GCPGKE gcp.GKEClient GCPProjects gcp.ProjectsClient @@ -916,43 +613,6 @@ type TestCloudClients struct { AzureRoleAssignments azure.RoleAssignmentsClient } -// GetAWSSession returns AWS session for the specified region, optionally -// assuming AWS IAM Roles. 
-func (c *TestCloudClients) GetAWSSession(ctx context.Context, region string, opts ...AWSOptionsFn) (*awssession.Session, error) { - var options awsOptions - for _, opt := range opts { - opt(&options) - } - var err error - if options.baseSession == nil { - options.baseSession, err = c.getAWSSessionForRegion(region) - if err != nil { - return nil, trace.Wrap(err) - } - } - if options.assumeRoleARN == "" { - return options.baseSession, nil - } - return newSessionWithRole(ctx, c.STS, region, options.assumeRoleARN, options.assumeRoleExternalID) -} - -// GetAWSSession returns AWS session for the specified region. -func (c *TestCloudClients) getAWSSessionForRegion(region string) (*awssession.Session, error) { - useFIPSEndpoint := endpoints.FIPSEndpointStateUnset - if modules.GetModules().IsBoringBinary() { - useFIPSEndpoint = endpoints.FIPSEndpointStateEnabled - } - - return awssession.NewSession(&aws.Config{ - Credentials: credentials.NewCredentials(&credentials.StaticProvider{Value: credentials.Value{ - AccessKeyID: "fakeClientKeyID", - SecretAccessKey: "fakeClientSecret", - }}), - Region: aws.String(region), - UseFIPSEndpoint: useFIPSEndpoint, - }) -} - // GetGCPIAMClient returns GCP IAM client. func (c *TestCloudClients) GetGCPIAMClient(ctx context.Context) (*gcpcredentials.IamCredentialsClient, error) { return gcpcredentials.NewIamCredentialsClient(ctx, @@ -1075,43 +735,3 @@ func (c *TestCloudClients) GetAzureRoleAssignmentsClient(subscription string) (a func (c *TestCloudClients) Close() error { return nil } - -// newSessionWithRole assumes a given AWS IAM Role, passing an external ID if given, -// and returns a new AWS session with the assumed role in the given region. 
-func newSessionWithRole(ctx context.Context, svc stscreds.AssumeRoler, region, roleARN, externalID string) (*awssession.Session, error) { - slog.DebugContext(ctx, "Initializing AWS session for assumed role", - "assumed_role", roleARN, - "region", region, - ) - // Make a credentials with AssumeRoleProvider and test it out. - cred := stscreds.NewCredentialsWithClient(svc, roleARN, func(p *stscreds.AssumeRoleProvider) { - if externalID != "" { - p.ExternalID = aws.String(externalID) - } - }) - if _, err := cred.GetWithContext(ctx); err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) - } - - awsSessionOptions := buildAWSSessionOptions(region, cred) - - // Create a new session with the credentials. - roleSession, err := awssession.NewSessionWithOptions(awsSessionOptions) - return roleSession, trace.Wrap(err) -} - -func buildAWSSessionOptions(region string, cred *credentials.Credentials) awssession.Options { - useFIPSEndpoint := endpoints.FIPSEndpointStateUnset - if modules.GetModules().IsBoringBinary() { - useFIPSEndpoint = endpoints.FIPSEndpointStateEnabled - } - - return awssession.Options{ - SharedConfigState: awssession.SharedConfigEnable, - Config: aws.Config{ - Region: aws.String(region), - Credentials: cred, - UseFIPSEndpoint: useFIPSEndpoint, - }, - } -} diff --git a/lib/cloud/clients_test.go b/lib/cloud/clients_test.go deleted file mode 100644 index 483b7bd5c5f0e..0000000000000 --- a/lib/cloud/clients_test.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package cloud - -import ( - "context" - "testing" - - "github.com/aws/aws-sdk-go/aws" - awssession "github.com/aws/aws-sdk-go/aws/session" - "github.com/gravitational/trace" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestClientGetAWSSessionIntegration(t *testing.T) { - dummyIntegration := "integration-test" - dummyRegion := "test-region-123" - - t.Run("without an integration session provider, must return a missing aws integration session provider error", func(t *testing.T) { - ctx := context.Background() - - clients, err := NewClients() - require.NoError(t, err) - - t.Cleanup(func() { require.NoError(t, clients.Close()) }) - - _, err = clients.GetAWSSession(ctx, "us-region-2", WithCredentialsMaybeIntegration("integration-test")) - require.True(t, trace.IsBadParameter(err), "expected err to be BadParameter, got %+v", err) - require.ErrorContains(t, err, "missing aws integration session provider") - }) - - t.Run("with an integration session provider, must return the session", func(t *testing.T) { - ctx := context.Background() - dummySession := &awssession.Session{ - Config: &aws.Config{ - Region: &dummyRegion, - }, - } - - clients, err := NewClients(WithAWSIntegrationSessionProvider(func(ctx context.Context, region, integration string) (*awssession.Session, error) { - assert.Equal(t, dummyIntegration, integration) - assert.Equal(t, dummyRegion, region) - return dummySession, nil - })) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, clients.Close()) }) - - sess, err := clients.GetAWSSession(ctx, dummyRegion, 
WithCredentialsMaybeIntegration("integration-test")) - require.NoError(t, err) - require.Equal(t, dummySession, sess) - }) - - t.Run("with an integration session provider, but using an empty integration falls back to ambient credentials, must not call the integration session provider", func(t *testing.T) { - ctx := context.Background() - - clients, err := NewClients(WithAWSIntegrationSessionProvider(func(ctx context.Context, region, integration string) (*awssession.Session, error) { - assert.Fail(t, "should not be called") - return nil, nil - })) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, clients.Close()) }) - - sess, err := clients.GetAWSSession(ctx, dummyRegion, WithCredentialsMaybeIntegration("")) - require.NoError(t, err) - require.NotNil(t, sess) - }) - - t.Run("with an integration session provider, but using ambient credentials, must not call the integration session provider", func(t *testing.T) { - ctx := context.Background() - - clients, err := NewClients(WithAWSIntegrationSessionProvider(func(ctx context.Context, region, integration string) (*awssession.Session, error) { - assert.Fail(t, "should not be called") - return nil, nil - })) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, clients.Close()) }) - - sess, err := clients.GetAWSSession(ctx, dummyRegion, WithAmbientCredentials()) - require.NoError(t, err) - require.NotNil(t, sess) - }) - - t.Run("with an integration session provider, but no credential source defined", func(t *testing.T) { - ctx := context.Background() - - clients, err := NewClients(WithAWSIntegrationSessionProvider(func(ctx context.Context, region, integration string) (*awssession.Session, error) { - assert.Fail(t, "should not be called") - return nil, nil - })) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, clients.Close()) }) - - _, err = clients.GetAWSSession(ctx, dummyRegion) - require.Error(t, err) - require.ErrorContains(t, err, "missing credentials source") - }) -} diff 
--git a/lib/cloud/mocks/aws_config.go b/lib/cloud/mocks/aws_config.go index d148e9512c8d4..e56804bbde803 100644 --- a/lib/cloud/mocks/aws_config.go +++ b/lib/cloud/mocks/aws_config.go @@ -63,7 +63,10 @@ func (f *FakeOIDCIntegrationClient) GetIntegration(ctx context.Context, name str if f.Unauth { return nil, trace.AccessDenied("unauthorized") } - return f.Integration, nil + if f.Integration.GetName() == name { + return f.Integration, nil + } + return nil, trace.NotFound("integration %q not found", name) } func (f *FakeOIDCIntegrationClient) GenerateAWSOIDCToken(ctx context.Context, integrationName string) (string, error) { diff --git a/lib/service/service.go b/lib/service/service.go index a522f03491201..7003d108b9843 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -2136,11 +2136,6 @@ func (process *TeleportProcess) initAuthService() error { return trace.Wrap(err) } - cloudClients, err := cloud.NewClients() - if err != nil { - return trace.Wrap(err) - } - logger := process.logger.With(teleport.ComponentKey, teleport.Component(teleport.ComponentAuth, process.id)) // first, create the AuthServer @@ -2187,7 +2182,6 @@ func (process *TeleportProcess) initAuthService() error { Clock: cfg.Clock, HTTPClientForAWSSTS: cfg.Auth.HTTPClientForAWSSTS, Tracer: process.TracingProvider.Tracer(teleport.ComponentAuth), - CloudClients: cloudClients, Logger: logger, }, func(as *auth.Server) error { if !process.Config.CachePolicy.Enabled { diff --git a/lib/srv/db/access_test.go b/lib/srv/db/access_test.go index 6da256f8b2654..87b4254bc310c 100644 --- a/lib/srv/db/access_test.go +++ b/lib/srv/db/access_test.go @@ -107,7 +107,6 @@ func TestMain(m *testing.M) { registerTestSnowflakeEngine() registerTestElasticsearchEngine() registerTestSQLServerEngine() - registerTestDynamoDBEngine() os.Exit(m.Run()) } @@ -2483,7 +2482,6 @@ func (p *agentParams) setDefaults(c *testContext) { if p.CloudClients == nil { p.CloudClients = &clients.TestCloudClients{ - STS: 
&mocks.STSClientV1{}, GCPSQL: p.GCPSQL, } } diff --git a/lib/srv/db/cloud/aws.go b/lib/srv/db/cloud/aws.go index 5ec00224d045b..091f066cebe47 100644 --- a/lib/srv/db/cloud/aws.go +++ b/lib/srv/db/cloud/aws.go @@ -30,7 +30,6 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/cloud" awslib "github.com/gravitational/teleport/lib/cloud/aws" "github.com/gravitational/teleport/lib/cloud/awsconfig" dbiam "github.com/gravitational/teleport/lib/srv/db/common/iam" @@ -40,8 +39,6 @@ import ( type awsConfig struct { // awsConfigProvider provides [aws.Config] for AWS SDK service clients. awsConfigProvider awsconfig.Provider - // clients is an interface for creating AWS clients. - clients cloud.Clients // identity is AWS identity this database agent is running as. identity awslib.Identity // database is the database instance to configure. @@ -55,9 +52,6 @@ type awsConfig struct { // Check validates the config. func (c *awsConfig) Check() error { - if c.clients == nil { - return trace.BadParameter("missing parameter clients") - } if c.identity == nil { return trace.BadParameter("missing parameter identity") } diff --git a/lib/srv/db/cloud/iam.go b/lib/srv/db/cloud/iam.go index 2cd2da23a354d..dfea3893469bc 100644 --- a/lib/srv/db/cloud/iam.go +++ b/lib/srv/db/cloud/iam.go @@ -33,7 +33,6 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils/retryutils" "github.com/gravitational/teleport/lib/auth/authclient" - "github.com/gravitational/teleport/lib/cloud" awslib "github.com/gravitational/teleport/lib/cloud/aws" "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/services" @@ -48,8 +47,6 @@ type IAMConfig struct { AccessPoint authclient.DatabaseAccessPoint // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. 
AWSConfigProvider awsconfig.Provider - // Clients is an interface for retrieving cloud clients. - Clients cloud.Clients // HostID is the host identified where this agent is running. // DELETE IN 11.0. HostID string @@ -70,13 +67,6 @@ func (c *IAMConfig) Check() error { if c.AWSConfigProvider == nil { return trace.BadParameter("missing AWSConfigProvider") } - if c.Clients == nil { - cloudClients, err := cloud.NewClients() - if err != nil { - return trace.Wrap(err) - } - c.Clients = cloudClients - } if c.HostID == "" { return trace.BadParameter("missing HostID") } @@ -245,7 +235,6 @@ func (c *IAM) getAWSConfigurator(ctx context.Context, database types.Database) ( } return newAWS(ctx, awsConfig{ awsConfigProvider: c.cfg.AWSConfigProvider, - clients: c.cfg.Clients, database: database, identity: identity, policyName: policyName, diff --git a/lib/srv/db/cloud/iam_test.go b/lib/srv/db/cloud/iam_test.go index 3c7e4c63f2888..cae979ddaa360 100644 --- a/lib/srv/db/cloud/iam_test.go +++ b/lib/srv/db/cloud/iam_test.go @@ -33,7 +33,6 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/auth/authclient" - clients "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/services" @@ -156,8 +155,7 @@ func TestAWSIAM(t *testing.T) { AWSConfigProvider: &mocks.AWSConfigProvider{ STSClient: stsClient, }, - Clients: &clients.TestCloudClients{}, - HostID: "host-id", + HostID: "host-id", onProcessedTask: func(iamTask, error) { taskChan <- struct{}{} }, @@ -298,13 +296,11 @@ func TestAWSIAMNoPermissions(t *testing.T) { tests := []struct { name string meta types.AWS - clients clients.Clients awsClients awsClientProvider }{ { - name: "RDS database", - meta: types.AWS{Region: "localhost", AccountID: "123456789012", RDS: types.RDS{InstanceID: "postgres-rds", ResourceID: "postgres-rds-resource-id"}}, - clients: 
&clients.TestCloudClients{}, + name: "RDS database", + meta: types.AWS{Region: "localhost", AccountID: "123456789012", RDS: types.RDS{InstanceID: "postgres-rds", ResourceID: "postgres-rds-resource-id"}}, awsClients: fakeAWSClients{ iamClient: &mocks.IAMMock{Unauth: true}, rdsClient: &mocks.RDSClient{Unauth: true}, @@ -312,9 +308,8 @@ func TestAWSIAMNoPermissions(t *testing.T) { }, }, { - name: "Aurora cluster", - meta: types.AWS{Region: "localhost", AccountID: "123456789012", RDS: types.RDS{ClusterID: "postgres-aurora", ResourceID: "postgres-aurora-resource-id"}}, - clients: &clients.TestCloudClients{}, + name: "Aurora cluster", + meta: types.AWS{Region: "localhost", AccountID: "123456789012", RDS: types.RDS{ClusterID: "postgres-aurora", ResourceID: "postgres-aurora-resource-id"}}, awsClients: fakeAWSClients{ iamClient: &mocks.IAMMock{Unauth: true}, rdsClient: &mocks.RDSClient{Unauth: true}, @@ -322,9 +317,8 @@ func TestAWSIAMNoPermissions(t *testing.T) { }, }, { - name: "RDS database missing metadata", - meta: types.AWS{Region: "localhost", RDS: types.RDS{ClusterID: "postgres-aurora"}}, - clients: &clients.TestCloudClients{}, + name: "RDS database missing metadata", + meta: types.AWS{Region: "localhost", RDS: types.RDS{ClusterID: "postgres-aurora"}}, awsClients: fakeAWSClients{ iamClient: &mocks.IAMMock{Unauth: true}, rdsClient: &mocks.RDSClient{Unauth: true}, @@ -332,27 +326,24 @@ func TestAWSIAMNoPermissions(t *testing.T) { }, }, { - name: "Redshift cluster", - meta: types.AWS{Region: "localhost", AccountID: "123456789012", Redshift: types.Redshift{ClusterID: "redshift-cluster-1"}}, - clients: &clients.TestCloudClients{}, + name: "Redshift cluster", + meta: types.AWS{Region: "localhost", AccountID: "123456789012", Redshift: types.Redshift{ClusterID: "redshift-cluster-1"}}, awsClients: fakeAWSClients{ iamClient: &mocks.IAMMock{Unauth: true}, stsClient: stsClient, }, }, { - name: "ElastiCache", - meta: types.AWS{Region: "localhost", AccountID: "123456789012", 
ElastiCache: types.ElastiCache{ReplicationGroupID: "some-group"}}, - clients: &clients.TestCloudClients{}, + name: "ElastiCache", + meta: types.AWS{Region: "localhost", AccountID: "123456789012", ElastiCache: types.ElastiCache{ReplicationGroupID: "some-group"}}, awsClients: fakeAWSClients{ iamClient: &mocks.IAMMock{Unauth: true}, stsClient: stsClient, }, }, { - name: "IAM UnmodifiableEntityException", - meta: types.AWS{Region: "localhost", AccountID: "123456789012", Redshift: types.Redshift{ClusterID: "redshift-cluster-1"}}, - clients: &clients.TestCloudClients{}, + name: "IAM UnmodifiableEntityException", + meta: types.AWS{Region: "localhost", AccountID: "123456789012", Redshift: types.Redshift{ClusterID: "redshift-cluster-1"}}, awsClients: fakeAWSClients{ iamClient: &mocks.IAMMock{ Error: &iamtypes.UnmodifiableEntityException{ @@ -369,7 +360,6 @@ func TestAWSIAMNoPermissions(t *testing.T) { // Make configurator. configurator, err := NewIAM(ctx, IAMConfig{ AccessPoint: &mockAccessPoint{}, - Clients: test.clients, HostID: "host-id", AWSConfigProvider: &mocks.AWSConfigProvider{ STSClient: stsClient, diff --git a/lib/srv/db/cloud/meta.go b/lib/srv/db/cloud/meta.go index ca84c8a0a6030..9a1de680d9ed6 100644 --- a/lib/srv/db/cloud/meta.go +++ b/lib/srv/db/cloud/meta.go @@ -41,7 +41,6 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/srv/db/common" discoverycommon "github.com/gravitational/teleport/lib/srv/discovery/common" @@ -147,8 +146,6 @@ func (defaultAWSClients) getSTSClient(cfg aws.Config, optFns ...func(*sts.Option // MetadataConfig is the cloud metadata service config. type MetadataConfig struct { - // Clients is an interface for retrieving cloud clients. - Clients cloud.Clients // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. 
AWSConfigProvider awsconfig.Provider @@ -158,13 +155,6 @@ type MetadataConfig struct { // Check validates the metadata service config. func (c *MetadataConfig) Check() error { - if c.Clients == nil { - cloudClients, err := cloud.NewClients() - if err != nil { - return trace.Wrap(err) - } - c.Clients = cloudClients - } if c.AWSConfigProvider == nil { return trace.BadParameter("missing AWSConfigProvider") } diff --git a/lib/srv/db/cloud/meta_test.go b/lib/srv/db/cloud/meta_test.go index 1aea1d19e38db..46ef553afb297 100644 --- a/lib/srv/db/cloud/meta_test.go +++ b/lib/srv/db/cloud/meta_test.go @@ -39,7 +39,6 @@ import ( "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/defaults" ) @@ -137,9 +136,6 @@ func TestAWSMetadata(t *testing.T) { // Create metadata fetcher. metadata, err := NewMetadata(MetadataConfig{ - Clients: &cloud.TestCloudClients{ - STS: &fakeSTS.STSClientV1, - }, AWSConfigProvider: &mocks.AWSConfigProvider{ STSClient: fakeSTS, }, @@ -420,9 +416,6 @@ func TestAWSMetadataNoPermissions(t *testing.T) { // Create metadata fetcher. metadata, err := NewMetadata(MetadataConfig{ - Clients: &cloud.TestCloudClients{ - STS: &fakeSTS.STSClientV1, - }, AWSConfigProvider: &mocks.AWSConfigProvider{ STSClient: fakeSTS, }, diff --git a/lib/srv/db/cloud/resource_checker.go b/lib/srv/db/cloud/resource_checker.go index 12fe017c3ca1b..85cd9df8a47fc 100644 --- a/lib/srv/db/cloud/resource_checker.go +++ b/lib/srv/db/cloud/resource_checker.go @@ -45,8 +45,8 @@ type DiscoveryResourceCheckerConfig struct { AWSConfigProvider awsconfig.Provider // ResourceMatchers is a list of database resource matchers. ResourceMatchers []services.ResourceMatcher - // Clients is an interface for retrieving cloud clients. - Clients cloud.Clients + // AzureClients is an interface for retrieving Azure cloud clients. 
+ AzureClients cloud.AzureClients // Context is the database server close context. Context context.Context // Logger is used for logging. @@ -55,12 +55,12 @@ type DiscoveryResourceCheckerConfig struct { // CheckAndSetDefaults validates the config and sets default values. func (c *DiscoveryResourceCheckerConfig) CheckAndSetDefaults() error { - if c.Clients == nil { + if c.AzureClients == nil { cloudClients, err := cloud.NewClients() if err != nil { return trace.Wrap(err) } - c.Clients = cloudClients + c.AzureClients = cloudClients } if c.AWSConfigProvider == nil { return trace.BadParameter("missing AWSConfigProvider") diff --git a/lib/srv/db/cloud/resource_checker_credentials.go b/lib/srv/db/cloud/resource_checker_credentials.go index 0e6d5e7770d55..1902a2d886ffe 100644 --- a/lib/srv/db/cloud/resource_checker_credentials.go +++ b/lib/srv/db/cloud/resource_checker_credentials.go @@ -61,7 +61,7 @@ func newCredentialsChecker(cfg DiscoveryResourceCheckerConfig) (*credentialsChec return &credentialsChecker{ awsConfigProvider: cfg.AWSConfigProvider, awsClients: defaultAWSClients{}, - azureClients: cfg.Clients, + azureClients: cfg.AzureClients, resourceMatchers: cfg.ResourceMatchers, logger: cfg.Logger, cache: cache, diff --git a/lib/srv/db/cloud/resource_checker_url.go b/lib/srv/db/cloud/resource_checker_url.go index b9c3cd59d9c86..947b86b537ea5 100644 --- a/lib/srv/db/cloud/resource_checker_url.go +++ b/lib/srv/db/cloud/resource_checker_url.go @@ -33,7 +33,6 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils" apiawsutils "github.com/gravitational/teleport/api/utils/aws" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/awsconfig" ) @@ -44,7 +43,6 @@ type urlChecker struct { // awsClients is an SDK client provider. 
awsClients awsClientProvider - clients cloud.Clients logger *slog.Logger warnOnError bool @@ -60,7 +58,6 @@ func newURLChecker(cfg DiscoveryResourceCheckerConfig) *urlChecker { return &urlChecker{ awsConfigProvider: cfg.AWSConfigProvider, awsClients: defaultAWSClients{}, - clients: cfg.Clients, logger: cfg.Logger, warnOnError: getWarnOnError(), } diff --git a/lib/srv/db/cloud/resource_checker_url_aws_test.go b/lib/srv/db/cloud/resource_checker_url_aws_test.go index 7af30ae2fe7c3..754879b78eb47 100644 --- a/lib/srv/db/cloud/resource_checker_url_aws_test.go +++ b/lib/srv/db/cloud/resource_checker_url_aws_test.go @@ -32,7 +32,6 @@ import ( "github.com/gravitational/teleport/api/types" apiawsutils "github.com/gravitational/teleport/api/utils/aws" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/srv/discovery/common" @@ -119,26 +118,16 @@ func TestURLChecker_AWS(t *testing.T) { require.Len(t, docdbClusterDBs, 2) // Primary, reader. testCases = append(testCases, docdbClusterDBs...) - // Mock cloud clients. - mockClients := &cloud.TestCloudClients{ - STS: &mocks.STSClientV1{}, - } - mockClientsUnauth := &cloud.TestCloudClients{ - STS: &mocks.STSClientV1{}, - } - // Test both check methods. // Note that "No permissions" logs should only be printed during the second // group ("basic endpoint check"). 
methods := []struct { name string - clients cloud.Clients awsConfigProvider awsconfig.Provider awsClients awsClientProvider }{ { name: "API check", - clients: mockClients, awsConfigProvider: &mocks.AWSConfigProvider{}, awsClients: fakeAWSClients{ ecClient: &mocks.ElastiCacheClient{ @@ -167,7 +156,6 @@ func TestURLChecker_AWS(t *testing.T) { }, { name: "basic endpoint check", - clients: mockClientsUnauth, awsConfigProvider: &mocks.AWSConfigProvider{}, awsClients: fakeAWSClients{ ecClient: &mocks.ElastiCacheClient{Unauth: true}, @@ -183,7 +171,6 @@ func TestURLChecker_AWS(t *testing.T) { for _, method := range methods { t.Run(method.name, func(t *testing.T) { c := newURLChecker(DiscoveryResourceCheckerConfig{ - Clients: method.clients, AWSConfigProvider: method.awsConfigProvider, Logger: utils.NewSlogLoggerForTests(), }) diff --git a/lib/srv/db/cloud/users/users.go b/lib/srv/db/cloud/users/users.go index 45eeba3c3bd7d..95a511b865095 100644 --- a/lib/srv/db/cloud/users/users.go +++ b/lib/srv/db/cloud/users/users.go @@ -33,7 +33,6 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/utils/retryutils" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/srv/db/secrets" "github.com/gravitational/teleport/lib/utils/interval" @@ -43,8 +42,6 @@ import ( type Config struct { // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. AWSConfigProvider awsconfig.Provider - // Clients is an interface for retrieving cloud clients. - Clients cloud.Clients // Clock is used to control time. Clock clockwork.Clock // Interval is the interval between user updates. 
Interval is also used as @@ -93,13 +90,6 @@ func (c *Config) CheckAndSetDefaults() error { if c.UpdateMeta == nil { return trace.BadParameter("missing UpdateMeta") } - if c.Clients == nil { - cloudClients, err := cloud.NewClients() - if err != nil { - return trace.Wrap(err) - } - c.Clients = cloudClients - } if c.Clock == nil { c.Clock = clockwork.NewRealClock() } diff --git a/lib/srv/db/cloud/users/users_test.go b/lib/srv/db/cloud/users/users_test.go index 9d817db844884..937e85f267225 100644 --- a/lib/srv/db/cloud/users/users_test.go +++ b/lib/srv/db/cloud/users/users_test.go @@ -36,7 +36,6 @@ import ( "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" - clients "github.com/gravitational/teleport/lib/cloud" libaws "github.com/gravitational/teleport/lib/cloud/aws" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/defaults" @@ -85,7 +84,6 @@ func TestUsers(t *testing.T) { users, err := NewUsers(Config{ AWSConfigProvider: &mocks.AWSConfigProvider{}, - Clients: &clients.TestCloudClients{}, Clock: clock, UpdateMeta: func(_ context.Context, database types.Database) error { // Update db1 to group3 when setupAllDatabases. diff --git a/lib/srv/db/common/auth.go b/lib/srv/db/common/auth.go index d0347f3c6a6f6..92b5c7a785c89 100644 --- a/lib/srv/db/common/auth.go +++ b/lib/srv/db/common/auth.go @@ -1180,26 +1180,18 @@ func (a *dbAuth) GetAWSIAMCreds(ctx context.Context, database types.Database, da return "", "", "", trace.Wrap(err) } - baseSession, err := a.cfg.Clients.GetAWSSession(ctx, dbAWS.Region, - cloud.WithAssumeRoleFromAWSMeta(dbAWS), - cloud.WithAmbientCredentials(), - ) - if err != nil { - return "", "", "", trace.Wrap(err) - } - - // ExternalID should only be used once. If the baseSession assumes a role, - // the chained sessions should have an empty external ID. 
- - sess, err := a.cfg.Clients.GetAWSSession(ctx, dbAWS.Region, - cloud.WithChainedAssumeRole(baseSession, arn, externalIDForChainedAssumeRole(dbAWS)), - cloud.WithAmbientCredentials(), + awsCfg, err := a.cfg.AWSConfigProvider.GetConfig(ctx, dbAWS.Region, + awsconfig.WithAssumeRole(dbAWS.AssumeRoleARN, dbAWS.ExternalID), + // ExternalID should only be used once. If the baseSession assumes a role, + // the chained sessions should have an empty external ID. + awsconfig.WithAssumeRole(arn, externalIDForChainedAssumeRole(dbAWS)), + awsconfig.WithAmbientCredentials(), ) if err != nil { return "", "", "", trace.Wrap(err) } - creds, err := sess.Config.Credentials.Get() + creds, err := awsCfg.Credentials.Retrieve(ctx) if err != nil { return "", "", "", trace.Wrap(err) } diff --git a/lib/srv/db/common/auth_test.go b/lib/srv/db/common/auth_test.go index a1cfd04f16722..3fb9645b9dddf 100644 --- a/lib/srv/db/common/auth_test.go +++ b/lib/srv/db/common/auth_test.go @@ -609,9 +609,7 @@ func TestAuthGetAWSTokenWithAssumedRole(t *testing.T) { Clock: clock, AuthClient: new(authClientMock), AccessPoint: new(accessPointMock), - Clients: &cloud.TestCloudClients{ - STS: &fakeSTS.STSClientV1, - }, + Clients: &cloud.TestCloudClients{}, AWSConfigProvider: &mocks.AWSConfigProvider{ STSClient: fakeSTS, }, @@ -701,10 +699,10 @@ func TestGetAWSIAMCreds(t *testing.T) { Clock: clock, AuthClient: new(authClientMock), AccessPoint: new(accessPointMock), - Clients: &cloud.TestCloudClients{ - STS: &tt.stsMock.STSClientV1, + Clients: &cloud.TestCloudClients{}, + AWSConfigProvider: &mocks.AWSConfigProvider{ + STSClient: tt.stsMock, }, - AWSConfigProvider: &mocks.AWSConfigProvider{}, awsClients: fakeAWSClients{ stsClient: tt.stsMock, }, diff --git a/lib/srv/db/common/engines.go b/lib/srv/db/common/engines.go index d7a5da72dec37..b989b93462716 100644 --- a/lib/srv/db/common/engines.go +++ b/lib/srv/db/common/engines.go @@ -106,8 +106,8 @@ type EngineConfig struct { AuthClient *authclient.Client // 
AWSConfigProvider provides [aws.Config] for AWS SDK service clients. AWSConfigProvider awsconfig.Provider - // CloudClients provides access to cloud API clients. - CloudClients cloud.Clients + // GCPClients provides access to Google Cloud API clients. + GCPClients cloud.GCPClients // Context is the database server close context. Context context.Context // Clock is the clock interface. @@ -141,8 +141,8 @@ func (c *EngineConfig) CheckAndSetDefaults() error { if c.AWSConfigProvider == nil { return trace.BadParameter("missing AWSConfigProvider") } - if c.CloudClients == nil { - return trace.BadParameter("engine config CloudClients are missing") + if c.GCPClients == nil { + return trace.BadParameter("engine config GCPClients are missing") } if c.Context == nil { c.Context = context.Background() diff --git a/lib/srv/db/common/engines_test.go b/lib/srv/db/common/engines_test.go index 8ef522db8ded5..14a56c4fc4cd7 100644 --- a/lib/srv/db/common/engines_test.go +++ b/lib/srv/db/common/engines_test.go @@ -51,7 +51,7 @@ func TestRegisterEngine(t *testing.T) { Audit: &testAudit{}, AuthClient: &authclient.Client{}, AWSConfigProvider: &mocks.AWSConfigProvider{}, - CloudClients: cloudClients, + GCPClients: cloudClients, } require.NoError(t, ec.CheckAndSetDefaults()) diff --git a/lib/srv/db/dynamodb/engine.go b/lib/srv/db/dynamodb/engine.go index d877741dc628b..734ff51568f2c 100644 --- a/lib/srv/db/dynamodb/engine.go +++ b/lib/srv/db/dynamodb/engine.go @@ -40,7 +40,6 @@ import ( "github.com/gravitational/teleport" apievents "github.com/gravitational/teleport/api/types/events" apiaws "github.com/gravitational/teleport/api/utils/aws" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/modules" @@ -71,8 +70,6 @@ type Engine struct { // RoundTrippers is a cache of RoundTrippers, mapped by service endpoint. 
// It is not guarded by a mutex, since requests are processed serially. RoundTrippers map[string]http.RoundTripper - // CredentialsGetter is used to obtain STS credentials. - CredentialsGetter libaws.CredentialsGetter // UseFIPS will ensure FIPS endpoint resolution. UseFIPS bool } @@ -141,18 +138,9 @@ func (e *Engine) HandleConnection(ctx context.Context, _ *common.Session) error } defer e.Audit.OnSessionEnd(e.Context, e.sessionCtx) - meta := e.sessionCtx.Database.GetAWS() - awsSession, err := e.CloudClients.GetAWSSession(ctx, meta.Region, - cloud.WithAssumeRoleFromAWSMeta(meta), - cloud.WithAmbientCredentials(), - ) - if err != nil { - return trace.Wrap(err) - } signer, err := libaws.NewSigningService(libaws.SigningServiceConfig{ Clock: e.Clock, - SessionProvider: libaws.StaticAWSSessionProvider(awsSession), - CredentialsGetter: e.CredentialsGetter, + AWSConfigProvider: e.AWSConfigProvider, }) if err != nil { return trace.Wrap(err) @@ -223,12 +211,14 @@ func (e *Engine) process(ctx context.Context, req *http.Request, signer *libaws. 
return trace.Wrap(err) } signingCtx := &libaws.SigningCtx{ - SigningName: re.SigningName, - SigningRegion: re.SigningRegion, - Expiry: e.sessionCtx.Identity.Expires, - SessionName: e.sessionCtx.Identity.Username, - AWSRoleArn: roleArn, - SessionTags: e.sessionCtx.Database.GetAWS().SessionTags, + SigningName: re.SigningName, + SigningRegion: re.SigningRegion, + Expiry: e.sessionCtx.Identity.Expires, + SessionName: e.sessionCtx.Identity.Username, + BaseAWSRoleARN: meta.AssumeRoleARN, + BaseAWSExternalID: meta.ExternalID, + AWSRoleArn: roleArn, + SessionTags: e.sessionCtx.Database.GetAWS().SessionTags, } if meta.AssumeRoleARN == "" { signingCtx.AWSExternalID = meta.ExternalID diff --git a/lib/srv/db/dynamodb/test.go b/lib/srv/db/dynamodb/test.go index 92e57e4915c60..b12d0493ea75e 100644 --- a/lib/srv/db/dynamodb/test.go +++ b/lib/srv/db/dynamodb/test.go @@ -101,7 +101,9 @@ func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*T mux := http.NewServeMux() mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - err := awsutils.VerifyAWSSignatureV2(r, credentials.NewStaticCredentialsProvider("AKIDl", "SECRET", "SESSION")) + err := awsutils.VerifyAWSSignatureV2(r, + credentials.NewStaticCredentialsProvider("FAKEACCESSKEYID", "secret", "token"), + ) if err != nil { code := trace.ErrorToCode(err) body, _ := json.Marshal(jsonErr{ diff --git a/lib/srv/db/dynamodb_test.go b/lib/srv/db/dynamodb_test.go index f7a2b259e110b..16ee32af42315 100644 --- a/lib/srv/db/dynamodb_test.go +++ b/lib/srv/db/dynamodb_test.go @@ -22,48 +22,29 @@ import ( "context" "crypto/tls" "net" - "net/http" "testing" - "github.com/aws/aws-sdk-go-v2/credentials" awsdynamodb "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/defaults" libevents "github.com/gravitational/teleport/lib/events" 
"github.com/gravitational/teleport/lib/srv/db/common" "github.com/gravitational/teleport/lib/srv/db/dynamodb" - awsutils "github.com/gravitational/teleport/lib/utils/aws" - "github.com/gravitational/teleport/lib/utils/aws/migration" ) -func registerTestDynamoDBEngine() { - // Override DynamoDB engine that is used normally with the test one - // with custom HTTP client. - common.RegisterEngine(newTestDynamoDBEngine, defaults.ProtocolDynamoDB) -} - -func newTestDynamoDBEngine(ec common.EngineConfig) common.Engine { - return &dynamodb.Engine{ - EngineConfig: ec, - RoundTrippers: make(map[string]http.RoundTripper), - // inject mock AWS credentials. - CredentialsGetter: awsutils.NewStaticCredentialsGetter( - migration.NewCredentialsAdapter( - credentials.NewStaticCredentialsProvider("AKIDl", "SECRET", "SESSION"), - ), - ), - } -} - func TestAccessDynamoDB(t *testing.T) { t.Parallel() ctx := context.Background() mockTables := []string{"table-one", "table-two"} - testCtx := setupTestContext(ctx, t, - withDynamoDB("DynamoDB")) + testCtx := setupTestContext(ctx, t) + testCtx.server = testCtx.setupDatabaseServer(ctx, t, agentParams{ + AWSConfigProvider: &mocks.AWSConfigProvider{}, + Databases: []types.Database{withDynamoDB("DynamoDB")(t, ctx, testCtx)}, + }) go testCtx.startHandlingConnections() tests := []struct { @@ -143,8 +124,11 @@ func TestAccessDynamoDB(t *testing.T) { func TestAuditDynamoDB(t *testing.T) { ctx := context.Background() - testCtx := setupTestContext(ctx, t, - withDynamoDB("DynamoDB")) + testCtx := setupTestContext(ctx, t) + testCtx.server = testCtx.setupDatabaseServer(ctx, t, agentParams{ + AWSConfigProvider: &mocks.AWSConfigProvider{}, + Databases: []types.Database{withDynamoDB("DynamoDB")(t, ctx, testCtx)}, + }) go testCtx.startHandlingConnections() testCtx.createUserAndRole(ctx, t, "alice", "admin", []string{"admin"}, []string{types.Wildcard}) diff --git a/lib/srv/db/mysql/engine.go b/lib/srv/db/mysql/engine.go index 9828d484b7bba..369e4a832ab86 
100644 --- a/lib/srv/db/mysql/engine.go +++ b/lib/srv/db/mysql/engine.go @@ -237,7 +237,7 @@ func (e *Engine) connect(ctx context.Context, sessionCtx *common.Session) (*clie } case sessionCtx.Database.IsCloudSQL(): // Get the client once for subsequent calls (it acquires a read lock). - gcpClient, err := e.CloudClients.GetGCPSQLAdminClient(ctx) + gcpClient, err := e.GCPClients.GetGCPSQLAdminClient(ctx) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/srv/db/objects/fetcher.go b/lib/srv/db/objects/fetcher.go index 676c2b822b320..af1887e021794 100644 --- a/lib/srv/db/objects/fetcher.go +++ b/lib/srv/db/objects/fetcher.go @@ -38,10 +38,10 @@ type ImportRulesReader interface { // ObjectFetcherConfig provides static object fetcher configuration. type ObjectFetcherConfig struct { - ImportRules ImportRulesReader - Auth common.Auth - CloudClients libcloud.Clients - Log *slog.Logger + ImportRules ImportRulesReader + Auth common.Auth + GCPClients libcloud.GCPClients + Log *slog.Logger } // ObjectFetcher defines an interface for retrieving database objects. 
diff --git a/lib/srv/db/objects/importer.go b/lib/srv/db/objects/importer.go index b5c3d6d5a3a9a..295bd0e99cb14 100644 --- a/lib/srv/db/objects/importer.go +++ b/lib/srv/db/objects/importer.go @@ -51,10 +51,10 @@ func startDatabaseImporter(ctx context.Context, cfg Config, database types.Datab cfg.Log = cfg.Log.With("database", database.GetName(), "protocol", database.GetProtocol()) fetcher, err := GetObjectFetcher(ctx, database, ObjectFetcherConfig{ - ImportRules: cfg.ImportRules, - Auth: cfg.Auth, - CloudClients: cfg.CloudClients, - Log: cfg.Log, + ImportRules: cfg.ImportRules, + Auth: cfg.Auth, + GCPClients: cfg.GCPClients, + Log: cfg.Log, }) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/srv/db/objects/objects.go b/lib/srv/db/objects/objects.go index ae292ed13b52d..248a388264062 100644 --- a/lib/srv/db/objects/objects.go +++ b/lib/srv/db/objects/objects.go @@ -42,7 +42,7 @@ type Config struct { DatabaseObjectClient *databaseobject.Client ImportRules ImportRulesReader Auth common.Auth - CloudClients cloud.Clients + GCPClients cloud.GCPClients // ScanInterval specifies how often the database is scanned. 
// A higher ScanInterval reduces the load on the database and database agent, @@ -113,8 +113,8 @@ func (c *Config) CheckAndSetDefaults(ctx context.Context) error { if c.Auth == nil { return trace.BadParameter("missing parameter Auth") } - if c.CloudClients == nil { - return trace.BadParameter("missing parameter CloudClients") + if c.GCPClients == nil { + return trace.BadParameter("missing parameter GCPClients") } if c.Log == nil { c.Log = slog.Default().With(teleport.ComponentKey, "db:obj_importer") diff --git a/lib/srv/db/postgres/connector.go b/lib/srv/db/postgres/connector.go index 81873b6afd7b2..3a73d9e58b017 100644 --- a/lib/srv/db/postgres/connector.go +++ b/lib/srv/db/postgres/connector.go @@ -34,9 +34,9 @@ import ( ) type connector struct { - auth common.Auth - cloudClients libcloud.Clients - log *slog.Logger + auth common.Auth + gcpClients libcloud.GCPClients + log *slog.Logger certExpiry time.Time database types.Database @@ -91,7 +91,7 @@ func (c *connector) getConnectConfig(ctx context.Context) (*pgconn.Config, error return nil, trace.Wrap(err) } // Get the client once for subsequent calls (it acquires a read lock). 
- gcpClient, err := c.cloudClients.GetGCPSQLAdminClient(ctx) + gcpClient, err := c.gcpClients.GetGCPSQLAdminClient(ctx) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/srv/db/postgres/engine.go b/lib/srv/db/postgres/engine.go index ec1f95d5fa121..f7b581b65792b 100644 --- a/lib/srv/db/postgres/engine.go +++ b/lib/srv/db/postgres/engine.go @@ -505,9 +505,9 @@ func (e *Engine) receiveFromServer(serverConn *pgconn.PgConn, serverErrCh chan<- func (e *Engine) newConnector(sessionCtx *common.Session) *connector { conn := &connector{ - auth: e.Auth, - cloudClients: e.CloudClients, - log: e.Log, + auth: e.Auth, + gcpClients: e.GCPClients, + log: e.Log, certExpiry: sessionCtx.GetExpiry(), database: sessionCtx.Database, @@ -533,9 +533,6 @@ func (e *Engine) handleCancelRequest(ctx context.Context, sessionCtx *common.Ses // Instead, use the pgconn config string parser for convenience and dial // db host:port ourselves. network, address := pgconn.NetworkAddress(config.Host, config.Port) - if err != nil { - return trace.Wrap(err) - } dialer := net.Dialer{Timeout: defaults.DefaultIOTimeout} conn, err := dialer.DialContext(ctx, network, address) if err != nil { diff --git a/lib/srv/db/postgres/objects.go b/lib/srv/db/postgres/objects.go index f965baf5cf05c..67d2a37e98465 100644 --- a/lib/srv/db/postgres/objects.go +++ b/lib/srv/db/postgres/objects.go @@ -119,9 +119,9 @@ func (f *objectFetcher) getDatabaseNames(ctx context.Context) ([]string, error) func (f *objectFetcher) connectAsAdmin(ctx context.Context, databaseName string) (*pgx.Conn, error) { conn := &connector{ - auth: f.cfg.Auth, - cloudClients: f.cfg.CloudClients, - log: f.cfg.Log, + auth: f.cfg.Auth, + gcpClients: f.cfg.GCPClients, + log: f.cfg.Log, certExpiry: time.Now().Add(time.Hour), database: f.db, diff --git a/lib/srv/db/postgres/users.go b/lib/srv/db/postgres/users.go index 56fb14a2dd9d8..1e49614bb7224 100644 --- a/lib/srv/db/postgres/users.go +++ b/lib/srv/db/postgres/users.go @@ -204,10 +204,10 @@ 
func (e *Engine) applyPermissions(ctx context.Context, sessionCtx *common.Sessio } fetcher, err := objects.GetObjectFetcher(ctx, sessionCtx.Database, objects.ObjectFetcherConfig{ - ImportRules: e.AuthClient, - Auth: e.Auth, - CloudClients: e.CloudClients, - Log: e.Log, + ImportRules: e.AuthClient, + Auth: e.Auth, + GCPClients: e.GCPClients, + Log: e.Log, }) if err != nil { return trace.Wrap(err) diff --git a/lib/srv/db/server.go b/lib/srv/db/server.go index 223744173ae7d..b4a122b376502 100644 --- a/lib/srv/db/server.go +++ b/lib/srv/db/server.go @@ -210,7 +210,6 @@ func (c *Config) CheckAndSetDefaults(ctx context.Context) (err error) { } if c.AWSDatabaseFetcherFactory == nil { factory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ - CloudClients: c.CloudClients, AWSConfigProvider: c.AWSConfigProvider, }) if err != nil { @@ -253,7 +252,6 @@ func (c *Config) CheckAndSetDefaults(ctx context.Context) (err error) { } if c.CloudMeta == nil { c.CloudMeta, err = cloud.NewMetadata(cloud.MetadataConfig{ - Clients: c.CloudClients, AWSConfigProvider: c.AWSConfigProvider, }) if err != nil { @@ -264,7 +262,6 @@ func (c *Config) CheckAndSetDefaults(ctx context.Context) (err error) { c.CloudIAM, err = cloud.NewIAM(ctx, cloud.IAMConfig{ AccessPoint: c.AccessPoint, AWSConfigProvider: c.AWSConfigProvider, - Clients: c.CloudClients, HostID: c.HostID, }) if err != nil { @@ -289,7 +286,6 @@ func (c *Config) CheckAndSetDefaults(ctx context.Context) (err error) { } c.CloudUsers, err = users.NewUsers(users.Config{ AWSConfigProvider: c.AWSConfigProvider, - Clients: c.CloudClients, UpdateMeta: c.CloudMeta.Update, ClusterName: clusterName.GetClusterName(), }) @@ -303,7 +299,7 @@ func (c *Config) CheckAndSetDefaults(ctx context.Context) (err error) { DatabaseObjectClient: c.AuthClient.DatabaseObjectsClient(), ImportRules: c.AuthClient, Auth: c.Auth, - CloudClients: c.CloudClients, + GCPClients: c.CloudClients, }) if err != nil { return trace.Wrap(err) @@ -313,7 +309,7 @@ func (c 
*Config) CheckAndSetDefaults(ctx context.Context) (err error) { if c.discoveryResourceChecker == nil { c.discoveryResourceChecker, err = cloud.NewDiscoveryResourceChecker(cloud.DiscoveryResourceCheckerConfig{ ResourceMatchers: c.ResourceMatchers, - Clients: c.CloudClients, + AzureClients: c.CloudClients, AWSConfigProvider: c.AWSConfigProvider, Context: ctx, }) @@ -1204,7 +1200,7 @@ func (s *Server) createEngine(sessionCtx *common.Session, audit common.Audit) (c Audit: audit, AuthClient: s.cfg.AuthClient, AWSConfigProvider: s.cfg.AWSConfigProvider, - CloudClients: s.cfg.CloudClients, + GCPClients: s.cfg.CloudClients, Context: s.connContext, Clock: s.cfg.Clock, Log: sessionCtx.Log, diff --git a/lib/srv/db/watcher_test.go b/lib/srv/db/watcher_test.go index 8e35120eddfa9..e6e06ff35202b 100644 --- a/lib/srv/db/watcher_test.go +++ b/lib/srv/db/watcher_test.go @@ -335,15 +335,8 @@ func TestWatcherCloudFetchers(t *testing.T) { ctx := context.Background() testCtx := setupTestContext(ctx, t) - testCloudClients := &clients.TestCloudClients{ - AzureSQLServer: azure.NewSQLClientByAPI(&azure.ARMSQLServerMock{ - AllServers: []*armsql.Server{azSQLServer}, - }), - AzureManagedSQLServer: azure.NewManagedSQLClientByAPI(&azure.ARMSQLManagedServerMock{}), - } dbFetcherFactory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ AWSConfigProvider: &mocks.AWSConfigProvider{}, - CloudClients: testCloudClients, AWSClients: fakeAWSClients{ rdsClient: &mocks.RDSClient{Unauth: true}, // Access denied error should not affect other fetchers. 
rssClient: &mocks.RedshiftServerlessClient{ @@ -376,7 +369,6 @@ func TestWatcherCloudFetchers(t *testing.T) { }, }}, CloudClients: &clients.TestCloudClients{ - STS: &mocks.STSClientV1{}, AzureSQLServer: azure.NewSQLClientByAPI(&azure.ARMSQLServerMock{ AllServers: []*armsql.Server{azSQLServer}, }), diff --git a/lib/srv/discovery/access_graph_aws.go b/lib/srv/discovery/access_graph_aws.go index 185dd3f908a68..9b17be7fd3dc4 100644 --- a/lib/srv/discovery/access_graph_aws.go +++ b/lib/srv/discovery/access_graph_aws.go @@ -505,7 +505,6 @@ func (s *Server) accessGraphAWSFetchersFromMatchers(ctx context.Context, matcher ctx, aws_sync.Config{ AWSConfigProvider: s.AWSConfigProvider, - CloudClients: s.CloudClients, GetEKSClient: s.GetAWSSyncEKSClient, GetEC2Client: s.GetEC2Client, AssumeRole: assumeRole, diff --git a/lib/srv/discovery/access_graph_test.go b/lib/srv/discovery/access_graph_test.go index c6efe1064cbfc..633770bce7dd3 100644 --- a/lib/srv/discovery/access_graph_test.go +++ b/lib/srv/discovery/access_graph_test.go @@ -190,11 +190,11 @@ func TestServer_updateDiscoveryConfigStatus(t *testing.T) { name: "merge two errors", args: args{ fetchers: []*fakeFetcher{ - &fakeFetcher{ + { discoveryConfigName: "test1", err: fmt.Errorf("error in fetcher 1"), }, - &fakeFetcher{ + { discoveryConfigName: "test1", err: fmt.Errorf("error in fetcher 2"), }, diff --git a/lib/srv/discovery/discovery.go b/lib/srv/discovery/discovery.go index b5c9b14df95ec..b29cef50e2cc9 100644 --- a/lib/srv/discovery/discovery.go +++ b/lib/srv/discovery/discovery.go @@ -36,7 +36,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ssm" ssmtypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go/aws/session" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" "google.golang.org/protobuf/types/known/timestamppb" @@ -57,7 +56,6 @@ import ( "github.com/gravitational/teleport/lib/cloud/awsconfig" gcpimds 
"github.com/gravitational/teleport/lib/cloud/imds/gcp" "github.com/gravitational/teleport/lib/cryptosuites" - "github.com/gravitational/teleport/lib/integrations/awsoidc" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/services/readonly" "github.com/gravitational/teleport/lib/srv/discovery/common" @@ -240,12 +238,7 @@ func (c *Config) CheckAndSetDefaults() error { kubernetes matchers are present.`) } if c.CloudClients == nil { - awsIntegrationSessionProvider := func(ctx context.Context, region, integration string) (*session.Session, error) { - return awsoidc.NewSessionV1(ctx, c.AccessPoint, region, integration) - } - cloudClients, err := cloud.NewClients( - cloud.WithAWSIntegrationSessionProvider(awsIntegrationSessionProvider), - ) + cloudClients, err := cloud.NewClients() if err != nil { return trace.Wrap(err, "unable to create cloud clients") } @@ -264,7 +257,6 @@ kubernetes matchers are present.`) } if c.AWSDatabaseFetcherFactory == nil { factory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ - CloudClients: c.CloudClients, AWSConfigProvider: c.AWSConfigProvider, }) if err != nil { diff --git a/lib/srv/discovery/discovery_test.go b/lib/srv/discovery/discovery_test.go index dcde4aa6d386b..bc397e4e717be 100644 --- a/lib/srv/discovery/discovery_test.go +++ b/lib/srv/discovery/discovery_test.go @@ -365,7 +365,6 @@ func TestDiscoveryServer(t *testing.T) { wantInstalledInstances []string wantDiscoveryConfigStatus *discoveryconfig.Status userTasksDiscoverCheck require.ValueAssertionFunc - cloudClients cloud.Clients ssmRunError error }{ { @@ -947,7 +946,6 @@ func TestDiscoveryServer(t *testing.T) { Emitter: tc.emitter, Log: logger, DiscoveryGroup: defaultDiscoveryGroup, - CloudClients: tc.cloudClients, clock: fakeClock, }) require.NoError(t, err) @@ -2174,7 +2172,6 @@ func TestDiscoveryDatabase(t *testing.T) { } testCloudClients := &cloud.TestCloudClients{ - STS: &mocks.STSClientV1{}, AzureRedis: 
azure.NewRedisClientByAPI(&azure.ARMRedisMock{ Servers: []*armredis.ResourceInfo{azRedisResource}, }), @@ -2550,7 +2547,6 @@ func TestDiscoveryDatabase(t *testing.T) { } dbFetcherFactory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ AWSConfigProvider: fakeConfigProvider, - CloudClients: testCloudClients, AWSClients: fakeAWSClients{ ecClient: &mocks.ElastiCacheClient{}, mdbClient: &mocks.MemoryDBClient{}, @@ -2685,12 +2681,8 @@ func TestDiscoveryDatabaseRemovingDiscoveryConfigs(t *testing.T) { fakeConfigProvider := &mocks.AWSConfigProvider{ STSClient: &mocks.STSClient{}, } - testCloudClients := &cloud.TestCloudClients{ - STS: &fakeConfigProvider.STSClient.STSClientV1, - } dbFetcherFactory, err := db.NewAWSFetcherFactory(db.AWSFetcherFactoryConfig{ AWSConfigProvider: fakeConfigProvider, - CloudClients: testCloudClients, AWSClients: fakeAWSClients{ rdsClient: &mocks.RDSClient{ DBInstances: []rdstypes.DBInstance{*awsRDSInstance}, @@ -2730,7 +2722,6 @@ func TestDiscoveryDatabaseRemovingDiscoveryConfigs(t *testing.T) { &Config{ AWSConfigProvider: fakeConfigProvider, AWSDatabaseFetcherFactory: dbFetcherFactory, - CloudClients: testCloudClients, ClusterFeatures: func() proto.Features { return proto.Features{} }, KubernetesClient: fake.NewSimpleClientset(), AccessPoint: getDiscoveryAccessPoint(tlsServer.Auth(), authClient), diff --git a/lib/srv/discovery/fetchers/aws-sync/aws-sync.go b/lib/srv/discovery/fetchers/aws-sync/aws-sync.go index 21a5ca1af85e8..146637659bba7 100644 --- a/lib/srv/discovery/fetchers/aws-sync/aws-sync.go +++ b/lib/srv/discovery/fetchers/aws-sync/aws-sync.go @@ -35,7 +35,6 @@ import ( usageeventsv1 "github.com/gravitational/teleport/api/gen/proto/go/usageevents/v1" accessgraphv1alpha "github.com/gravitational/teleport/gen/proto/go/accessgraph/v1alpha" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/srv/server" ) @@ -48,8 +47,6 @@ const pageSize 
int32 = 500 type Config struct { // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. AWSConfigProvider awsconfig.Provider - // CloudClients is the cloud clients to use when fetching AWS resources. - CloudClients cloud.Clients // GetEKSClient gets an AWS EKS client for the given region. GetEKSClient EKSClientGetter // GetEC2Client gets an AWS EC2 client for the given region. diff --git a/lib/srv/discovery/fetchers/db/aws.go b/lib/srv/discovery/fetchers/db/aws.go index 24de91e83e309..10a49173a5a3a 100644 --- a/lib/srv/discovery/fetchers/db/aws.go +++ b/lib/srv/discovery/fetchers/db/aws.go @@ -27,7 +27,6 @@ import ( "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/srv/discovery/common" ) @@ -49,8 +48,6 @@ type awsFetcherPlugin interface { // awsFetcherConfig is the AWS database fetcher configuration. type awsFetcherConfig struct { - // AWSClients are the AWS API clients. - AWSClients cloud.AWSClients // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. AWSConfigProvider awsconfig.Provider // Type is the type of DB matcher, for example "rds", "redshift", etc. @@ -78,9 +75,6 @@ type awsFetcherConfig struct { // CheckAndSetDefaults validates the config and sets defaults. 
func (cfg *awsFetcherConfig) CheckAndSetDefaults(component string) error { - if cfg.AWSClients == nil { - return trace.BadParameter("missing parameter AWSClients") - } if cfg.AWSConfigProvider == nil { return trace.BadParameter("missing AWSConfigProvider") } diff --git a/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go b/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go index dd64dcdade45f..bf657836520c6 100644 --- a/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go +++ b/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" - "github.com/gravitational/teleport/lib/cloud" "github.com/gravitational/teleport/lib/cloud/awstesthelpers" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/srv/discovery/common" @@ -51,8 +50,7 @@ func TestRedshiftServerlessFetcher(t *testing.T) { tests := []awsFetcherTest{ { - name: "fetch all", - inputClients: &cloud.TestCloudClients{}, + name: "fetch all", fetcherCfg: AWSFetcherFactoryConfig{ AWSClients: fakeAWSClients{ rssClient: &mocks.RedshiftServerlessClient{ diff --git a/lib/srv/discovery/fetchers/db/db.go b/lib/srv/discovery/fetchers/db/db.go index d7cca43f75dfe..eca656ca067a9 100644 --- a/lib/srv/discovery/fetchers/db/db.go +++ b/lib/srv/discovery/fetchers/db/db.go @@ -121,14 +121,9 @@ type AWSFetcherFactoryConfig struct { AWSConfigProvider awsconfig.Provider // AWSClients provides AWS SDK clients. AWSClients AWSClientProvider - // CloudClients is an interface for retrieving AWS SDK v1 cloud clients. 
- CloudClients cloud.AWSClients } func (c *AWSFetcherFactoryConfig) checkAndSetDefaults() error { - if c.CloudClients == nil { - return trace.BadParameter("missing CloudClients") - } if c.AWSConfigProvider == nil { return trace.BadParameter("missing AWSConfigProvider") } @@ -173,7 +168,6 @@ func (f *AWSFetcherFactory) MakeFetchers(ctx context.Context, matchers []types.A for _, makeFetcher := range makeFetchers { for _, region := range matcher.Regions { fetcher, err := makeFetcher(awsFetcherConfig{ - AWSClients: f.cfg.CloudClients, Type: matcherType, AssumeRole: assumeRole, Labels: matcher.Tags, diff --git a/lib/srv/discovery/fetchers/db/helpers_test.go b/lib/srv/discovery/fetchers/db/helpers_test.go index 5feae42c7b367..69b94ff9b0896 100644 --- a/lib/srv/discovery/fetchers/db/helpers_test.go +++ b/lib/srv/discovery/fetchers/db/helpers_test.go @@ -112,7 +112,6 @@ var testAssumeRole = types.AssumeRole{ // awsFetcherTest is a common test struct for AWS fetchers. type awsFetcherTest struct { name string - inputClients *cloud.TestCloudClients fetcherCfg AWSFetcherFactoryConfig inputMatchers []types.AWSMatcher wantDatabases types.Databases @@ -125,11 +124,6 @@ func testAWSFetchers(t *testing.T, tests ...awsFetcherTest) { for _, test := range tests { test := test fakeSTS := &mocks.STSClient{} - if test.inputClients != nil { - require.Nil(t, test.inputClients.STS, "testAWSFetchers injects an STS mock itself, but test input had already configured it. This is a test configuration error.") - test.inputClients.STS = &fakeSTS.STSClientV1 - } - test.fetcherCfg.CloudClients = test.inputClients require.Nil(t, test.fetcherCfg.AWSConfigProvider, "testAWSFetchers injects a fake AWSConfigProvider, but the test input had already configured it. 
This is a test configuration error.") test.fetcherCfg.AWSConfigProvider = &mocks.AWSConfigProvider{ STSClient: fakeSTS, diff --git a/lib/srv/server/azure_watcher_test.go b/lib/srv/server/azure_watcher_test.go index 0c9a183b1fc89..3ff11436ebc06 100644 --- a/lib/srv/server/azure_watcher_test.go +++ b/lib/srv/server/azure_watcher_test.go @@ -33,7 +33,7 @@ import ( ) type mockClients struct { - cloud.Clients + cloud.AzureClients azureClient azure.VirtualMachinesClient } From 7d042c15b4e3bcf5e9916de2806e2fd35cc6efc3 Mon Sep 17 00:00:00 2001 From: Paul Gottschling Date: Thu, 30 Jan 2025 12:03:21 -0500 Subject: [PATCH 04/28] Upgrade Vale in the prose check workflow (#51619) --- .github/workflows/doc-tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/doc-tests.yaml b/.github/workflows/doc-tests.yaml index 54df4b0743ef2..60685a0b8295e 100644 --- a/.github/workflows/doc-tests.yaml +++ b/.github/workflows/doc-tests.yaml @@ -134,7 +134,7 @@ jobs: - name: Run the linter uses: errata-ai/vale-action@d89dee975228ae261d22c15adcd03578634d429c # v2.1.1 with: - version: 2.30.0 + version: 3.9.4 # Take the comma-separated list of files returned by the "Check for # relevant changes" job. separator: "," From f4514b4d34d6189f6b356dd860b10e4de99df358 Mon Sep 17 00:00:00 2001 From: Brian Joerger Date: Thu, 30 Jan 2025 09:40:46 -0800 Subject: [PATCH 05/28] Address todos. 
(#50417) --- .../wizards/AddAuthDeviceWizard.tsx | 7 --- .../teleport/src/services/auth/auth.ts | 59 ------------------- 2 files changed, 66 deletions(-) diff --git a/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx b/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx index be291c660f55a..d161d8db54b93 100644 --- a/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx +++ b/web/packages/teleport/src/Account/ManageDevices/wizards/AddAuthDeviceWizard.tsx @@ -73,12 +73,6 @@ export function AddAuthDeviceWizard({ const reauthState = useReAuthenticate({ challengeScope: MfaChallengeScope.MANAGE_DEVICES, onMfaResponse: mfaResponse => - // TODO(Joerger): Instead of getting a privilege token, we should get - // // a register challenge with the mfa response directly. For good UX, this would - // // require some refactoring to the flow so the user can choose a device type before - // // completing an mfa check and getting an otp/webauthn register challenge, or - // // allowing the backend to return a flexible register challenge - // await auth.createPrivilegeToken(mfaResponse).then(setPrivilegeToken); auth.createPrivilegeToken(mfaResponse).then(setPrivilegeToken), }); @@ -188,7 +182,6 @@ export function CreateDeviceStep({ if (usage === 'passwordless' || newMfaDeviceType === 'webauthn') { createPasskeyAttempt.run(async () => { const credential = await auth.createNewWebAuthnDevice({ - // TODO(Joerger): Skip privilege token step, just pass in mfa response. 
tokenId: privilegeToken, deviceUsage: usage, }); diff --git a/web/packages/teleport/src/services/auth/auth.ts b/web/packages/teleport/src/services/auth/auth.ts index 3e58e9f5f5f4b..4d1e978f2f64c 100644 --- a/web/packages/teleport/src/services/auth/auth.ts +++ b/web/packages/teleport/src/services/auth/auth.ts @@ -341,7 +341,6 @@ const auth = { }); }, - // TODO(Joerger): Delete once no longer used by /e async getSsoChallengeResponse( challenge: SsoChallenge ): Promise { @@ -386,63 +385,10 @@ const auth = { }; }, - // TODO(Joerger): Delete once no longer used by /e - createPrivilegeTokenWithWebauthn() { - return auth - .getMfaChallenge({ scope: MfaChallengeScope.MANAGE_DEVICES }) - .then(auth.getMfaChallengeResponse) - .then(mfaResp => auth.createPrivilegeToken(mfaResp)); - }, - - // TODO(Joerger): Delete once no longer used by /e - createPrivilegeTokenWithTotp(secondFactorToken: string) { - return api.post(cfg.api.createPrivilegeTokenPath, { secondFactorToken }); - }, - createRestrictedPrivilegeToken() { return api.post(cfg.api.createPrivilegeTokenPath, {}); }, - // TODO(Joerger): Remove once /e is no longer using it. - async getWebauthnResponse( - scope: MfaChallengeScope, - allowReuse?: boolean, - isMfaRequiredRequest?: IsMfaRequiredRequest, - abortSignal?: AbortSignal - ) { - // TODO(Joerger): DELETE IN 16.0.0 - // the create mfa challenge endpoint below supports - // MFARequired requests without the extra roundtrip. - if (isMfaRequiredRequest) { - try { - const isMFARequired = await checkMfaRequired( - isMfaRequiredRequest, - abortSignal - ); - if (!isMFARequired.required) { - return; - } - } catch (err) { - if ( - err?.response?.status === 400 && - err?.message.includes('missing target for MFA check') - ) { - // checking MFA requirement for admin actions is not supported by old - // auth servers, we expect an error instead. In this case, assume MFA is - // not required. Callers should fallback to retrying with MFA if needed. 
- return; - } - - throw err; - } - } - - return auth - .getMfaChallenge({ scope, allowReuse, isMfaRequiredRequest }, abortSignal) - .then(challenge => auth.getMfaChallengeResponse(challenge, 'webauthn')) - .then(res => res.webauthn_response); - }, - getMfaChallengeResponseForAdminAction(allowReuse?: boolean) { // If the client is checking if MFA is required for an admin action, // but we know admin action MFA is not enforced, return early. @@ -460,11 +406,6 @@ const auth = { }) .then(auth.getMfaChallengeResponse); }, - - // TODO(Joerger): Delete in favor of getMfaChallengeResponseForAdminAction once /e is updated. - getWebauthnResponseForAdminAction(allowReuse?: boolean) { - return auth.getMfaChallengeResponseForAdminAction(allowReuse); - }, }; function checkMfaRequired( From f44af9b61d33495784fb9589016baed335209e0d Mon Sep 17 00:00:00 2001 From: "STeve (Xin) Huang" Date: Thu, 30 Jan 2025 12:52:43 -0500 Subject: [PATCH 06/28] [docs] PostgreSQL auto-user provisioning guide minor edits (#48897) * [docs] PostgreSQL auto-user provisioning guide minor edits * remove admin option from rds_superuser and add note on admin option for each role --- .../auto-user-provisioning/postgres.mdx | 36 ++++++++++++++----- .../postgres15-grant-create.mdx | 11 ------ 2 files changed, 27 insertions(+), 20 deletions(-) delete mode 100644 docs/pages/includes/database-access/auto-user-provisioning/postgres15-grant-create.mdx diff --git a/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx b/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx index 2fb8c7c1aac83..46f44bf3306ca 100644 --- a/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx +++ b/docs/pages/enroll-resources/database-access/auto-user-provisioning/postgres.mdx @@ -41,6 +41,19 @@ Note that the RDS database must have IAM authentication enabled. 
Refer to the [AWS documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html) to make sure you are using the `rds_iam` role correctly. for more information. + +If the admin user needs to grant the `rds_superuser` role to auto-provisioned +users, the admin user must also be a `rds_superuser`: +```sql +GRANT rds_superuser TO "teleport-admin"; +``` + +For PostgreSQL 16+, you must grant the `ADMIN` option to the admin user for each +PostgreSQL role that Teleport will assign to your Teleport user. For example, to +allow the admin user to grant and revoke role `reader`: +```sql +GRANT reader TO "teleport-admin" WITH ADMIN OPTION; +``` The self-hosted PostgreSQL admin user must have X.509 authentication configured. @@ -49,6 +62,13 @@ The self-hosted PostgreSQL admin user must have X.509 authentication configured. CREATE USER "teleport-admin" login createrole; ``` +For PostgreSQL 16+, you must grant the `ADMIN` option to the admin user for each +PostgreSQL role that Teleport will assign to your Teleport user. For example, to +allow the admin user to grant and revoke role `reader`: +```sql +GRANT reader TO "teleport-admin" WITH ADMIN OPTION; +``` + Note that the database must be configured to accept client certificate auth for the admin user by having the following entries in `pg_hba.conf`: @@ -63,7 +83,13 @@ to ensure that your configuration is correct. -When [Database Access Controls](../rbac.mdx) feature is in use, the `teleport-admin` should have permissions to relevant database objects. For example: +When [Database Access Controls](../rbac.mdx) feature is in use, the +`teleport-admin` should have permissions to relevant database objects. You can +grant `teleport-admin` the `SUPERUSER` option for self-hosted databases, or the +`rds_superuser` role for RDS databases. + +For improved security through the principle of least privilege, you can also +assign permissions directly to specific database objects. 
For example: ```sql GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA schema1, schema2, schema3 TO "teleport-admin"; @@ -75,10 +101,6 @@ the database, which will be created automatically if it doesn't exist. (!docs/pages/includes/database-access/auto-user-provisioning/db-definition-default-dbname.mdx protocol="postgres" uri="localhost:5432" default="the same database that the user is accessing" !) - -(!docs/pages/includes/database-access/auto-user-provisioning/postgres15-grant-create.mdx!) - - ## Step 2/3. Configure a Teleport role Database permissions are associated with a Teleport role, which can either allocate predefined database roles (configured in each database) or define specific database object permissions directly. Teleport grants these permissions for the duration of the connection. @@ -169,10 +191,6 @@ Users created within the database will: ## Troubleshooting -### Permission denied for schema public error - -(!docs/pages/includes/database-access/auto-user-provisioning/postgres15-grant-create.mdx!) - ### User does not have CONNECT privilege error You may encounter the following error when the admin user or the roles assigned diff --git a/docs/pages/includes/database-access/auto-user-provisioning/postgres15-grant-create.mdx b/docs/pages/includes/database-access/auto-user-provisioning/postgres15-grant-create.mdx deleted file mode 100644 index 012fe0e585848..0000000000000 --- a/docs/pages/includes/database-access/auto-user-provisioning/postgres15-grant-create.mdx +++ /dev/null @@ -1,11 +0,0 @@ -PostgreSQL 15 revokes the `CREATE` permission from all users except a database -owner from the public (or default) schema. - -Grant the admin user `CREATE` privilege so the admin user can create procedures: -```sql -GRANT CREATE ON SCHEMA public TO "teleport-admin"; -``` - -If `admin_user.default_database` is specified, the `CREATE` privilege is only -required for the database specified in the `default_database`. 
Otherwise, you -have to repeat the privilege grant for every database Teleport will access. From 176488fa3bbc1c05a4d3cf8df33e5c920fab818a Mon Sep 17 00:00:00 2001 From: "STeve (Xin) Huang" Date: Thu, 30 Jan 2025 12:52:51 -0500 Subject: [PATCH 07/28] add MongoDB auto-user provisioning prereq (#49478) * add MongoDB auto-user provisioning prereq * try without Admonition --- .../database-access/auto-user-provisioning/mongodb.mdx | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/pages/enroll-resources/database-access/auto-user-provisioning/mongodb.mdx b/docs/pages/enroll-resources/database-access/auto-user-provisioning/mongodb.mdx index 32b58386d39a5..97bff21317697 100644 --- a/docs/pages/enroll-resources/database-access/auto-user-provisioning/mongodb.mdx +++ b/docs/pages/enroll-resources/database-access/auto-user-provisioning/mongodb.mdx @@ -11,6 +11,10 @@ description: Configure automatic user provisioning for MongoDB. - A self-hosted MongoDB database enrolled with your Teleport cluster. Follow the [Teleport documentation](../enroll-self-hosted-databases/mongodb-self-hosted.mdx) to learn how to enroll your database. + Your MongoDB database must have Role-Based Access Control (RBAC) enabled by + setting + [`security.authorization`](https://www.mongodb.com/docs/manual/reference/configuration-options/#mongodb-setting-security.authorization) + to `enabled` in the configuration file. - Ability to connect to and create user accounts in the target database. From f14c4c75aa081a7deaefb0d4f95456dbbd338b45 Mon Sep 17 00:00:00 2001 From: Zac Bergquist Date: Thu, 30 Jan 2025 11:01:56 -0700 Subject: [PATCH 08/28] Don't emit SSO failures to audit log for illegitimate requests (#51614) SSO flows are initiated via unauthenticated APIs, so it's not uncommon to see spam from security tools or malicious bots probing. If we can tell that these are not legitimate SSO attempts then we avoid emitting failures that clutter the audit log.
--- lib/auth/auth_with_roles.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index 379467800318d..ba7b73c1a5e5a 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -4175,7 +4175,14 @@ func (a *ServerWithRoles) CreateGithubAuthRequest(ctx context.Context, req types githubReq, err := a.authServer.CreateGithubAuthRequest(ctx, req) if err != nil { - emitSSOLoginFailureEvent(a.authServer.closeCtx, a.authServer.emitter, events.LoginMethodGithub, err, req.SSOTestFlow) + if trace.IsNotFound(err) { + // This flow is triggered via an unauthenticated endpoint, so it's not unusual to see + // attempts to hit this API with an invalid connector ID. These are not legitimate SSO + // attempts, so avoid cluttering the audit log with them. + a.authServer.logger.InfoContext(ctx, "rejecting invalid GitHub auth request", "connector", req.ConnectorID) + } else { + emitSSOLoginFailureEvent(a.authServer.closeCtx, a.authServer.emitter, events.LoginMethodGithub, err, req.SSOTestFlow) + } return nil, trace.Wrap(err) } From c0b3e62f25301d17859ed05a4d34f5d34af6f75b Mon Sep 17 00:00:00 2001 From: Nic Klaassen Date: Thu, 30 Jan 2025 10:55:32 -0800 Subject: [PATCH 09/28] [vnet] refactor osconfig (#51577) * [vnet] refactor osconfig This PR refactors VNet's OS configuration code. There is now a `osConfigurator` struct that runs the OS configuration loop and calls out to a `targetOSConfigProvider` which determines the current target OS configuration. This `targetOSConfigProvider` can be implemented for the MacOS daemon process by moving some of the existing code around. In a following PR I will implement it for Windows which will call out to the user process to get the current target OS configuration. This PR doesn't really change any of the logic that runs on MacOS, it just refactors things so that we can swap in a different implementation for Windows. 
FYI, after I get the Windows code fully working, I plan to migrate the MacOS code to follow the new way Windows handles things, where the admin process handles all networking and calls out to the user process for everything that uses a Teleport client. This will unify multiple codepaths and eliminate the profileOSConfigProvider added here. * fix lint on linux * use ip-aware function to compute TUN address --- lib/vnet/admin_process_darwin.go | 57 +--- lib/vnet/osconfig.go | 262 ++++------------- lib/vnet/osconfig_darwin.go | 48 +--- lib/vnet/osconfig_windows.go | 31 +++ lib/vnet/profile_osconfig_provider_darwin.go | 278 +++++++++++++++++++ lib/vnet/unsupported_os.go | 19 ++ 6 files changed, 400 insertions(+), 295 deletions(-) create mode 100644 lib/vnet/osconfig_windows.go create mode 100644 lib/vnet/profile_osconfig_provider_darwin.go diff --git a/lib/vnet/admin_process_darwin.go b/lib/vnet/admin_process_darwin.go index f9ee788327842..b2aec9952f3d1 100644 --- a/lib/vnet/admin_process_darwin.go +++ b/lib/vnet/admin_process_darwin.go @@ -48,9 +48,15 @@ func RunDarwinAdminProcess(ctx context.Context, config daemon.Config) error { return trace.Wrap(err) } + osConfigProvider, err := newProfileOSConfigProvider(tunName, config.IPv6Prefix, config.DNSAddr, config.HomePath, config.ClientCred) + if err != nil { + return trace.Wrap(err, "creating profileOSConfigProvider") + } + osConfigurator := newOSConfigurator(osConfigProvider) + errCh := make(chan error) go func() { - errCh <- trace.Wrap(osConfigurationLoop(ctx, tunName, config.IPv6Prefix, config.DNSAddr, config.HomePath, config.ClientCred)) + errCh <- trace.Wrap(osConfigurator.runOSConfigurationLoop(ctx)) }() // Stay alive until we get an error on errCh, indicating that the osConfig loop exited. 
@@ -105,52 +111,3 @@ func createTUNDevice(ctx context.Context) (tun.Device, string, error) { } return dev, name, nil } - -// osConfigurationLoop will keep running until ctx is canceled or an unrecoverable error is encountered, in -// order to keep the host OS configuration up to date. -func osConfigurationLoop(ctx context.Context, tunName, ipv6Prefix, dnsAddr, homePath string, clientCred daemon.ClientCred) error { - osConfigurator, err := newOSConfigurator(tunName, ipv6Prefix, dnsAddr, homePath, clientCred) - if err != nil { - return trace.Wrap(err, "creating OS configurator") - } - defer func() { - if err := osConfigurator.close(); err != nil { - log.ErrorContext(ctx, "Error while closing OS configurator", "error", err) - } - }() - - // Clean up any stale configuration left by a previous VNet instance that may have failed to clean up. - // This is necessary in case any stale /etc/resolver/ entries are still present, we need to - // be able to reach the proxy in order to fetch the vnet_config. - if err := osConfigurator.deconfigureOS(ctx); err != nil { - return trace.Wrap(err, "cleaning up OS configuration on startup") - } - - defer func() { - // Shutting down, deconfigure OS. Pass context.Background because ctx has likely been canceled - // already but we still need to clean up. - if err := osConfigurator.deconfigureOS(context.Background()); err != nil { - log.ErrorContext(ctx, "Error deconfiguring host OS before shutting down.", "error", err) - } - }() - - if err := osConfigurator.updateOSConfiguration(ctx); err != nil { - return trace.Wrap(err, "applying initial OS configuration") - } - - // Re-configure the host OS every 10 seconds. This will pick up any newly logged-in clusters by - // reading profiles from TELEPORT_HOME. 
- const osConfigurationInterval = 10 * time.Second - ticker := time.NewTicker(osConfigurationInterval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if err := osConfigurator.updateOSConfiguration(ctx); err != nil { - return trace.Wrap(err, "updating OS configuration") - } - case <-ctx.Done(): - return ctx.Err() - } - } -} diff --git a/lib/vnet/osconfig.go b/lib/vnet/osconfig.go index 05ac1dcb3a688..1cbe009963ea9 100644 --- a/lib/vnet/osconfig.go +++ b/lib/vnet/osconfig.go @@ -14,25 +14,14 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -// TODO(nklaassen): refactor OS configuration so this file isn't -// platform-specific. -//go:build darwin -// +build darwin - package vnet import ( "context" - "net" + "net/netip" + "time" "github.com/gravitational/trace" - "github.com/jonboulle/clockwork" - - "github.com/gravitational/teleport/api/profile" - "github.com/gravitational/teleport/api/utils" - "github.com/gravitational/teleport/lib/client" - "github.com/gravitational/teleport/lib/client/clientcache" - "github.com/gravitational/teleport/lib/vnet/daemon" ) type osConfig struct { @@ -44,182 +33,33 @@ type osConfig struct { dnsZones []string } -type osConfigurator struct { - clientStore *client.Store - clientCache *clientcache.Cache - clusterConfigCache *ClusterConfigCache - // daemonClientCred are the credentials of the process that contacted the daemon. - daemonClientCred daemon.ClientCred - tunName string - tunIPv6 string - dnsAddr string - homePath string - tunIPv4 string +func configureOS(ctx context.Context, osConfig *osConfig) error { + return trace.Wrap(platformConfigureOS(ctx, osConfig)) } -func newOSConfigurator(tunName, ipv6Prefix, dnsAddr, homePath string, daemonClientCred daemon.ClientCred) (*osConfigurator, error) { - if homePath == "" { - // This runs as root so we need to be configured with the user's home path. 
- return nil, trace.BadParameter("homePath must be passed from unprivileged process") - } - - // ipv6Prefix always looks like "fdxx:xxxx:xxxx::" - // Set the IPv6 address for the TUN to "fdxx:xxxx:xxxx::1", the first valid address in the range. - tunIPv6 := ipv6Prefix + "1" - - configurator := &osConfigurator{ - tunName: tunName, - tunIPv6: tunIPv6, - dnsAddr: dnsAddr, - homePath: homePath, - clientStore: client.NewFSClientStore(homePath), - daemonClientCred: daemonClientCred, - } - configurator.clusterConfigCache = NewClusterConfigCache(clockwork.NewRealClock()) - - clientCache, err := clientcache.New(clientcache.Config{ - NewClientFunc: configurator.getClient, - RetryWithReloginFunc: func(ctx context.Context, tc *client.TeleportClient, fn func() error, opts ...client.RetryWithReloginOption) error { - // osConfigurator is ran from a root process, so there's no way for it to relogin. - // Instead, osConfigurator depends on the user performing a relogin from another process. - return trace.Wrap(fn()) - }, - }) - if err != nil { - return nil, trace.Wrap(err) - } - configurator.clientCache = clientCache - - return configurator, nil +type targetOSConfigProvider interface { + targetOSConfig(context.Context) (*osConfig, error) } -func (c *osConfigurator) close() error { - return trace.Wrap(c.clientCache.Clear()) +type osConfigurator struct { + targetOSConfigProvider targetOSConfigProvider } -// updateOSConfiguration reads tsh profiles out of [c.homePath]. For each profile, it reads the VNet -// config of the root cluster and of each leaf cluster. Then it proceeds to update the OS based on -// information from that config. -// -// For the duration of reading data from clusters, it drops the root privileges, only to regain them -// before configuring the OS. 
-func (c *osConfigurator) updateOSConfiguration(ctx context.Context) error { - var dnsZones []string - var cidrRanges []string - - // Drop privileges to ensure that the user who spawned the daemon client has privileges necessary - // to access c.homePath that it sent when starting the daemon. - // Otherwise a client could make the daemon read a profile out of any directory. - if err := c.doWithDroppedRootPrivileges(ctx, func() error { - profileNames, err := profile.ListProfileNames(c.homePath) - if err != nil { - return trace.Wrap(err, "listing user profiles") - } - for _, profileName := range profileNames { - profileDNSZones, profileCIDRRanges := c.getDNSZonesAndCIDRRangesForProfile(ctx, profileName) - dnsZones = append(dnsZones, profileDNSZones...) - cidrRanges = append(cidrRanges, profileCIDRRanges...) - } - return nil - }); err != nil { - return trace.Wrap(err) - } - - dnsZones = utils.Deduplicate(dnsZones) - cidrRanges = utils.Deduplicate(cidrRanges) - - if c.tunIPv4 == "" && len(cidrRanges) > 0 { - // Choose an IPv4 address for the TUN interface from the CIDR range of one arbitrary currently - // logged-in cluster. Only one IPv4 address is needed. - if err := c.setTunIPv4FromCIDR(cidrRanges[0]); err != nil { - return trace.Wrap(err, "setting TUN IPv4 address") - } +func newOSConfigurator(targetOSConfigProvider targetOSConfigProvider) *osConfigurator { + return &osConfigurator{ + targetOSConfigProvider: targetOSConfigProvider, } - - err := configureOS(ctx, &osConfig{ - tunName: c.tunName, - tunIPv6: c.tunIPv6, - tunIPv4: c.tunIPv4, - dnsAddr: c.dnsAddr, - dnsZones: dnsZones, - cidrRanges: cidrRanges, - }) - return trace.Wrap(err, "configuring OS") } -// getDNSZonesAndCIDRRangesForProfile returns DNS zones and CIDR ranges for the root cluster and its -// leaf clusters. -// -// It's important for this function to return any data it manages to collect. 
For example, if it -// manages to grab DNS zones and CIDR ranges of the root cluster but it fails to list leaf clusters, -// it should still return the zones and ranges of the root cluster. Hence the use of named return -// values. -func (c *osConfigurator) getDNSZonesAndCIDRRangesForProfile(ctx context.Context, profileName string) (dnsZones []string, cidrRanges []string) { - shouldClearCacheForRoot := true - defer func() { - if shouldClearCacheForRoot { - if err := c.clientCache.ClearForRoot(profileName); err != nil { - log.ErrorContext(ctx, "Error while clearing client cache", "profile", profileName, "error", err) - } - } - }() - - rootClient, err := c.clientCache.Get(ctx, profileName, "" /*leafClusterName*/) - if err != nil { - log.WarnContext(ctx, - "Failed to get root cluster client from cache, profile may be expired, not configuring VNet for this cluster", - "profile", profileName, "error", err) - - return - } - clusterConfig, err := c.clusterConfigCache.GetClusterConfig(ctx, rootClient) +func (c *osConfigurator) updateOSConfiguration(ctx context.Context) error { + desiredOSConfig, err := c.targetOSConfigProvider.targetOSConfig(ctx) if err != nil { - log.WarnContext(ctx, - "Failed to load VNet configuration, profile may be expired, not configuring VNet for this cluster", - "profile", profileName, "error", err) - - return + return trace.Wrap(err) } - - dnsZones = append(dnsZones, clusterConfig.DNSZones...) - cidrRanges = append(cidrRanges, clusterConfig.IPv4CIDRRange) - - leafClusters, err := getLeafClusters(ctx, rootClient) - if err != nil { - log.WarnContext(ctx, - "Failed to list leaf clusters, profile may be expired, not configuring VNet for leaf clusters of this cluster", - "profile", profileName, "error", err) - - return + if err := configureOS(ctx, desiredOSConfig); err != nil { + return trace.Wrap(err, "configuring OS") } - - // getLeafClusters was the last call using the root client. 
Do not clear cache if any call to - // a leaf cluster fails – it might fail because of a problem with the leaf cluster, not because of - // an expired cert. - shouldClearCacheForRoot = false - - for _, leafClusterName := range leafClusters { - clusterClient, err := c.clientCache.Get(ctx, profileName, leafClusterName) - if err != nil { - log.WarnContext(ctx, - "Failed to create leaf cluster client, not configuring VNet for this cluster", - "profile", profileName, "leaf_cluster", leafClusterName, "error", err) - continue - } - - clusterConfig, err := c.clusterConfigCache.GetClusterConfig(ctx, clusterClient) - if err != nil { - log.WarnContext(ctx, - "Failed to load VNet configuration, not configuring VNet for this cluster", - "profile", profileName, "leaf_cluster", leafClusterName, "error", err) - continue - } - - dnsZones = append(dnsZones, clusterConfig.DNSZones...) - cidrRanges = append(cidrRanges, clusterConfig.IPv4CIDRRange) - } - - return + return nil } func (c *osConfigurator) deconfigureOS(ctx context.Context) error { @@ -227,36 +67,56 @@ func (c *osConfigurator) deconfigureOS(ctx context.Context) error { return trace.Wrap(configureOS(ctx, &osConfig{})) } -func (c *osConfigurator) setTunIPv4FromCIDR(cidrRange string) error { - if c.tunIPv4 != "" { - return nil +// runOSConfigurationLoop will keep running until ctx is canceled or an +// unrecoverable error is encountered, in order to keep the host OS +// configuration up to date. +func (c *osConfigurator) runOSConfigurationLoop(ctx context.Context) error { + // Clean up any stale configuration left by a previous VNet instance that + // may have failed to clean up. This is necessary in case any stale DNS + // configuration is still present, we need to make sure the proxy is + // reachable without hitting the VNet DNS resolver which is not ready yet. 
+ if err := c.deconfigureOS(ctx); err != nil { + return trace.Wrap(err, "cleaning up OS configuration on startup") } - _, ipnet, err := net.ParseCIDR(cidrRange) - if err != nil { - return trace.Wrap(err, "parsing CIDR %q", cidrRange) + if err := c.updateOSConfiguration(ctx); err != nil { + return trace.Wrap(err, "applying initial OS configuration") } - // ipnet.IP is the network address, ending in 0s, like 100.64.0.0 - // Add 1 to assign the TUN address, like 100.64.0.1 - tunAddress := ipnet.IP - tunAddress[len(tunAddress)-1]++ - c.tunIPv4 = tunAddress.String() - return nil -} + defer func() { + // Shutting down, deconfigure OS. Pass context.Background because ctx has likely been canceled + // already but we still need to clean up. + if err := c.deconfigureOS(context.Background()); err != nil { + log.ErrorContext(ctx, "Error deconfiguring host OS before shutting down.", "error", err) + } + }() -func (c *osConfigurator) getClient(ctx context.Context, profileName, leafClusterName string) (*client.TeleportClient, error) { - // This runs in the root process, so obviously we don't have access to the client cache in the user - // process. This loads cluster profiles and credentials from TELEPORT_HOME. - clientConfig := &client.Config{ - ClientStore: c.clientStore, + // Re-configure the host OS every 10 seconds to pick up any newly logged-in + // clusters or updated DNS zones or CIDR ranges. 
+ const osConfigurationInterval = 10 * time.Second + tick := time.Tick(osConfigurationInterval) + for { + select { + case <-tick: + if err := c.updateOSConfiguration(ctx); err != nil { + return trace.Wrap(err, "updating OS configuration") + } + case <-ctx.Done(): + return trace.Wrap(ctx.Err(), "context canceled, shutting down os configuration loop") + } } - if err := clientConfig.LoadProfile(c.clientStore, profileName); err != nil { - return nil, trace.Wrap(err, "loading client profile") +} + +// tunIPv6ForPrefix returns the IPv6 address to assign to the TUN interface under +// ipv6Prefix. It always returns the second address in the range because the +// first address (ending with all zeros) is the subnet router anycast address. +func tunIPv6ForPrefix(ipv6Prefix string) (string, error) { + addr, err := netip.ParseAddr(ipv6Prefix) + if err != nil { + return "", trace.Wrap(err, "parsing IPv6 prefix %s", ipv6Prefix) } - if leafClusterName != "" { - clientConfig.SiteName = leafClusterName + if !addr.Is6() { + return "", trace.BadParameter("IPv6 prefix %s is not an IPv6 address", ipv6Prefix) } - tc, err := client.NewClient(clientConfig) - return tc, trace.Wrap(err) + return addr.Next().String(), nil } diff --git a/lib/vnet/osconfig_darwin.go b/lib/vnet/osconfig_darwin.go index 27864c80bb400..1fd257a7886c4 100644 --- a/lib/vnet/osconfig_darwin.go +++ b/lib/vnet/osconfig_darwin.go @@ -22,15 +22,14 @@ import ( "os" "os/exec" "path/filepath" - "sync/atomic" - "syscall" "github.com/gravitational/trace" ) -// configureOS configures the host OS according to [cfg]. It is safe to call repeatedly, and it is meant to be -// called with an empty [osConfig] to deconfigure anything necessary before exiting. -func configureOS(ctx context.Context, cfg *osConfig) error { +// platformConfigureOS configures the host OS according to cfg. It is safe to +// call repeatedly, and it is meant to be called with an empty osConfig to +// deconfigure anything necessary before exiting. 
+func platformConfigureOS(ctx context.Context, cfg *osConfig) error { // There is no need to remove IP addresses or routes, they will automatically be cleaned up when the // process exits and the TUN is deleted. @@ -141,42 +140,3 @@ func vnetManagedResolverFiles() (map[string]struct{}, error) { } return matchingFiles, nil } - -var hasDroppedPrivileges atomic.Bool - -// doWithDroppedRootPrivileges drops the privileges of the current process to those of the client -// process that called the VNet daemon. -func (c *osConfigurator) doWithDroppedRootPrivileges(ctx context.Context, fn func() error) (err error) { - if !hasDroppedPrivileges.CompareAndSwap(false, true) { - // At the moment of writing, the VNet daemon wasn't expected to do multiple things in parallel - // with dropped privileges. If you run into this error, consider if employing a mutex is going - // to be enough or if a more elaborate refactoring is required. - return trace.CompareFailed("privileges are being temporarily dropped already") - } - defer hasDroppedPrivileges.Store(false) - - rootEgid := os.Getegid() - rootEuid := os.Geteuid() - - log.InfoContext(ctx, "Temporarily dropping root privileges.", "egid", c.daemonClientCred.Egid, "euid", c.daemonClientCred.Euid) - - if err := syscall.Setegid(c.daemonClientCred.Egid); err != nil { - panic(trace.Wrap(err, "setting egid")) - } - if err := syscall.Seteuid(c.daemonClientCred.Euid); err != nil { - panic(trace.Wrap(err, "setting euid")) - } - - defer func() { - if err := syscall.Seteuid(rootEuid); err != nil { - panic(trace.Wrap(err, "reverting euid")) - } - if err := syscall.Setegid(rootEgid); err != nil { - panic(trace.Wrap(err, "reverting egid")) - } - - log.InfoContext(ctx, "Restored root privileges.", "egid", rootEgid, "euid", rootEuid) - }() - - return trace.Wrap(fn()) -} diff --git a/lib/vnet/osconfig_windows.go b/lib/vnet/osconfig_windows.go new file mode 100644 index 0000000000000..aa47ec43804bb --- /dev/null +++ b/lib/vnet/osconfig_windows.go @@ 
-0,0 +1,31 @@ +// Teleport +// Copyright (C) 2025 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package vnet + +import ( + "context" + + "github.com/gravitational/trace" +) + +// platformConfigureOS configures the host OS according to cfg. It is safe to +// call repeatedly, and it is meant to be called with an empty osConfig to +// deconfigure anything necessary before exiting. +func platformConfigureOS(ctx context.Context, cfg *osConfig) error { + // TODO(nklaassen): implement platformConfigureOS for Windows. + return trace.NotImplemented("platformConfigureOS is not implemented on Windows") +} diff --git a/lib/vnet/profile_osconfig_provider_darwin.go b/lib/vnet/profile_osconfig_provider_darwin.go new file mode 100644 index 0000000000000..8aea54d0175fd --- /dev/null +++ b/lib/vnet/profile_osconfig_provider_darwin.go @@ -0,0 +1,278 @@ +// Teleport +// Copyright (C) 2025 Gravitational, Inc. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package vnet + +import ( + "context" + "net" + "os" + "sync/atomic" + "syscall" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" + + "github.com/gravitational/teleport/api/profile" + "github.com/gravitational/teleport/api/utils" + "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/clientcache" + "github.com/gravitational/teleport/lib/vnet/daemon" +) + +// profileOSConfigProvider implements targetOSConfigProvider for the MacOS +// daemon process. It reads all active profiles from the user's TELEPORT_HOME +// and uses the user certs found there to dial to each cluster to find the +// current vnet_config resource to compute the current target OS configuration. +type profileOSConfigProvider struct { + clientStore *client.Store + clientCache *clientcache.Cache + clusterConfigCache *ClusterConfigCache + // daemonClientCred are the credentials of the process that contacted the daemon. + daemonClientCred daemon.ClientCred + tunName string + tunIPv6 string + dnsAddr string + homePath string + tunIPv4 string +} + +func newProfileOSConfigProvider(tunName, ipv6Prefix, dnsAddr, homePath string, daemonClientCred daemon.ClientCred) (*profileOSConfigProvider, error) { + if homePath == "" { + // This runs as root so we need to be configured with the user's home path. 
+ return nil, trace.BadParameter("homePath must be passed from unprivileged process") + } + tunIP, err := tunIPv6ForPrefix(ipv6Prefix) + if err != nil { + return nil, trace.Wrap(err) + } + + p := &profileOSConfigProvider{ + clientStore: client.NewFSClientStore(homePath), + clusterConfigCache: NewClusterConfigCache(clockwork.NewRealClock()), + daemonClientCred: daemonClientCred, + tunName: tunName, + tunIPv6: tunIP, + dnsAddr: dnsAddr, + homePath: homePath, + } + clientCache, err := clientcache.New(clientcache.Config{ + NewClientFunc: p.getClient, + RetryWithReloginFunc: func(ctx context.Context, tc *client.TeleportClient, fn func() error, opts ...client.RetryWithReloginOption) error { + // profileOSConfigProvider runs in the MacOS daemon process, there's no way for it to relogin. + // Instead, osConfigurator depends on the user performing a relogin from another process. + return trace.Wrap(fn()) + }, + }) + if err != nil { + return nil, trace.Wrap(err) + } + p.clientCache = clientCache + return p, nil +} + +func (p *profileOSConfigProvider) targetOSConfig(ctx context.Context) (*osConfig, error) { + var ( + dnsZones []string + cidrRanges []string + ) + + // Drop privileges to ensure that the user who spawned the daemon client has privileges necessary + // to access p.homePath that it sent when starting the daemon. + // Otherwise a client could make the daemon read a profile out of any directory. + if err := doWithDroppedRootPrivileges(ctx, p.daemonClientCred, func() error { + profileNames, err := profile.ListProfileNames(p.homePath) + if err != nil { + return trace.Wrap(err, "listing user profiles") + } + for _, profileName := range profileNames { + profileDNSZones, profileCIDRRanges := p.getDNSZonesAndCIDRRangesForProfile(ctx, profileName) + dnsZones = append(dnsZones, profileDNSZones...) + cidrRanges = append(cidrRanges, profileCIDRRanges...) 
+ } + return nil + }); err != nil { + return nil, trace.Wrap(err) + } + + dnsZones = utils.Deduplicate(dnsZones) + cidrRanges = utils.Deduplicate(cidrRanges) + + if p.tunIPv4 == "" && len(cidrRanges) > 0 { + // Choose an IPv4 address for the TUN interface from the CIDR range of one arbitrary currently + // logged-in cluster. Only one IPv4 address is needed. + if err := p.setTunIPv4FromCIDR(cidrRanges[0]); err != nil { + return nil, trace.Wrap(err, "setting TUN IPv4 address") + } + } + + return &osConfig{ + tunName: p.tunName, + tunIPv6: p.tunIPv6, + tunIPv4: p.tunIPv4, + dnsAddr: p.dnsAddr, + dnsZones: dnsZones, + cidrRanges: cidrRanges, + }, nil +} + +// getDNSZonesAndCIDRRangesForProfile returns DNS zones and CIDR ranges for the root cluster and its +// leaf clusters. +// +// It's important for this function to return any data it manages to collect. For example, if it +// manages to grab DNS zones and CIDR ranges of the root cluster but it fails to list leaf clusters, +// it should still return the zones and ranges of the root cluster. Hence the use of named return +// values. 
+func (p *profileOSConfigProvider) getDNSZonesAndCIDRRangesForProfile(ctx context.Context, profileName string) (dnsZones []string, cidrRanges []string) { + shouldClearCacheForRoot := true + defer func() { + if shouldClearCacheForRoot { + if err := p.clientCache.ClearForRoot(profileName); err != nil { + log.ErrorContext(ctx, "Error while clearing client cache", "profile", profileName, "error", err) + } + } + }() + + rootClient, err := p.clientCache.Get(ctx, profileName, "" /*leafClusterName*/) + if err != nil { + log.WarnContext(ctx, + "Failed to get root cluster client from cache, profile may be expired, not configuring VNet for this cluster", + "profile", profileName, "error", err) + + return + } + clusterConfig, err := p.clusterConfigCache.GetClusterConfig(ctx, rootClient) + if err != nil { + log.WarnContext(ctx, + "Failed to load VNet configuration, profile may be expired, not configuring VNet for this cluster", + "profile", profileName, "error", err) + + return + } + + dnsZones = append(dnsZones, clusterConfig.DNSZones...) + cidrRanges = append(cidrRanges, clusterConfig.IPv4CIDRRange) + + leafClusters, err := getLeafClusters(ctx, rootClient) + if err != nil { + log.WarnContext(ctx, + "Failed to list leaf clusters, profile may be expired, not configuring VNet for leaf clusters of this cluster", + "profile", profileName, "error", err) + + return + } + + // getLeafClusters was the last call using the root client. Do not clear cache if any call to + // a leaf cluster fails – it might fail because of a problem with the leaf cluster, not because of + // an expired cert. 
+ shouldClearCacheForRoot = false + + for _, leafClusterName := range leafClusters { + clusterClient, err := p.clientCache.Get(ctx, profileName, leafClusterName) + if err != nil { + log.WarnContext(ctx, + "Failed to create leaf cluster client, not configuring VNet for this cluster", + "profile", profileName, "leaf_cluster", leafClusterName, "error", err) + continue + } + + clusterConfig, err := p.clusterConfigCache.GetClusterConfig(ctx, clusterClient) + if err != nil { + log.WarnContext(ctx, + "Failed to load VNet configuration, not configuring VNet for this cluster", + "profile", profileName, "leaf_cluster", leafClusterName, "error", err) + continue + } + + dnsZones = append(dnsZones, clusterConfig.DNSZones...) + cidrRanges = append(cidrRanges, clusterConfig.IPv4CIDRRange) + } + + return +} + +func (p *profileOSConfigProvider) setTunIPv4FromCIDR(cidrRange string) error { + if p.tunIPv4 != "" { + return nil + } + + _, ipnet, err := net.ParseCIDR(cidrRange) + if err != nil { + return trace.Wrap(err, "parsing CIDR %q", cidrRange) + } + + // ipnet.IP is the network address, ending in 0s, like 100.64.0.0 + // Add 1 to assign the TUN address, like 100.64.0.1 + tunAddress := ipnet.IP + tunAddress[len(tunAddress)-1]++ + p.tunIPv4 = tunAddress.String() + return nil +} + +func (p *profileOSConfigProvider) getClient(ctx context.Context, profileName, leafClusterName string) (*client.TeleportClient, error) { + // This runs in the root process, so obviously we don't have access to the client cache in the user + // process. This loads cluster profiles and credentials from TELEPORT_HOME. 
+ clientConfig := &client.Config{ + ClientStore: p.clientStore, + } + if err := clientConfig.LoadProfile(p.clientStore, profileName); err != nil { + return nil, trace.Wrap(err, "loading client profile") + } + if leafClusterName != "" { + clientConfig.SiteName = leafClusterName + } + tc, err := client.NewClient(clientConfig) + return tc, trace.Wrap(err) +} + +var hasDroppedPrivileges atomic.Bool + +// doWithDroppedRootPrivileges drops the privileges of the current process to those of the client +// process that called the VNet daemon. +func doWithDroppedRootPrivileges(ctx context.Context, clientCred daemon.ClientCred, fn func() error) (err error) { + if !hasDroppedPrivileges.CompareAndSwap(false, true) { + // At the moment of writing, the VNet daemon wasn't expected to do multiple things in parallel + // with dropped privileges. If you run into this error, consider if employing a mutex is going + // to be enough or if a more elaborate refactoring is required. + return trace.CompareFailed("privileges are being temporarily dropped already") + } + defer hasDroppedPrivileges.Store(false) + + rootEgid := os.Getegid() + rootEuid := os.Geteuid() + + log.InfoContext(ctx, "Temporarily dropping root privileges.", "egid", clientCred.Egid, "euid", clientCred.Euid) + + if err := syscall.Setegid(clientCred.Egid); err != nil { + panic(trace.Wrap(err, "setting egid")) + } + if err := syscall.Seteuid(clientCred.Euid); err != nil { + panic(trace.Wrap(err, "setting euid")) + } + + defer func() { + if err := syscall.Seteuid(rootEuid); err != nil { + panic(trace.Wrap(err, "reverting euid")) + } + if err := syscall.Setegid(rootEgid); err != nil { + panic(trace.Wrap(err, "reverting egid")) + } + + log.InfoContext(ctx, "Restored root privileges.", "egid", rootEgid, "euid", rootEuid) + }() + + return trace.Wrap(fn()) +} diff --git a/lib/vnet/unsupported_os.go b/lib/vnet/unsupported_os.go index a807101f88801..1b65e366ee44c 100644 --- a/lib/vnet/unsupported_os.go +++ 
b/lib/vnet/unsupported_os.go @@ -32,3 +32,22 @@ var ErrVnetNotImplemented = &trace.NotImplementedError{Message: "VNet is not imp func runPlatformUserProcess(_ context.Context, _ *UserProcessConfig) (*ProcessManager, error) { return nil, trace.Wrap(ErrVnetNotImplemented) } + +func platformConfigureOS(_ context.Context, _ *osConfig) error { + return trace.Wrap(ErrVnetNotImplemented) +} + +// Satisfy unused linter. +var ( + _ = osConfig{ + tunName: "", + tunIPv4: "", + tunIPv6: "", + dnsAddr: "", + cidrRanges: nil, + dnsZones: nil, + } + _ = tunIPv6ForPrefix + _ = newOSConfigurator + _ = (*osConfigurator).runOSConfigurationLoop +) From e3c36a0dbf9db39667bbec076fc9a2d74a321d37 Mon Sep 17 00:00:00 2001 From: Vadym Popov Date: Thu, 30 Jan 2025 12:07:49 -0800 Subject: [PATCH 10/28] Remove timeout for TestUpdate (#51666) --- integration/autoupdate/tools/updater_test.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/integration/autoupdate/tools/updater_test.go b/integration/autoupdate/tools/updater_test.go index a64ec9a1f9d0f..0c05f2524caf0 100644 --- a/integration/autoupdate/tools/updater_test.go +++ b/integration/autoupdate/tools/updater_test.go @@ -46,8 +46,7 @@ var ( // an update to a newer version, expecting it to re-execute with the updated version. func TestUpdate(t *testing.T) { t.Setenv(types.HomeEnvVar, t.TempDir()) - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() + ctx := context.Background() // Fetch compiled test binary with updater logic and install to $TELEPORT_HOME. updater := tools.NewUpdater( @@ -88,8 +87,7 @@ func TestUpdate(t *testing.T) { // the command with the updated version without any new downloads. func TestParallelUpdate(t *testing.T) { t.Setenv(types.HomeEnvVar, t.TempDir()) - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() + ctx := context.Background() // Initial fetch the updater binary un-archive and replace. 
updater := tools.NewUpdater( @@ -162,8 +160,7 @@ func TestParallelUpdate(t *testing.T) { // TestUpdateInterruptSignal verifies the interrupt signal send to the process must stop downloading. func TestUpdateInterruptSignal(t *testing.T) { t.Setenv(types.HomeEnvVar, t.TempDir()) - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() + ctx := context.Background() // Initial fetch the updater binary un-archive and replace. updater := tools.NewUpdater( From b69d49b2a223dd1833bdd0861831c87dc77dc66f Mon Sep 17 00:00:00 2001 From: Gavin Frazar Date: Thu, 30 Jan 2025 12:14:45 -0800 Subject: [PATCH 11/28] Simplify lib/utils/aws (#51627) This eliminates the SigningService in favor of a free function SignRequest, which eliminates AWS credential loading and simplifies the AWS utils package. In the process of doing this refactor, the AWS app handler has been changed to use AWS SDK v2 to load its credentials as well. --- integration/proxy/proxy_helpers.go | 8 +- integration/proxy/proxy_test.go | 4 +- lib/cloud/awsconfig/awsconfig.go | 65 ++++++++- lib/cloud/awsconfig/awsconfig_test.go | 4 +- lib/service/service.go | 13 ++ lib/srv/alpnproxy/aws_local_proxy.go | 4 +- lib/srv/app/aws/handler.go | 30 ++-- lib/srv/app/aws/handler_test.go | 74 +++++----- lib/srv/app/connections_handler.go | 18 +-- lib/srv/app/server_test.go | 2 + lib/srv/db/dynamodb/engine.go | 47 ++++--- lib/srv/db/dynamodb/test.go | 2 +- lib/srv/db/opensearch/engine.go | 49 +++---- lib/utils/aws/aws.go | 11 +- lib/utils/aws/credentials.go | 172 ----------------------- lib/utils/aws/credentials_test.go | 189 -------------------------- lib/utils/aws/signing.go | 140 ++----------------- 17 files changed, 219 insertions(+), 613 deletions(-) delete mode 100644 lib/utils/aws/credentials_test.go diff --git a/integration/proxy/proxy_helpers.go b/integration/proxy/proxy_helpers.go index 3ce8a4f17657e..f41a6f4513b4e 100644 --- a/integration/proxy/proxy_helpers.go +++ 
b/integration/proxy/proxy_helpers.go @@ -33,7 +33,7 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/google/uuid" "github.com/gravitational/trace" "github.com/jackc/pgconn" @@ -605,7 +605,7 @@ func mustParseURL(t *testing.T, rawURL string) *url.URL { type fakeSTSClient struct { accountID string arn string - credentials *credentials.Credentials + credentials aws.CredentialsProvider } func (f fakeSTSClient) Do(req *http.Request) (*http.Response, error) { @@ -640,10 +640,10 @@ func mustCreateIAMJoinProvisionToken(t *testing.T, name, awsAccountID, allowedAR return provisionToken } -func mustRegisterUsingIAMMethod(t *testing.T, proxyAddr utils.NetAddr, token string, credentials *credentials.Credentials) { +func mustRegisterUsingIAMMethod(t *testing.T, proxyAddr utils.NetAddr, token string, credentials aws.CredentialsProvider) { t.Helper() - cred, err := credentials.Get() + cred, err := credentials.Retrieve(context.Background()) require.NoError(t, err) t.Setenv("AWS_ACCESS_KEY_ID", cred.AccessKeyID) diff --git a/integration/proxy/proxy_test.go b/integration/proxy/proxy_test.go index 262b8c1046726..b230a04c0e549 100644 --- a/integration/proxy/proxy_test.go +++ b/integration/proxy/proxy_test.go @@ -31,7 +31,7 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/google/uuid" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" @@ -1739,7 +1739,7 @@ func TestALPNSNIProxyGRPCInsecure(t *testing.T) { nodeAccount := "123456789012" nodeRoleARN := "arn:aws:iam::123456789012:role/test" - nodeCredentials := credentials.NewStaticCredentials("FAKE_ID", "FAKE_KEY", "FAKE_TOKEN") + nodeCredentials := credentials.NewStaticCredentialsProvider("FAKE_ID", "FAKE_KEY", "FAKE_TOKEN") provisionToken := mustCreateIAMJoinProvisionToken(t, "iam-join-token", nodeAccount, nodeRoleARN) suite := newSuite(t, diff --git 
a/lib/cloud/awsconfig/awsconfig.go b/lib/cloud/awsconfig/awsconfig.go index 245fe8a9a6b23..5e6967d2d4909 100644 --- a/lib/cloud/awsconfig/awsconfig.go +++ b/lib/cloud/awsconfig/awsconfig.go @@ -18,12 +18,16 @@ package awsconfig import ( "context" + "crypto/sha1" + "encoding/hex" + "fmt" "log/slog" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/service/sts" + ststypes "github.com/aws/aws-sdk-go-v2/service/sts/types" "github.com/aws/smithy-go/tracing/smithyoteltracing" "github.com/gravitational/trace" "go.opentelemetry.io/otel" @@ -69,7 +73,12 @@ type AssumeRole struct { // RoleARN is the ARN of the role to assume. RoleARN string `json:"role_arn"` // ExternalID is an optional ID to include when assuming the role. - ExternalID string `json:"external_id"` + ExternalID string `json:"external_id,omitempty"` + // SessionName is an optional session name to use when assuming the role. + SessionName string `json:"session_name,omitempty"` + // Tags is a list of STS session tags to pass when assuming the role. + // https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + Tags map[string]string `json:"tags,omitempty"` } // options is a struct of additional options for assuming an AWS role @@ -153,6 +162,18 @@ func WithAssumeRole(roleARN, externalID string) OptionsFn { } } +// WithDetailedAssumeRole configures options needed for assuming an AWS role, +// including optional details like session name, duration, and tags. +func WithDetailedAssumeRole(ar AssumeRole) OptionsFn { + return func(options *options) { + if ar.RoleARN == "" { + // ignore empty role ARN for caller convenience. + return + } + options.assumeRoles = append(options.assumeRoles, ar) + } +} + // WithRetryer sets a custom retryer for the config. 
func WithRetryer(retryer func() aws.Retryer) OptionsFn { return func(options *options) { @@ -302,6 +323,13 @@ func getAssumeRoleProvider(ctx context.Context, clt stscreds.AssumeRoleAPIClient if role.ExternalID != "" { aro.ExternalID = aws.String(role.ExternalID) } + aro.RoleSessionName = maybeHashRoleSessionName(role.SessionName) + for k, v := range role.Tags { + aro.Tags = append(aro.Tags, ststypes.Tag{ + Key: aws.String(k), + Value: aws.String(v), + }) + } }) } @@ -342,3 +370,38 @@ func (p *integrationCredentialsProvider) Retrieve(ctx context.Context) (aws.Cred ).Retrieve(ctx) return cred, trace.Wrap(err) } + +// maybeHashRoleSessionName truncates the role session name and adds a hash +// when the original role session name is greater than AWS character limit +// (64). +func maybeHashRoleSessionName(roleSessionName string) (ret string) { + // maxRoleSessionNameLength is the maximum length of the role session name + // used by the AssumeRole call. + // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html + const maxRoleSessionNameLength = 64 + if len(roleSessionName) <= maxRoleSessionNameLength { + return roleSessionName + } + + const hashLen = 16 + hash := sha1.New() + hash.Write([]byte(roleSessionName)) + hex := hex.EncodeToString(hash.Sum(nil))[:hashLen] + + // "1" for the delimiter. + keepPrefixIndex := maxRoleSessionNameLength - len(hex) - 1 + + // Sanity check. This should never happen since hash length and + // MaxRoleSessionNameLength are both constant. + if keepPrefixIndex < 0 { + keepPrefixIndex = 0 + } + + ret = fmt.Sprintf("%s-%s", roleSessionName[:keepPrefixIndex], hex) + slog.DebugContext(context.Background(), + "AWS role session name is too long. 
Using a hash instead.", + "hashed", ret, + "original", roleSessionName, + ) + return ret +} diff --git a/lib/cloud/awsconfig/awsconfig_test.go b/lib/cloud/awsconfig/awsconfig_test.go index 2de624fe86c54..ca6d5a1576976 100644 --- a/lib/cloud/awsconfig/awsconfig_test.go +++ b/lib/cloud/awsconfig/awsconfig_test.go @@ -251,12 +251,12 @@ func testGetConfigIntegration(t *testing.T, provider Provider) { func TestNewCacheKey(t *testing.T) { roleChain := []AssumeRole{ {RoleARN: "roleA"}, - {RoleARN: "roleB", ExternalID: "abc123"}, + {RoleARN: "roleB", ExternalID: "abc123", SessionName: "alice", Tags: map[string]string{"AKey": "AValue"}}, } got, err := newCacheKey("integration-name", roleChain...) require.NoError(t, err) want := strings.TrimSpace(` -{"integration":"integration-name","role_chain":[{"role_arn":"roleA","external_id":""},{"role_arn":"roleB","external_id":"abc123"}]} +{"integration":"integration-name","role_chain":[{"role_arn":"roleA"},{"role_arn":"roleB","external_id":"abc123","session_name":"alice","tags":{"AKey":"AValue"}}]} `) require.Equal(t, want, got) } diff --git a/lib/service/service.go b/lib/service/service.go index 7003d108b9843..4d43026364c50 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -112,6 +112,7 @@ import ( pgrepl "github.com/gravitational/teleport/lib/client/db/postgres/repl" dbrepl "github.com/gravitational/teleport/lib/client/db/repl" "github.com/gravitational/teleport/lib/cloud" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/cloud/gcp" "github.com/gravitational/teleport/lib/cloud/imds" awsimds "github.com/gravitational/teleport/lib/cloud/imds/aws" @@ -4889,6 +4890,12 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { return awsoidc.NewSessionV1(ctx, conn.Client, region, integration) } + awsConfigProvider, err := awsconfig.NewCache(awsconfig.WithDefaults( + awsconfig.WithOIDCIntegrationClient(conn.Client), + )) + if err != nil { + return 
trace.Wrap(err, "unable to create AWS config provider cache") + } connectionsHandler, err := app.NewConnectionsHandler(process.GracefulExitContext(), &app.ConnectionsHandlerConfig{ Clock: process.Clock, @@ -4903,6 +4910,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error { CipherSuites: cfg.CipherSuites, ServiceComponent: teleport.ComponentWebProxy, AWSSessionProvider: awsSessionGetter, + AWSConfigProvider: awsConfigProvider, }) if err != nil { return trace.Wrap(err) @@ -6218,6 +6226,10 @@ func (process *TeleportProcess) initApps() { return trace.Wrap(err) } + awsConfigProvider, err := awsconfig.NewCache() + if err != nil { + return trace.Wrap(err, "unable to create AWS config provider cache") + } connectionsHandler, err := app.NewConnectionsHandler(process.ExitContext(), &app.ConnectionsHandlerConfig{ Clock: process.Config.Clock, DataDir: process.Config.DataDir, @@ -6232,6 +6244,7 @@ func (process *TeleportProcess) initApps() { ServiceComponent: teleport.ComponentApp, Logger: logger, AWSSessionProvider: awsutils.SessionProviderUsingAmbientCredentials(), + AWSConfigProvider: awsConfigProvider, }) if err != nil { return trace.Wrap(err) diff --git a/lib/srv/alpnproxy/aws_local_proxy.go b/lib/srv/alpnproxy/aws_local_proxy.go index 77980930b2f37..2498058e927d1 100644 --- a/lib/srv/alpnproxy/aws_local_proxy.go +++ b/lib/srv/alpnproxy/aws_local_proxy.go @@ -134,7 +134,7 @@ func (m *AWSAccessMiddleware) HandleRequest(rw http.ResponseWriter, req *http.Re } func (m *AWSAccessMiddleware) handleCommonRequest(rw http.ResponseWriter, req *http.Request) bool { - if err := awsutils.VerifyAWSSignatureV2(req, m.AWSCredentialsProvider); err != nil { + if err := awsutils.VerifyAWSSignature(req, m.AWSCredentialsProvider); err != nil { m.Log.ErrorContext(req.Context(), "AWS signature verification failed", "error", err) rw.WriteHeader(http.StatusForbidden) return true @@ -149,7 +149,7 @@ func (m *AWSAccessMiddleware) handleRequestByAssumedRole(rw 
http.ResponseWriter, aws.ToString(assumedRole.Credentials.SessionToken), ) - if err := awsutils.VerifyAWSSignatureV2(req, credentials); err != nil { + if err := awsutils.VerifyAWSSignature(req, credentials); err != nil { m.Log.ErrorContext(req.Context(), "AWS signature verification failed", "error", err) rw.WriteHeader(http.StatusForbidden) return true diff --git a/lib/srv/app/aws/handler.go b/lib/srv/app/aws/handler.go index 8bd04ea356e89..f7d95e0badf74 100644 --- a/lib/srv/app/aws/handler.go +++ b/lib/srv/app/aws/handler.go @@ -33,6 +33,7 @@ import ( "github.com/jonboulle/clockwork" "github.com/gravitational/teleport" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/httplib/reverseproxy" @@ -53,12 +54,12 @@ type signerHandler struct { // SignerHandlerConfig is the awsSignerHandler configuration. type SignerHandlerConfig struct { + // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. + AWSConfigProvider awsconfig.Provider // Log is a logger for the handler. Log *slog.Logger // RoundTripper is an http.RoundTripper instance used for requests. RoundTripper http.RoundTripper - // SigningService is used to sign requests before forwarding them. - *awsutils.SigningService // Clock is used to override time in tests. Clock clockwork.Clock // MaxHTTPRequestBodySize is the limit on how big a request body can be. @@ -67,8 +68,8 @@ type SignerHandlerConfig struct { // CheckAndSetDefaults validates the AwsSignerHandlerConfig. 
func (cfg *SignerHandlerConfig) CheckAndSetDefaults() error { - if cfg.SigningService == nil { - return trace.BadParameter("missing SigningService") + if cfg.AWSConfigProvider == nil { + return trace.BadParameter("aws config provider missing") } if cfg.RoundTripper == nil { tr, err := defaults.Transport() @@ -165,15 +166,24 @@ func (s *signerHandler) serveCommonRequest(sessCtx *common.SessionContext, w htt return trace.Wrap(err) } - signedReq, err := s.SignRequest(s.closeContext, unsignedReq, + awsCfg, err := s.AWSConfigProvider.GetConfig(s.closeContext, re.SigningRegion, + awsconfig.WithDetailedAssumeRole(awsconfig.AssumeRole{ + RoleARN: sessCtx.Identity.RouteToApp.AWSRoleARN, + ExternalID: sessCtx.App.GetAWSExternalID(), + SessionName: sessCtx.Identity.Username, + }), + awsconfig.WithCredentialsMaybeIntegration(sessCtx.App.GetIntegration()), + ) + if err != nil { + return trace.Wrap(err) + } + + signedReq, err := awsutils.SignRequest(s.closeContext, unsignedReq, &awsutils.SigningCtx{ + Clock: s.Clock, + Credentials: awsCfg.Credentials, SigningName: re.SigningName, SigningRegion: re.SigningRegion, - Expiry: sessCtx.Identity.Expires, - SessionName: sessCtx.Identity.Username, - AWSRoleArn: sessCtx.Identity.RouteToApp.AWSRoleARN, - AWSExternalID: sessCtx.App.GetAWSExternalID(), - Integration: sessCtx.App.GetIntegration(), }) if err != nil { return trace.Wrap(err) diff --git a/lib/srv/app/aws/handler_test.go b/lib/srv/app/aws/handler_test.go index 90dbd4ed46d21..4f19b9fb18a95 100644 --- a/lib/srv/app/aws/handler_test.go +++ b/lib/srv/app/aws/handler_test.go @@ -31,6 +31,7 @@ import ( "testing" "time" + credentialsv2 "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" @@ -50,6 +51,8 @@ import ( "github.com/gravitational/teleport/api/constants" "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/api/types/events" + 
"github.com/gravitational/teleport/lib/cloud/awsconfig" + "github.com/gravitational/teleport/lib/cloud/mocks" libevents "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/events/eventstest" "github.com/gravitational/teleport/lib/srv/app/common" @@ -191,12 +194,19 @@ func TestAWSSignerHandler(t *testing.T) { }) require.NoError(t, err) + awsOIDCIntegration, err := types.NewIntegrationAWSOIDC( + types.Metadata{Name: "my-integration"}, + &types.AWSOIDCIntegrationSpecV1{ + RoleARN: "arn:aws:sts::123456789012:role/TestRole", + }, + ) + require.NoError(t, err) consoleAppWithIntegration, err := types.NewAppV3(types.Metadata{ Name: "awsconsole", }, types.AppSpecV3{ URI: constants.AWSConsoleURL, PublicAddr: "test.local", - Integration: "my-integration", + Integration: awsOIDCIntegration.GetName(), }) require.NoError(t, err) @@ -204,7 +214,7 @@ func TestAWSSignerHandler(t *testing.T) { name string app types.Application awsClientSession *session.Session - awsSessionProvider awsutils.AWSSessionProvider + awsConfigProvider awsconfig.Provider request makeRequest advanceClock time.Duration wantHost string @@ -226,7 +236,7 @@ func TestAWSSignerHandler(t *testing.T) { })), request: s3Request, wantHost: "s3.us-west-2.amazonaws.com", - wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", wantAuthCredService: "s3", wantAuthCredRegion: "us-west-2", wantEventType: &events.AppSessionRequest{}, @@ -242,14 +252,14 @@ func TestAWSSignerHandler(t *testing.T) { Region: aws.String("us-west-2"), })), request: s3Request, - awsSessionProvider: func(ctx context.Context, region, integration string) (*session.Session, error) { - if integration != "my-integration" { - return nil, trace.BadParameter("") - } - return nil, nil + awsConfigProvider: &mocks.AWSConfigProvider{ + OIDCIntegrationClient: &mocks.FakeOIDCIntegrationClient{ + Integration: awsOIDCIntegration, + Token: "fake-oidc-token", + }, }, wantHost: "s3.us-west-2.amazonaws.com", - 
wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", wantAuthCredService: "s3", wantAuthCredRegion: "us-west-2", wantEventType: &events.AppSessionRequest{}, @@ -266,7 +276,7 @@ func TestAWSSignerHandler(t *testing.T) { })), request: s3Request, wantHost: "s3.us-west-1.amazonaws.com", - wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", wantAuthCredService: "s3", wantAuthCredRegion: "us-west-1", wantEventType: &events.AppSessionRequest{}, @@ -314,7 +324,7 @@ func TestAWSSignerHandler(t *testing.T) { })), request: dynamoRequest, wantHost: "dynamodb.us-east-1.amazonaws.com", - wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", wantAuthCredService: "dynamodb", wantAuthCredRegion: "us-east-1", wantEventType: &events.AppSessionDynamoDBRequest{}, @@ -331,7 +341,7 @@ func TestAWSSignerHandler(t *testing.T) { })), request: dynamoRequest, wantHost: "dynamodb.us-west-1.amazonaws.com", - wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", wantAuthCredService: "dynamodb", wantAuthCredRegion: "us-west-1", wantEventType: &events.AppSessionDynamoDBRequest{}, @@ -379,7 +389,7 @@ func TestAWSSignerHandler(t *testing.T) { })), request: lambdaRequest, wantHost: "lambda.us-east-1.amazonaws.com", - wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", wantAuthCredService: "lambda", wantAuthCredRegion: "us-east-1", wantEventType: &events.AppSessionRequest{}, @@ -411,7 +421,7 @@ func TestAWSSignerHandler(t *testing.T) { request: assumeRoleRequest(2 * time.Hour), advanceClock: 10 * time.Minute, wantHost: "sts.amazonaws.com", - wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", wantAuthCredService: "sts", wantAuthCredRegion: "us-east-1", wantEventType: &events.AppSessionRequest{}, @@ -429,7 +439,7 @@ func TestAWSSignerHandler(t *testing.T) { })), request: assumeRoleRequest(32 * time.Minute), wantHost: "sts.amazonaws.com", - wantAuthCredKeyID: "AKIDl", + wantAuthCredKeyID: "FAKEACCESSKEYID", 
wantAuthCredService: "sts", wantAuthCredRegion: "us-east-1", wantEventType: &events.AppSessionRequest{}, @@ -445,14 +455,10 @@ func TestAWSSignerHandler(t *testing.T) { Credentials: staticAWSCredentialsForClient, Region: aws.String("us-east-1"), })), - request: assumeRoleRequest(2 * time.Hour), - advanceClock: 50 * time.Minute, // identity is expiring in 10m which is less than minimum - wantHost: "sts.amazonaws.com", - wantAuthCredKeyID: "AKIDl", - wantAuthCredService: "sts", - wantAuthCredRegion: "us-east-1", - wantEventType: &events.AppSessionRequest{}, + request: assumeRoleRequest(2 * time.Hour), + advanceClock: 50 * time.Minute, // identity is expiring in 10m which is less than minimum errAssertionFns: []require.ErrorAssertionFunc{ + // the request is 403 forbidden by Teleport, so the mock AWS handler won't be sent anything. hasStatusCode(http.StatusForbidden), }, }, @@ -476,7 +482,9 @@ func TestAWSSignerHandler(t *testing.T) { // check that the signature is valid. if !tc.skipVerifySignature { - err = awsutils.VerifyAWSSignature(r, staticAWSCredentials) + err := awsutils.VerifyAWSSignature(r, + credentialsv2.NewStaticCredentialsProvider(tc.wantAuthCredKeyID, "secret", "token"), + ) if !assert.NoError(t, err) { http.Error(w, err.Error(), trace.ErrorToCode(err)) return @@ -490,12 +498,12 @@ func TestAWSSignerHandler(t *testing.T) { w.WriteHeader(http.StatusOK) } - sessionProvider := awsutils.SessionProviderUsingAmbientCredentials() - if tc.awsSessionProvider != nil { - sessionProvider = tc.awsSessionProvider + awsCfgProvider := tc.awsConfigProvider + if awsCfgProvider == nil { + awsCfgProvider = &mocks.AWSConfigProvider{} } - suite := createSuite(t, mockAwsHandler, tc.app, fakeClock, sessionProvider) + suite := createSuite(t, mockAwsHandler, tc.app, fakeClock, awsCfgProvider) fakeClock.Advance(tc.advanceClock) err := tc.request(suite.URL, tc.awsClientSession, tc.wantHost) @@ -603,7 +611,6 @@ const assumedRoleKeyID = "assumedRoleKeyID" var ( 
staticAWSCredentialsForAssumedRole = credentials.NewStaticCredentials(assumedRoleKeyID, "assumedRoleKeySecret", "") - staticAWSCredentials = credentials.NewStaticCredentials("AKIDl", "SECRET", "SESSION") staticAWSCredentialsForClient = credentials.NewStaticCredentials("fakeClientKeyID", "fakeClientSecret", "") ) @@ -614,7 +621,7 @@ type suite struct { recorder *eventstest.ChannelRecorder } -func createSuite(t *testing.T, mockAWSHandler http.HandlerFunc, app types.Application, clock clockwork.Clock, awsSessionProvider awsutils.AWSSessionProvider) *suite { +func createSuite(t *testing.T, mockAWSHandler http.HandlerFunc, app types.Application, clock clockwork.Clock, acp awsconfig.Provider) *suite { recorder := eventstest.NewChannelRecorder(1) identity := tlsca.Identity{ Username: "user", @@ -630,13 +637,6 @@ func createSuite(t *testing.T, mockAWSHandler http.HandlerFunc, app types.Applic awsAPIMock.Close() }) - svc, err := awsutils.NewSigningService(awsutils.SigningServiceConfig{ - SessionProvider: awsSessionProvider, - CredentialsGetter: awsutils.NewStaticCredentialsGetter(staticAWSCredentials), - Clock: clock, - }) - require.NoError(t, err) - audit, err := common.NewAudit(common.AuditConfig{ Emitter: libevents.NewDiscardEmitter(), Recorder: libevents.WithNoOpPreparer(recorder), @@ -644,7 +644,7 @@ func createSuite(t *testing.T, mockAWSHandler http.HandlerFunc, app types.Applic require.NoError(t, err) signerHandler, err := NewAWSSignerHandler(context.Background(), SignerHandlerConfig{ - SigningService: svc, + AWSConfigProvider: acp, RoundTripper: &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, diff --git a/lib/srv/app/connections_handler.go b/lib/srv/app/connections_handler.go index 3fad12e54eaaa..f5a583fc05c77 100644 --- a/lib/srv/app/connections_handler.go +++ b/lib/srv/app/connections_handler.go @@ -45,6 +45,7 @@ import ( "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/auth/authclient" 
"github.com/gravitational/teleport/lib/authz" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/services" @@ -93,6 +94,9 @@ type ConnectionsHandlerConfig struct { // AWSSessionProvider is used to provide AWS Sessions. AWSSessionProvider awsutils.AWSSessionProvider + // AWSConfigProvider provides [aws.Config] for AWS SDK service clients. + AWSConfigProvider awsconfig.Provider + // TLSConfig is the *tls.Config for this server. TLSConfig *tls.Config @@ -142,6 +146,9 @@ func (c *ConnectionsHandlerConfig) CheckAndSetDefaults() error { if c.AWSSessionProvider == nil { return trace.BadParameter("aws session provider missing") } + if c.AWSConfigProvider == nil { + return trace.BadParameter("aws config provider missing") + } if c.Cloud == nil { cloud, err := NewCloud(CloudConfig{ Clock: c.Clock, @@ -206,16 +213,9 @@ func NewConnectionsHandler(closeContext context.Context, cfg *ConnectionsHandler return nil, trace.Wrap(err) } - awsSigner, err := awsutils.NewSigningService(awsutils.SigningServiceConfig{ - Clock: cfg.Clock, - SessionProvider: cfg.AWSSessionProvider, - }) - if err != nil { - return nil, trace.Wrap(err) - } awsHandler, err := appaws.NewAWSSignerHandler(closeContext, appaws.SignerHandlerConfig{ - SigningService: awsSigner, - Clock: cfg.Clock, + AWSConfigProvider: cfg.AWSConfigProvider, + Clock: cfg.Clock, }) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/srv/app/server_test.go b/lib/srv/app/server_test.go index 1c0c8b4f322b3..3f0da8abda9b6 100644 --- a/lib/srv/app/server_test.go +++ b/lib/srv/app/server_test.go @@ -56,6 +56,7 @@ import ( "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/auth/authclient" "github.com/gravitational/teleport/lib/authz" + "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/cryptosuites" 
"github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/httplib/reverseproxy" @@ -365,6 +366,7 @@ func SetUpSuiteWithConfig(t *testing.T, config suiteConfig) *Suite { CipherSuites: utils.DefaultCipherSuites(), ServiceComponent: teleport.ComponentApp, AWSSessionProvider: aws.SessionProviderUsingAmbientCredentials(), + AWSConfigProvider: &mocks.AWSConfigProvider{}, }) require.NoError(t, err) diff --git a/lib/srv/db/dynamodb/engine.go b/lib/srv/db/dynamodb/engine.go index 734ff51568f2c..165d856677f9d 100644 --- a/lib/srv/db/dynamodb/engine.go +++ b/lib/srv/db/dynamodb/engine.go @@ -40,6 +40,7 @@ import ( "github.com/gravitational/teleport" apievents "github.com/gravitational/teleport/api/types/events" apiaws "github.com/gravitational/teleport/api/utils/aws" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/modules" @@ -138,14 +139,6 @@ func (e *Engine) HandleConnection(ctx context.Context, _ *common.Session) error } defer e.Audit.OnSessionEnd(e.Context, e.sessionCtx) - signer, err := libaws.NewSigningService(libaws.SigningServiceConfig{ - Clock: e.Clock, - AWSConfigProvider: e.AWSConfigProvider, - }) - if err != nil { - return trace.Wrap(err) - } - clientConnReader := bufio.NewReader(e.clientConn) observe() @@ -159,7 +152,7 @@ func (e *Engine) HandleConnection(ctx context.Context, _ *common.Session) error return trace.Wrap(err) } - if err := e.process(ctx, req, signer, msgFromClient, msgFromServer); err != nil { + if err := e.process(ctx, req, msgFromClient, msgFromServer); err != nil { return trace.Wrap(err) } } @@ -167,7 +160,7 @@ func (e *Engine) HandleConnection(ctx context.Context, _ *common.Session) error // process reads request from connected dynamodb client, processes the requests/responses and sends data back // to the client. 
-func (e *Engine) process(ctx context.Context, req *http.Request, signer *libaws.SigningService, msgFromClient prometheus.Counter, msgFromServer prometheus.Counter) (err error) { +func (e *Engine) process(ctx context.Context, req *http.Request, msgFromClient prometheus.Counter, msgFromServer prometheus.Counter) (err error) { msgFromClient.Inc() if req.Body != nil { @@ -210,20 +203,32 @@ func (e *Engine) process(ctx context.Context, req *http.Request, signer *libaws. if err != nil { return trace.Wrap(err) } - signingCtx := &libaws.SigningCtx{ - SigningName: re.SigningName, - SigningRegion: re.SigningRegion, - Expiry: e.sessionCtx.Identity.Expires, - SessionName: e.sessionCtx.Identity.Username, - BaseAWSRoleARN: meta.AssumeRoleARN, - BaseAWSExternalID: meta.ExternalID, - AWSRoleArn: roleArn, - SessionTags: e.sessionCtx.Database.GetAWS().SessionTags, + + ar := awsconfig.AssumeRole{ + RoleARN: roleArn, + SessionName: e.sessionCtx.Identity.Username, + Tags: meta.SessionTags, } if meta.AssumeRoleARN == "" { - signingCtx.AWSExternalID = meta.ExternalID + ar.ExternalID = meta.ExternalID } - signedReq, err := signer.SignRequest(e.Context, outReq, signingCtx) + awsCfg, err := e.AWSConfigProvider.GetConfig(ctx, meta.Region, + awsconfig.WithAssumeRole(meta.AssumeRoleARN, meta.ExternalID), + awsconfig.WithDetailedAssumeRole(ar), + awsconfig.WithAmbientCredentials(), + ) + if err != nil { + return trace.Wrap(err) + } + + signingCtx := &libaws.SigningCtx{ + Clock: e.Clock, + Credentials: awsCfg.Credentials, + SigningName: re.SigningName, + SigningRegion: re.SigningRegion, + } + + signedReq, err := libaws.SignRequest(e.Context, outReq, signingCtx) if err != nil { return trace.Wrap(err) } diff --git a/lib/srv/db/dynamodb/test.go b/lib/srv/db/dynamodb/test.go index b12d0493ea75e..e91125dff69f8 100644 --- a/lib/srv/db/dynamodb/test.go +++ b/lib/srv/db/dynamodb/test.go @@ -101,7 +101,7 @@ func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*T mux := 
http.NewServeMux() mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - err := awsutils.VerifyAWSSignatureV2(r, + err := awsutils.VerifyAWSSignature(r, credentials.NewStaticCredentialsProvider("FAKEACCESSKEYID", "secret", "token"), ) if err != nil { diff --git a/lib/srv/db/opensearch/engine.go b/lib/srv/db/opensearch/engine.go index 4f54f4b5a282d..f017f5c7a8369 100644 --- a/lib/srv/db/opensearch/engine.go +++ b/lib/srv/db/opensearch/engine.go @@ -34,6 +34,7 @@ import ( "github.com/gravitational/teleport" apievents "github.com/gravitational/teleport/api/types/events" "github.com/gravitational/teleport/api/types/wrappers" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/srv/db/common" @@ -133,14 +134,6 @@ func (e *Engine) HandleConnection(ctx context.Context, _ *common.Session) error e.Audit.OnSessionStart(e.Context, e.sessionCtx, err) return trace.Wrap(err) } - signer, err := libaws.NewSigningService(libaws.SigningServiceConfig{ - Clock: e.Clock, - AWSConfigProvider: e.AWSConfigProvider, - }) - if err != nil { - return trace.Wrap(err) - } - // TODO(Tener): // Consider rewriting to support HTTP2 clients. // Ideally we should have shared middleware for DB clients using HTTP, instead of separate per-engine implementations. @@ -166,7 +159,7 @@ func (e *Engine) HandleConnection(ctx context.Context, _ *common.Session) error return trace.Wrap(err) } - if err := e.process(ctx, tr, signer, req, msgFromClient, msgFromServer); err != nil { + if err := e.process(ctx, tr, req, msgFromClient, msgFromServer); err != nil { return trace.Wrap(err) } } @@ -174,7 +167,7 @@ func (e *Engine) HandleConnection(ctx context.Context, _ *common.Session) error // process reads request from connected OpenSearch client, processes the requests/responses and send data back // to the client. 
-func (e *Engine) process(ctx context.Context, tr *http.Transport, signer *libaws.SigningService, req *http.Request, msgFromClient prometheus.Counter, msgFromServer prometheus.Counter) error { +func (e *Engine) process(ctx context.Context, tr *http.Transport, req *http.Request, msgFromClient prometheus.Counter, msgFromServer prometheus.Counter) error { msgFromClient.Inc() if req.Body != nil { @@ -193,7 +186,7 @@ func (e *Engine) process(ctx context.Context, tr *http.Transport, signer *libaws e.emitAuditEvent(reqCopy, payload, responseStatusCode, err == nil) }() - signedReq, err := e.getSignedRequest(signer, reqCopy) + signedReq, err := e.getSignedRequest(reqCopy) if err != nil { return trace.Wrap(err) } @@ -225,31 +218,33 @@ func (e *Engine) getTransport(ctx context.Context) (*http.Transport, error) { return tr, nil } -func (e *Engine) getSignedRequest(signer *libaws.SigningService, reqCopy *http.Request) (*http.Request, error) { +func (e *Engine) getSignedRequest(reqCopy *http.Request) (*http.Request, error) { roleArn, err := libaws.BuildRoleARN(e.sessionCtx.DatabaseUser, e.sessionCtx.Database.GetAWS().Region, e.sessionCtx.Database.GetAWS().AccountID) if err != nil { return nil, trace.Wrap(err) } meta := e.sessionCtx.Database.GetAWS() - signCtx := &libaws.SigningCtx{ - SigningName: "es", - SigningRegion: meta.Region, - Expiry: e.sessionCtx.Identity.Expires, - SessionName: e.sessionCtx.Identity.Username, - BaseAWSRoleARN: meta.AssumeRoleARN, - BaseAWSExternalID: meta.ExternalID, - AWSRoleArn: roleArn, - // OpenSearch uses meta.ExternalID for both the base role and the - // assumed role. 
- AWSExternalID: meta.ExternalID, + awsCfg, err := e.AWSConfigProvider.GetConfig(e.Context, meta.Region, + awsconfig.WithAssumeRole(meta.AssumeRoleARN, meta.ExternalID), + awsconfig.WithDetailedAssumeRole(awsconfig.AssumeRole{ + RoleARN: roleArn, + ExternalID: meta.ExternalID, + SessionName: e.sessionCtx.Identity.Username, + }), + awsconfig.WithAmbientCredentials(), + ) + if err != nil { + return nil, trace.Wrap(err) } - - if meta.AssumeRoleARN == "" { - signCtx.AWSExternalID = meta.ExternalID + signCtx := &libaws.SigningCtx{ + Clock: e.Clock, + Credentials: awsCfg.Credentials, + SigningName: "es", + SigningRegion: e.sessionCtx.Database.GetAWS().Region, } - signedReq, err := signer.SignRequest(e.Context, reqCopy, signCtx) + signedReq, err := libaws.SignRequest(e.Context, reqCopy, signCtx) if err != nil { return nil, trace.Wrap(err) } diff --git a/lib/utils/aws/aws.go b/lib/utils/aws/aws.go index c06636c109150..084a6383e80f7 100644 --- a/lib/utils/aws/aws.go +++ b/lib/utils/aws/aws.go @@ -159,22 +159,17 @@ func IsSignedByAWSSigV4(r *http.Request) bool { return strings.HasPrefix(r.Header.Get(AuthorizationHeader), AmazonSigV4AuthorizationPrefix) } -// VerifyAWSSignatureV2 is a temporary AWS SDK migration helper. -func VerifyAWSSignatureV2(req *http.Request, provider aws.CredentialsProvider) error { - return VerifyAWSSignature(req, migration.NewCredentialsAdapter(provider)) -} - // VerifyAWSSignature verifies the request signature ensuring that the request originates from tsh aws command execution // AWS CLI signs the request with random generated credentials that are passed to LocalProxy by // the AWSCredentials LocalProxyConfig configuration. 
-func VerifyAWSSignature(req *http.Request, credentials *credentials.Credentials) error { +func VerifyAWSSignature(req *http.Request, credProvider aws.CredentialsProvider) error { sigV4, err := ParseSigV4(req.Header.Get("Authorization")) if err != nil { return trace.BadParameter(err.Error()) } // Verifies the request is signed by the expected access key ID. - credValue, err := credentials.Get() + credValue, err := credProvider.Retrieve(req.Context()) if err != nil { return trace.Wrap(err) } @@ -212,7 +207,7 @@ func VerifyAWSSignature(req *http.Request, credentials *credentials.Credentials) return trace.BadParameter(err.Error()) } - signer := NewSigner(credentials, sigV4.Service) + signer := NewSignerV2(credProvider, sigV4.Service) _, err = signer.Sign(reqCopy, bytes.NewReader(payload), sigV4.Service, sigV4.Region, t) if err != nil { return trace.Wrap(err) diff --git a/lib/utils/aws/credentials.go b/lib/utils/aws/credentials.go index 47c3105174943..8be99d898e5f7 100644 --- a/lib/utils/aws/credentials.go +++ b/lib/utils/aws/credentials.go @@ -20,187 +20,15 @@ package aws import ( "context" - "log/slog" - "sort" - "strings" - "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/sts" "github.com/gravitational/trace" - "github.com/jonboulle/clockwork" "github.com/gravitational/teleport/lib/modules" - "github.com/gravitational/teleport/lib/utils" ) -// GetCredentialsRequest is the request for obtaining STS credentials. -type GetCredentialsRequest struct { - // Provider is the user session used to create the STS client. - Provider client.ConfigProvider - // Expiry is session expiry to be requested. - Expiry time.Time - // SessionName is the session name to be requested. 
- SessionName string - // RoleARN is the role ARN to be requested. - RoleARN string - // ExternalID is the external ID to be requested, if not empty. - ExternalID string - // Tags is a list of AWS STS session tags. - Tags map[string]string -} - -// CredentialsGetter defines an interface for obtaining STS credentials. -type CredentialsGetter interface { - // Get obtains STS credentials. - Get(ctx context.Context, request GetCredentialsRequest) (*credentials.Credentials, error) -} - -type credentialsGetter struct { -} - -// NewCredentialsGetter returns a new CredentialsGetter. -func NewCredentialsGetter() CredentialsGetter { - return &credentialsGetter{} -} - -// Get obtains STS credentials. -func (g *credentialsGetter) Get(ctx context.Context, request GetCredentialsRequest) (*credentials.Credentials, error) { - slog.DebugContext(ctx, "Creating STS session", - "session_name", request.SessionName, - "role_arn", request.RoleARN, - ) - return stscreds.NewCredentials(request.Provider, request.RoleARN, - func(cred *stscreds.AssumeRoleProvider) { - cred.RoleSessionName = MaybeHashRoleSessionName(request.SessionName) - cred.Expiry.SetExpiration(request.Expiry, 0) - - if request.ExternalID != "" { - cred.ExternalID = aws.String(request.ExternalID) - } - - cred.Tags = make([]*sts.Tag, 0, len(request.Tags)) - for key, value := range request.Tags { - cred.Tags = append(cred.Tags, &sts.Tag{Key: aws.String(key), Value: aws.String(value)}) - } - }, - ), nil -} - -// CachedCredentialsGetterConfig is the config for creating a CredentialsGetter that caches credentials. -type CachedCredentialsGetterConfig struct { - // Getter is the CredentialsGetter for obtaining the STS credentials. - Getter CredentialsGetter - // CacheTTL is the cache TTL. - CacheTTL time.Duration - // Clock is used to control time. - Clock clockwork.Clock -} - -// SetDefaults sets default values for CachedCredentialsGetterConfig. 
-func (c *CachedCredentialsGetterConfig) SetDefaults() { - if c.Getter == nil { - c.Getter = NewCredentialsGetter() - } - if c.CacheTTL <= 0 { - c.CacheTTL = time.Minute - } - if c.Clock == nil { - c.Clock = clockwork.NewRealClock() - } -} - -// credentialRequestCacheKey credentials request cache key. -type credentialRequestCacheKey struct { - provider client.ConfigProvider - expiry time.Time - sessionName string - roleARN string - externalID string - tags string -} - -// newCredentialRequestCacheKey creates a new cache key for the credentials -// request. -func newCredentialRequestCacheKey(req GetCredentialsRequest) credentialRequestCacheKey { - k := credentialRequestCacheKey{ - provider: req.Provider, - expiry: req.Expiry, - sessionName: req.SessionName, - roleARN: req.RoleARN, - externalID: req.ExternalID, - } - - tags := make([]string, 0, len(req.Tags)) - for key, value := range req.Tags { - tags = append(tags, key+"="+value+",") - } - sort.Strings(tags) - k.tags = strings.Join(tags, ",") - - return k -} - -type cachedCredentialsGetter struct { - config CachedCredentialsGetterConfig - cache *utils.FnCache -} - -// NewCachedCredentialsGetter returns a CredentialsGetter that caches credentials. -func NewCachedCredentialsGetter(config CachedCredentialsGetterConfig) (CredentialsGetter, error) { - config.SetDefaults() - - cache, err := utils.NewFnCache(utils.FnCacheConfig{ - TTL: config.CacheTTL, - Clock: config.Clock, - }) - if err != nil { - return nil, trace.Wrap(err) - } - - return &cachedCredentialsGetter{ - config: config, - cache: cache, - }, nil -} - -// Get returns cached credentials if found, or fetch it from the configured -// getter. 
-func (g *cachedCredentialsGetter) Get(ctx context.Context, request GetCredentialsRequest) (*credentials.Credentials, error) { - credentials, err := utils.FnCacheGet(ctx, g.cache, newCredentialRequestCacheKey(request), func(ctx context.Context) (*credentials.Credentials, error) { - credentials, err := g.config.Getter.Get(ctx, request) - return credentials, trace.Wrap(err) - }) - return credentials, trace.Wrap(err) -} - -type staticCredentialsGetter struct { - credentials *credentials.Credentials -} - -// NewStaticCredentialsGetter returns a CredentialsGetter that always returns -// the same provided credentials. -// -// Used in testing to mock CredentialsGetter. -func NewStaticCredentialsGetter(credentials *credentials.Credentials) CredentialsGetter { - return &staticCredentialsGetter{ - credentials: credentials, - } -} - -// Get returns the credentials provided to NewStaticCredentialsGetter. -func (g *staticCredentialsGetter) Get(_ context.Context, _ GetCredentialsRequest) (*credentials.Credentials, error) { - if g.credentials == nil { - return nil, trace.NotFound("no credentials found") - } - return g.credentials, nil -} - // AWSSessionProvider defines a function that creates an AWS Session. // It must use ambient credentials if Integration is empty. // It must use Integration credentials otherwise. diff --git a/lib/utils/aws/credentials_test.go b/lib/utils/aws/credentials_test.go deleted file mode 100644 index 3f682f3462cc5..0000000000000 --- a/lib/utils/aws/credentials_test.go +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package aws - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/google/uuid" - "github.com/jonboulle/clockwork" - "github.com/stretchr/testify/require" -) - -type fakeCredentialsGetter struct { -} - -func (f *fakeCredentialsGetter) Get(ctx context.Context, request GetCredentialsRequest) (*credentials.Credentials, error) { - return credentials.NewStaticCredentials( - fmt.Sprintf("%s-%s-%s", request.SessionName, request.RoleARN, request.ExternalID), - uuid.NewString(), - uuid.NewString(), - ), nil -} - -func TestCachedCredentialsGetter(t *testing.T) { - t.Parallel() - - hostSession := session.Must(session.NewSession(&aws.Config{ - Credentials: credentials.AnonymousCredentials, - Region: aws.String("us-west-2"), - })) - fakeClock := clockwork.NewFakeClock() - - getter, err := NewCachedCredentialsGetter(CachedCredentialsGetterConfig{ - Getter: &fakeCredentialsGetter{}, - CacheTTL: time.Minute, - Clock: fakeClock, - }) - require.NoError(t, err) - - cred1, err := getter.Get(context.Background(), GetCredentialsRequest{ - Provider: hostSession, - Expiry: fakeClock.Now().Add(time.Hour), - SessionName: "test-session", - RoleARN: "test-role", - Tags: map[string]string{ - "one": "1", - "two": "2", - "three": "3", - }, - }) - require.NoError(t, err) - checkCredentialsAccessKeyID(t, cred1, "test-session-test-role-") - - tests := []struct { - name string - sessionName string - roleARN string - externalID string - tags map[string]string - advanceClock 
time.Duration - compareCred1 require.ComparisonAssertionFunc - }{ - { - name: "cached", - sessionName: "test-session", - roleARN: "test-role", - tags: map[string]string{ - "one": "1", - "two": "2", - "three": "3", - }, - compareCred1: require.Same, - }, - { - name: "cached different tags order", - sessionName: "test-session", - roleARN: "test-role", - tags: map[string]string{ - "three": "3", - "two": "2", - "one": "1", - }, - compareCred1: require.Same, - }, - { - name: "different session name", - sessionName: "test-session-2", - roleARN: "test-role", - tags: map[string]string{ - "one": "1", - "two": "2", - "three": "3", - }, - compareCred1: require.NotSame, - }, - { - name: "different role ARN", - sessionName: "test-session", - roleARN: "test-role-2", - tags: map[string]string{ - "one": "1", - "two": "2", - "three": "3", - }, - compareCred1: require.NotSame, - }, - { - name: "different external ID", - sessionName: "test-session", - roleARN: "test-role", - externalID: "test-id", - tags: map[string]string{ - "one": "1", - "two": "2", - "three": "3", - }, - compareCred1: require.NotSame, - }, - { - name: "different tags", - sessionName: "test-session", - roleARN: "test-role", - tags: map[string]string{ - "four": "4", - "five": "5", - }, - compareCred1: require.NotSame, - }, - { - name: "cache expired", - sessionName: "test-session", - roleARN: "test-role", - tags: map[string]string{ - "one": "1", - "two": "2", - "three": "3", - }, - advanceClock: time.Hour, - compareCred1: require.NotSame, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - fakeClock.Advance(test.advanceClock) - - cred, err := getter.Get(context.Background(), GetCredentialsRequest{ - Provider: hostSession, - Expiry: fakeClock.Now().Add(time.Hour), - SessionName: test.sessionName, - RoleARN: test.roleARN, - ExternalID: test.externalID, - Tags: test.tags, - }) - require.NoError(t, err) - checkCredentialsAccessKeyID(t, cred, fmt.Sprintf("%s-%s-%s", test.sessionName, 
test.roleARN, test.externalID)) - test.compareCred1(t, cred1, cred) - }) - } -} - -func checkCredentialsAccessKeyID(t *testing.T, cred *credentials.Credentials, wantAccessKeyID string) { - t.Helper() - value, err := cred.Get() - require.NoError(t, err) - require.Equal(t, wantAccessKeyID, value.AccessKeyID) -} diff --git a/lib/utils/aws/signing.go b/lib/utils/aws/signing.go index 6549265aed676..31d29532c20d8 100644 --- a/lib/utils/aws/signing.go +++ b/lib/utils/aws/signing.go @@ -23,114 +23,35 @@ import ( "context" "io" "net/http" - "time" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" - "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/utils" ) -// NewSigningService creates a new instance of SigningService. -func NewSigningService(config SigningServiceConfig) (*SigningService, error) { - if err := config.CheckAndSetDefaults(); err != nil { - return nil, trace.Wrap(err) - } - return &SigningService{ - SigningServiceConfig: config, - }, nil -} - -// SigningService is an AWS CLI proxy service that signs AWS requests -// based on user identity. -type SigningService struct { - // SigningServiceConfig is the SigningService configuration. - SigningServiceConfig -} - -// SigningServiceConfig is the SigningService configuration. -type SigningServiceConfig struct { - // SessionProvider is a provider for AWS Sessions. - SessionProvider AWSSessionProvider - // Clock is used to override time in tests. - Clock clockwork.Clock - // CredentialsGetter is used to obtain STS credentials. - CredentialsGetter CredentialsGetter - // AWSConfigProvider is a provider for AWS configs. - AWSConfigProvider awsconfig.Provider -} - -// CheckAndSetDefaults validates the SigningServiceConfig config. 
-func (s *SigningServiceConfig) CheckAndSetDefaults() error { - if s.Clock == nil { - s.Clock = clockwork.NewRealClock() - } - - if s.AWSConfigProvider == nil { - if s.SessionProvider == nil { - return trace.BadParameter("session provider or config provider is required") - } - if s.CredentialsGetter == nil { - // Use cachedCredentialsGetter by default. cachedCredentialsGetter - // caches the credentials for one minute. - cachedGetter, err := NewCachedCredentialsGetter(CachedCredentialsGetterConfig{ - Clock: s.Clock, - }) - if err != nil { - return trace.Wrap(err) - } - - s.CredentialsGetter = cachedGetter - } - } - return nil -} - // SigningCtx contains AWS SigV4 signing context parameters. type SigningCtx struct { + // Clock is used to override time in tests. + Clock clockwork.Clock + // Credentials provides AWS credentials. + Credentials aws.CredentialsProvider // SigningName is the AWS signing service name. SigningName string // SigningRegion is the AWS region to sign a request for. SigningRegion string - // Expiry is the expiration of the AWS credentials used to sign requests. - Expiry time.Time - // SessionName is role session name of AWS credentials used to sign requests. - SessionName string - // BaseAWSRoleARN is the AWS ARN of the role as a base to the assumed roles. - BaseAWSRoleARN string - // BaseAWSRoleARN is an optional external ID used on base assumed role. - BaseAWSExternalID string - // AWSRoleArn is the AWS ARN of the role to assume for signing requests, - // chained with BaseAWSRoleARN. - AWSRoleArn string - // AWSExternalID is an optional external ID used when getting sts credentials. - AWSExternalID string - // SessionTags is a list of AWS STS session tags. - SessionTags map[string]string - // Integration is the Integration name to use to generate credentials. - // If empty, it will use ambient credentials - Integration string } // Check checks signing context parameters. 
-func (sc *SigningCtx) Check(clock clockwork.Clock) error { +func (sc *SigningCtx) Check() error { switch { + case sc.Credentials == nil: + return trace.BadParameter("missing AWS credentials") case sc.SigningName == "": return trace.BadParameter("missing AWS signing name") case sc.SigningRegion == "": return trace.BadParameter("missing AWS signing region") - case sc.SessionName == "": - return trace.BadParameter("missing AWS session name") - case sc.AWSRoleArn == "": - return trace.BadParameter("missing AWS Role ARN") - case sc.Expiry.Before(clock.Now()): - return trace.BadParameter("AWS SigV4 expiry has already expired") - } - _, err := ParseRoleARN(sc.AWSRoleArn) - if err != nil { - return trace.Wrap(err) } return nil } @@ -151,11 +72,11 @@ func (sc *SigningCtx) Check(clock clockwork.Clock) error { // Not that for endpoint resolving the https://github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go // package is used and when Amazon releases a new API the dependency update is needed. // 5. Sign HTTP request. -func (s *SigningService) SignRequest(ctx context.Context, req *http.Request, signCtx *SigningCtx) (*http.Request, error) { +func SignRequest(ctx context.Context, req *http.Request, signCtx *SigningCtx) (*http.Request, error) { if signCtx == nil { return nil, trace.BadParameter("missing signing context") } - if err := signCtx.Check(s.Clock); err != nil { + if err := signCtx.Check(); err != nil { return nil, trace.Wrap(err) } payload, err := utils.GetAndReplaceRequestBody(req) @@ -173,11 +94,8 @@ func (s *SigningService) SignRequest(ctx context.Context, req *http.Request, sig // 100-continue" headers without being signed, otherwise the Athena service // would reject the requests. 
unsignedHeaders := removeUnsignedHeaders(reqCopy) - signer, err := s.newSigner(ctx, signCtx) - if err != nil { - return nil, trace.Wrap(err) - } - _, err = signer.Sign(reqCopy, bytes.NewReader(payload), signCtx.SigningName, signCtx.SigningRegion, s.Clock.Now()) + signer := NewSignerV2(signCtx.Credentials, signCtx.SigningName) + _, err = signer.Sign(reqCopy, bytes.NewReader(payload), signCtx.SigningName, signCtx.SigningRegion, signCtx.Clock.Now()) if err != nil { return nil, trace.Wrap(err) } @@ -187,40 +105,6 @@ func (s *SigningService) SignRequest(ctx context.Context, req *http.Request, sig return reqCopy, nil } -// TODO(gabrielcorado): once all service callers are updated to use -// AWSConfigProvider, make it required and remove session provider and -// credentials getter fallback. -func (s *SigningService) newSigner(ctx context.Context, signCtx *SigningCtx) (*v4.Signer, error) { - if s.AWSConfigProvider != nil { - awsCfg, err := s.AWSConfigProvider.GetConfig(ctx, signCtx.SigningRegion, - awsconfig.WithAssumeRole(signCtx.BaseAWSRoleARN, signCtx.BaseAWSExternalID), - awsconfig.WithAssumeRole(signCtx.AWSRoleArn, signCtx.AWSExternalID), - awsconfig.WithCredentialsMaybeIntegration(signCtx.Integration), - ) - if err != nil { - return nil, trace.Wrap(err) - } - return NewSignerV2(awsCfg.Credentials, signCtx.SigningName), nil - } - - session, err := s.SessionProvider(ctx, signCtx.SigningRegion, signCtx.Integration) - if err != nil { - return nil, trace.Wrap(err) - } - credentials, err := s.CredentialsGetter.Get(ctx, GetCredentialsRequest{ - Provider: session, - Expiry: signCtx.Expiry, - SessionName: signCtx.SessionName, - RoleARN: signCtx.AWSRoleArn, - ExternalID: signCtx.AWSExternalID, - Tags: signCtx.SessionTags, - }) - if err != nil { - return nil, trace.Wrap(err) - } - return NewSigner(credentials, signCtx.SigningName), nil -} - // removeUnsignedHeaders removes and returns header keys that are not included in SigV4 SignedHeaders. 
// If the request is not already signed, then no headers are removed. func removeUnsignedHeaders(reqCopy *http.Request) []string { From a24fe3e258fa6db164b797e64e2fed2dd567a652 Mon Sep 17 00:00:00 2001 From: Pawel Kopiczko Date: Thu, 30 Jan 2025 20:46:15 +0000 Subject: [PATCH 12/28] Okta API: Remove unused fields and update docs (#51637) * Okta API: Remove unused fields and update docs * Add temporary protobuf liter exception --- .../go/teleport/okta/v1/okta_service.pb.go | 535 +++++++++--------- api/proto/teleport/legacy/types/types.proto | 6 +- api/proto/teleport/okta/v1/okta_service.proto | 6 +- api/types/types.pb.go | 6 +- buf.yaml | 2 + 5 files changed, 267 insertions(+), 288 deletions(-) diff --git a/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go b/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go index 341c409e2ccaa..7f14369fd18df 100644 --- a/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go +++ b/api/gen/proto/go/teleport/okta/v1/okta_service.pb.go @@ -402,14 +402,10 @@ type UpdateIntegrationRequest struct { EnableAppGroupSync bool `protobuf:"varint,6,opt,name=enable_app_group_sync,json=enableAppGroupSync,proto3" json:"enable_app_group_sync,omitempty"` // Settings related to access list synchronization. AccessListSettings *AccessListSettings `protobuf:"bytes,7,opt,name=access_list_settings,json=accessListSettings,proto3" json:"access_list_settings,omitempty"` - // Option to reuse an existing connector. - ReuseConnector string `protobuf:"bytes,8,opt,name=reuse_connector,json=reuseConnector,proto3" json:"reuse_connector,omitempty"` - // SSO metadata URL for integration. - SsoMetadataUrl string `protobuf:"bytes,9,opt,name=sso_metadata_url,json=ssoMetadataUrl,proto3" json:"sso_metadata_url,omitempty"` // Enable sync from Teleport to Okta. If disabled it will prevent creating Okta assignments which // in turn will disable JIT Access Requests. Makes sense only when apps and groups sync is // enabled. 
- EnableBidirectionalSync bool `protobuf:"varint,10,opt,name=enable_bidirectional_sync,json=enableBidirectionalSync,proto3" json:"enable_bidirectional_sync,omitempty"` + EnableBidirectionalSync bool `protobuf:"varint,8,opt,name=enable_bidirectional_sync,json=enableBidirectionalSync,proto3" json:"enable_bidirectional_sync,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -486,20 +482,6 @@ func (x *UpdateIntegrationRequest) GetAccessListSettings() *AccessListSettings { return nil } -func (x *UpdateIntegrationRequest) GetReuseConnector() string { - if x != nil { - return x.ReuseConnector - } - return "" -} - -func (x *UpdateIntegrationRequest) GetSsoMetadataUrl() string { - if x != nil { - return x.SsoMetadataUrl - } - return "" -} - func (x *UpdateIntegrationRequest) GetEnableBidirectionalSync() bool { if x != nil { return x.EnableBidirectionalSync @@ -1856,7 +1838,7 @@ var file_teleport_okta_v1_okta_service_proto_rawDesc = string([]byte{ 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x83, 0x04, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, + 0x6e, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x22, 0xb0, 0x03, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0f, 0x61, 0x70, 0x69, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, @@ -1879,272 +1861,267 @@ var file_teleport_okta_v1_okta_service_proto_rawDesc = string([]byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 
0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, - 0x0a, 0x0f, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x75, 0x73, 0x65, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x73, 0x6f, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x73, 0x73, 0x6f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x55, 0x72, - 0x6c, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x7f, 0x0a, - 0x12, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, - 0x70, 0x70, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x67, - 0x0a, 0x12, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x50, 0x49, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 
0x74, - 0x69, 0x61, 0x6c, 0x73, 0x12, 0x1b, 0x0a, 0x08, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x49, - 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x73, 0x73, 0x77, 0x73, 0x5f, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0f, - 0x73, 0x73, 0x77, 0x73, 0x42, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, - 0x06, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x22, 0x8c, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x56, 0x31, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x46, - 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x8c, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x56, 0x31, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x46, 0x0a, - 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb3, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x6b, 0x74, 0x61, 0x5f, - 0x61, 0x70, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6b, - 0x74, 0x61, 0x41, 0x70, 0x70, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x6f, 0x6b, 0x74, 0x61, 0x5f, - 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6f, 0x6b, 0x74, 0x61, 0x41, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, - 0x6b, 0x74, 0x61, 0x5f, 0x61, 0x70, 0x70, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6f, 0x6b, 0x74, 0x61, 0x41, 0x70, 0x70, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x20, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x32, 0x0a, 0x15, 0x6f, 0x6b, 0x74, 0x61, 0x5f, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x13, 0x6f, 0x6b, 0x74, 0x61, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x0f, 0x61, 0x70, 0x69, 
0x5f, 0x63, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, - 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x50, 0x49, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x22, 0x23, 0x0a, 0x21, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, - 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, - 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x56, - 0x31, 0x52, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, - 0x65, 
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x2e, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, + 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x7f, 0x0a, 0x12, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x22, 0x67, 0x0a, 0x12, 0x4f, + 0x6b, 0x74, 0x61, 0x41, 0x50, 0x49, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x12, 0x1b, 0x0a, 0x08, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x49, 0x64, 0x12, 0x2c, + 0x0a, 0x11, 0x73, 0x73, 0x77, 0x73, 0x5f, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x73, 0x77, + 0x73, 0x42, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x06, 0x0a, 0x04, + 0x61, 0x75, 0x74, 0x68, 0x22, 0x8c, 0x01, 
0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x27, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x56, 0x31, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x46, 0x0a, 0x0e, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, + 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x22, 0x8c, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, + 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x27, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x56, 0x31, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x46, 0x0a, 0x0e, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, + 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x49, 0x6e, + 0x66, 0x6f, 0x22, 0xb3, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x6b, 0x74, 0x61, 0x5f, 0x61, 0x70, 0x70, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6b, 
0x74, 0x61, 0x41, + 0x70, 0x70, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x6f, 0x6b, 0x74, 0x61, 0x5f, 0x61, 0x70, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x6b, 0x74, + 0x61, 0x41, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x6b, 0x74, 0x61, + 0x5f, 0x61, 0x70, 0x70, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6f, 0x6b, 0x74, 0x61, 0x41, 0x70, 0x70, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x36, 0x0a, 0x17, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x15, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x20, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, + 0x15, 0x6f, 0x6b, 0x74, 0x61, 0x5f, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6f, 0x6b, + 0x74, 0x61, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, + 0x6c, 0x12, 0x4d, 0x0a, 0x0f, 0x61, 0x70, 0x69, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6c, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x6b, + 0x74, 0x61, 0x41, 0x50, 0x49, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x52, 0x0e, 0x61, 0x70, 0x69, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x22, 0x23, 0x0a, 0x21, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 
0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, + 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x81, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3a, 0x0a, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, + 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x56, 0x31, 0x52, 0x0b, + 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, + 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0x2e, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, - 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, - 0x65, 0x56, 0x31, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x22, - 0x57, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, + 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x56, 0x31, + 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x22, 0x57, 0x0a, 0x1b, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0b, 0x69, + 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x56, 0x31, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x75, 0x6c, 0x65, 0x22, 0x31, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, + 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, + 
0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x58, 0x0a, 0x1a, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x80, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, + 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, + 0x74, 0x56, 0x31, 0x52, 0x0b, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x2e, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, + 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x22, 0x56, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x0a, 0x61, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x56, 0x31, 0x52, 0x0a, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x22, 0x56, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x37, 0x0a, 0x0a, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, - 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x56, 0x31, 0x52, 0x0a, 0x69, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x22, 0x31, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x58, - 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 
0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x80, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73, - 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x73, 0x73, 0x69, - 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, - 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x52, 0x0b, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x2e, 0x0a, 0x18, 0x47, - 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x56, 0x0a, 0x1b, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, - 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x0a, 0x61, 0x73, - 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x52, 0x0a, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, - 0x65, 0x6e, 0x74, 0x22, 0x56, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, - 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 
0x37, 0x0a, 0x0a, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, - 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x52, - 0x0a, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x21, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, - 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, - 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x65, 0x63, - 0x56, 0x31, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x41, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x73, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x61, 0x73, 0x50, 0x61, 0x73, 0x73, - 0x65, 0x64, 0x22, 0x31, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, - 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, - 0x6c, 0x6c, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 
0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x32, 0xa9, 0x0e, 0x0a, 0x0b, 0x4f, 0x6b, 0x74, - 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, - 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, - 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, - 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, - 0x65, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, - 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x75, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2d, + 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x52, 0x0a, 0x61, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x21, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x65, 0x63, 0x56, 0x31, 0x2e, + 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x41, 0x0a, 0x0f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x61, 0x73, 0x50, 0x61, 0x73, 0x73, 0x65, 0x64, 0x22, + 0x31, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, + 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x21, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, + 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x32, 0xa9, 0x0e, 0x0a, 0x0b, 0x4f, 0x6b, 0x74, 0x61, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, + 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x65, 0x6c, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x75, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2d, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, + 0x65, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, + 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, + 0x65, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 
0x65, 0x4f, 0x6b, 0x74, + 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, + 0x65, 0x56, 0x31, 0x12, 0x5d, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, + 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, + 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x65, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, + 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x31, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, - 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x75, 0x6c, 0x65, 0x56, 0x31, 0x12, 0x5d, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x2d, + 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, 0x6b, 0x74, 0x61, 0x49, + 0x6d, 0x70, 
0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, + 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, + 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, - 0x6c, 0x6c, 0x4f, 0x6b, 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x12, 0x31, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, - 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, 0x6b, - 0x74, 0x61, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x72, 0x0a, 0x13, - 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, - 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, - 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, - 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, - 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x58, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, - 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, - 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x2d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, - 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, - 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, - 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x2d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, - 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, - 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, - 0x74, 0x1a, 0x17, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, - 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x12, 0x69, 0x0a, 0x1a, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x33, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, - 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x2e, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, + 0x11, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, + 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, + 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x2d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 
0x65, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, + 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x12, 0x5e, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x2d, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, + 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, + 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x31, 0x12, 0x69, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x33, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, + 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, + 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x2e, 0x74, 0x65, 0x6c, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x65, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, 0x6b, + 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x31, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, - 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, - 0x6c, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x31, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, 0x6b, 0x74, - 0x61, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x84, 0x01, 0x0a, 0x19, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x32, 0x2e, 0x74, 0x65, 0x6c, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x6c, 0x4f, 0x6b, 0x74, 0x61, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 
0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x84, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, - 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, - 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, - 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x6c, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, - 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, - 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, - 0x0a, 0x07, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 
0x73, 0x12, 0x20, 0x2e, 0x74, 0x65, 0x6c, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x70, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x65, - 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x41, 0x70, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x22, 0x2e, 0x74, 0x65, - 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, - 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x2f, 0x6f, 0x6b, 0x74, 0x61, 0x2f, 0x76, 0x31, 0x3b, 0x6f, 0x6b, 0x74, 0x61, - 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x32, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x74, 0x65, 0x6c, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 
0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x6c, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, + 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, + 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2b, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, + 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, + 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, + 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x07, 0x47, + 0x65, 0x74, 0x41, 0x70, 0x70, 0x73, 0x12, 0x20, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, + 0x70, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x54, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x6f, 0x6b, 0x74, 0x61, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0x4c, 0x5a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x67, 0x72, 0x61, 0x76, 0x69, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, + 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x2f, 0x6f, 0x6b, 0x74, 0x61, 0x2f, 0x76, 0x31, 0x3b, 0x6f, 0x6b, 0x74, 0x61, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, }) var ( diff --git a/api/proto/teleport/legacy/types/types.proto b/api/proto/teleport/legacy/types/types.proto index 5ef26f33d3b05..b09cb788a1675 100644 --- a/api/proto/teleport/legacy/types/types.proto +++ b/api/proto/teleport/legacy/types/types.proto @@ -6711,7 +6711,7 @@ message PluginOktaSyncSettings { // AppName is the Okta-assigned unique name of the Okta App that Teleport uses // as a gateway to interact with Okta for SAML login, SCIM provisioning and user - // sync. May be missing for old Okta integration installs. + // sync. string app_name = 8; // DisableSyncAppGroups disables syncing of app groups from Okta. @@ -7180,7 +7180,9 @@ message PluginOktaStatusDetailsSSO { // SSO login. string app_id = 2; - // AppName is the human-readable name of the Okta Applicaion used for SSO. 
+ // AppName is the Okta-assigned unique name of the Okta App that Teleport uses + // as a gateway to interact with Okta for SAML login, SCIM provisioning and user + // sync. string app_name = 3; } diff --git a/api/proto/teleport/okta/v1/okta_service.proto b/api/proto/teleport/okta/v1/okta_service.proto index 1be540f43ab85..bb346eece44a5 100644 --- a/api/proto/teleport/okta/v1/okta_service.proto +++ b/api/proto/teleport/okta/v1/okta_service.proto @@ -150,14 +150,10 @@ message UpdateIntegrationRequest { bool enable_app_group_sync = 6; // Settings related to access list synchronization. AccessListSettings access_list_settings = 7; - // Option to reuse an existing connector. - string reuse_connector = 8; - // SSO metadata URL for integration. - string sso_metadata_url = 9; // Enable sync from Teleport to Okta. If disabled it will prevent creating Okta assignments which // in turn will disable JIT Access Requests. Makes sense only when apps and groups sync is // enabled. - bool enable_bidirectional_sync = 10; + bool enable_bidirectional_sync = 8; } // AccessListSettings contains the settings for access list synchronization. diff --git a/api/types/types.pb.go b/api/types/types.pb.go index 8b7501b928414..c88f3b1c957cc 100644 --- a/api/types/types.pb.go +++ b/api/types/types.pb.go @@ -17239,7 +17239,7 @@ type PluginOktaSyncSettings struct { AppFilters []string `protobuf:"bytes,7,rep,name=app_filters,json=appFilters,proto3" json:"app_filters,omitempty"` // AppName is the Okta-assigned unique name of the Okta App that Teleport uses // as a gateway to interact with Okta for SAML login, SCIM provisioning and user - // sync. May be missing for old Okta integration installs. + // sync. AppName string `protobuf:"bytes,8,opt,name=app_name,json=appName,proto3" json:"app_name,omitempty"` // DisableSyncAppGroups disables syncing of app groups from Okta. // This is useful when the app groups are not needed in Teleport. 
@@ -18816,7 +18816,9 @@ type PluginOktaStatusDetailsSSO struct { // AppId is the unique Okta application ID of the Okta Applicaion used for // SSO login. AppId string `protobuf:"bytes,2,opt,name=app_id,json=appId,proto3" json:"app_id,omitempty"` - // AppName is the human-readable name of the Okta Applicaion used for SSO. + // AppName is the Okta-assigned unique name of the Okta App that Teleport uses + // as a gateway to interact with Okta for SAML login, SCIM provisioning and user + // sync. AppName string `protobuf:"bytes,3,opt,name=app_name,json=appName,proto3" json:"app_name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` diff --git a/buf.yaml b/buf.yaml index 8f01bbe2dea21..ccdcedcce111e 100644 --- a/buf.yaml +++ b/buf.yaml @@ -84,6 +84,8 @@ breaking: ignore: # TODO(codingllama): Remove ignore once the PDP API is stable. - api/proto/teleport/decision/v1alpha1 + # TODO(kopiczko) remove after https://github.com/gravitational/teleport/pull/51637 is merged + - api/proto/teleport/okta/v1/okta_service.proto ignore_only: RESERVED_ENUM_NO_DELETE: - api/proto/teleport/legacy/types/types.proto From 32d2aa9515da134904db25e19e6b1b049f36e63f Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 30 Jan 2025 17:06:58 -0500 Subject: [PATCH 13/28] Remove aws-sdk-go depdenency in bot tests (#51680) The use of static credentials in the test was unnecessary and the same result can be achieved using static strings. 
--- lib/auth/bot_test.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/auth/bot_test.go b/lib/auth/bot_test.go index 3b09c041721a6..ee1cf22358e8c 100644 --- a/lib/auth/bot_test.go +++ b/lib/auth/bot_test.go @@ -32,7 +32,6 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws/credentials" "github.com/digitorus/pkcs7" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -923,16 +922,14 @@ func TestRegisterBot_BotInstanceRejoin(t *testing.T) { }), } - nodeCredentials, err := credentials.NewStaticCredentials("FAKE_ID", "FAKE_KEY", "FAKE_TOKEN").Get() - require.NoError(t, err) - t.Setenv("AWS_ACCESS_KEY_ID", nodeCredentials.AccessKeyID) - t.Setenv("AWS_SECRET_ACCESS_KEY", nodeCredentials.SecretAccessKey) - t.Setenv("AWS_SESSION_TOKEN", nodeCredentials.SessionToken) + t.Setenv("AWS_ACCESS_KEY_ID", "FAKE_ID") + t.Setenv("AWS_SECRET_ACCESS_KEY", "FAKE_KEY") + t.Setenv("AWS_SESSION_TOKEN", "FAKE_TOKEN") t.Setenv("AWS_REGION", "us-west-2") // Create a bot roleName := "test-role" - _, err = CreateRole(ctx, a, roleName, types.RoleSpecV6{}) + _, err := CreateRole(ctx, a, roleName, types.RoleSpecV6{}) require.NoError(t, err) botName := "bot" From ee119ea6d68045124d82b2b99cad5e308d8d19c8 Mon Sep 17 00:00:00 2001 From: Gavin Frazar Date: Thu, 30 Jan 2025 15:06:09 -0800 Subject: [PATCH 14/28] Remove remaining db and discovery AWS SDK imports (#51628) This eliminates all remaining references to AWS SDK v1 in the lib/srv/db/... and lib/srv/discovery/... packages. Most of the changes are simply to remove dead code. 
--- lib/cloud/aws/errors.go | 22 +- lib/cloud/aws/errors_test.go | 48 ++-- lib/cloud/aws/identity.go | 22 +- lib/cloud/aws/identity_test.go | 19 +- lib/cloud/aws/policy_statements.go | 2 +- lib/cloud/aws/policy_test.go | 2 +- lib/cloud/mocks/aws.go | 105 -------- lib/cloud/mocks/aws_rds.go | 58 ++-- lib/cloud/mocks/aws_redshift_serverless.go | 12 +- lib/cloud/mocks/aws_sts.go | 43 ++- lib/configurators/aws/aws.go | 2 +- .../awsoidc/eks_enroll_clusters.go | 2 +- .../awsoidc/listdeployeddatabaseservice.go | 2 +- lib/srv/db/cloud/aws.go | 4 +- lib/srv/db/cloud/iam.go | 14 +- lib/srv/db/cloud/iam_test.go | 36 ++- .../db/cloud/resource_checker_credentials.go | 2 +- lib/srv/db/cloud/users/elasticache.go | 6 +- lib/srv/db/cloud/users/memorydb.go | 6 +- lib/srv/db/common/auth.go | 74 ++--- lib/srv/db/common/auth_test.go | 10 +- lib/srv/db/common/errors.go | 6 +- lib/srv/db/redis/engine.go | 4 +- lib/srv/db/secrets/aws_secrets_manager.go | 2 +- lib/srv/discovery/common/renaming_test.go | 4 +- lib/srv/discovery/common/server.go | 28 -- lib/srv/discovery/common/server_test.go | 252 ------------------ .../discovery/fetchers/aws-sync/aws-sync.go | 4 +- lib/srv/discovery/fetchers/db/aws_docdb.go | 14 +- .../discovery/fetchers/db/aws_elasticache.go | 8 +- lib/srv/discovery/fetchers/db/aws_memorydb.go | 4 +- .../discovery/fetchers/db/aws_opensearch.go | 4 +- lib/srv/discovery/fetchers/db/aws_rds.go | 4 +- .../discovery/fetchers/db/aws_rds_proxy.go | 6 +- lib/srv/discovery/fetchers/db/aws_redshift.go | 2 +- .../fetchers/db/aws_redshift_serverless.go | 4 +- .../db/aws_redshift_serverless_test.go | 6 +- 37 files changed, 226 insertions(+), 617 deletions(-) delete mode 100644 lib/cloud/mocks/aws.go diff --git a/lib/cloud/aws/errors.go b/lib/cloud/aws/errors.go index 7722e7515bc1f..6daaea372c061 100644 --- a/lib/cloud/aws/errors.go +++ b/lib/cloud/aws/errors.go @@ -26,30 +26,16 @@ import ( awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" ecstypes 
"github.com/aws/aws-sdk-go-v2/service/ecs/types" iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/gravitational/trace" ) -// ConvertRequestFailureError converts `err` into AWS errors to trace errors. -// If the provided error is not a [awserr.RequestFailure] it delegates -// error conversion to [ConvertRequestFailureErrorV2] for SDK v2 compatibility. -// Prefer using [ConvertRequestFailureErrorV2] directly for AWS SDK v2 client -// errors. -func ConvertRequestFailureError(err error) error { - var requestErr awserr.RequestFailure - if errors.As(err, &requestErr) { - return convertRequestFailureErrorFromStatusCode(requestErr.StatusCode(), requestErr) - } - return ConvertRequestFailureErrorV2(err) -} - -// ConvertRequestFailureErrorV2 converts AWS SDK v2 errors to trace errors. +// ConvertRequestFailureError converts AWS SDK v2 errors to trace errors. // If the provided error is not a [awshttp.ResponseError] it returns the error // without modifying it. 
-func ConvertRequestFailureErrorV2(err error) error { +func ConvertRequestFailureError(err error) error { var re *awshttp.ResponseError if errors.As(err, &re) { - return convertRequestFailureErrorFromStatusCode(re.HTTPStatusCode(), re.Err) + return convertRequestFailureErrorFromStatusCode(re.HTTPStatusCode(), re) } return err } @@ -107,5 +93,5 @@ func ConvertIAMError(err error) error { return trace.BadParameter(*malformedPolicyDocument.Message) } - return ConvertRequestFailureErrorV2(err) + return ConvertRequestFailureError(err) } diff --git a/lib/cloud/aws/errors_test.go b/lib/cloud/aws/errors_test.go index 8e28e6cb0cf64..eb666ef477ccd 100644 --- a/lib/cloud/aws/errors_test.go +++ b/lib/cloud/aws/errors_test.go @@ -23,10 +23,9 @@ import ( "net/http" "testing" + "github.com/aws/aws-sdk-go-v2/aws" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/gravitational/trace" "github.com/stretchr/testify/require" @@ -37,6 +36,18 @@ func TestConvertRequestFailureError(t *testing.T) { fakeRequestID := "11111111-2222-3333-3333-333333333334" + newResponseError := func(code int) error { + return &awshttp.ResponseError{ + RequestID: fakeRequestID, + ResponseError: &smithyhttp.ResponseError{ + Response: &smithyhttp.Response{Response: &http.Response{ + StatusCode: code, + }}, + Err: trace.Errorf("inner"), + }, + } + } + tests := []struct { name string inputError error @@ -45,49 +56,46 @@ func TestConvertRequestFailureError(t *testing.T) { }{ { name: "StatusForbidden", - inputError: awserr.NewRequestFailure(awserr.New("code", "message", nil), http.StatusForbidden, fakeRequestID), + inputError: newResponseError(http.StatusForbidden), wantIsError: trace.IsAccessDenied, }, { name: "StatusConflict", - inputError: awserr.NewRequestFailure(awserr.New("code", "message", nil), 
http.StatusConflict, fakeRequestID), + inputError: newResponseError(http.StatusConflict), wantIsError: trace.IsAlreadyExists, }, { name: "StatusNotFound", - inputError: awserr.NewRequestFailure(awserr.New("code", "message", nil), http.StatusNotFound, fakeRequestID), + inputError: newResponseError(http.StatusNotFound), wantIsError: trace.IsNotFound, }, { name: "StatusBadRequest", - inputError: awserr.NewRequestFailure(awserr.New("code", "message", nil), http.StatusBadRequest, fakeRequestID), - wantUnmodified: true, - }, - { - name: "StatusBadRequest with AccessDeniedException", - inputError: awserr.NewRequestFailure(awserr.New("AccessDeniedException", "message", nil), http.StatusBadRequest, fakeRequestID), - wantIsError: trace.IsAccessDenied, - }, - { - name: "not AWS error", - inputError: errors.New("not-aws-error"), + inputError: newResponseError(http.StatusBadRequest), wantUnmodified: true, }, { - name: "v2 sdk error", + name: "StatusBadRequest with AccessDeniedException", inputError: &awshttp.ResponseError{ + RequestID: fakeRequestID, ResponseError: &smithyhttp.ResponseError{ Response: &smithyhttp.Response{Response: &http.Response{ - StatusCode: http.StatusNotFound, + StatusCode: http.StatusBadRequest, }}, - Err: trace.Errorf(""), + Err: trace.Errorf("AccessDeniedException"), }, }, - wantIsError: trace.IsNotFound, + wantIsError: trace.IsAccessDenied, + }, + { + name: "not AWS error", + inputError: errors.New("not-aws-error"), + wantUnmodified: true, }, { name: "v2 sdk error for ecs ClusterNotFoundException", inputError: &awshttp.ResponseError{ + RequestID: fakeRequestID, ResponseError: &smithyhttp.ResponseError{ Response: &smithyhttp.Response{Response: &http.Response{ StatusCode: http.StatusBadRequest, diff --git a/lib/cloud/aws/identity.go b/lib/cloud/aws/identity.go index 8ecad2747887e..05d36b7aceb46 100644 --- a/lib/cloud/aws/identity.go +++ b/lib/cloud/aws/identity.go @@ -24,11 +24,8 @@ import ( "strings" awsv2 "github.com/aws/aws-sdk-go-v2/aws" - stsv2 
"github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/sts" "github.com/gravitational/trace" ) @@ -109,23 +106,12 @@ func (i identityBase) String() string { } type callerIdentityGetter interface { - GetCallerIdentity(ctx context.Context, params *stsv2.GetCallerIdentityInput, optFns ...func(*stsv2.Options)) (*stsv2.GetCallerIdentityOutput, error) + GetCallerIdentity(ctx context.Context, params *sts.GetCallerIdentityInput, optFns ...func(*sts.Options)) (*sts.GetCallerIdentityOutput, error) } // GetIdentityWithClient determines AWS identity of this Teleport process // using the provided STS API client. -func GetIdentityWithClient(ctx context.Context, stsClient stsiface.STSAPI) (Identity, error) { - out, err := stsClient.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{}) - if err != nil { - return nil, trace.Wrap(err) - } - - return IdentityFromArn(aws.StringValue(out.Arn)) -} - -// GetIdentityWithClient determines AWS identity of this Teleport process -// using the provided STS API client. 
-func GetIdentityWithClientV2(ctx context.Context, clt callerIdentityGetter) (Identity, error) { +func GetIdentityWithClient(ctx context.Context, clt callerIdentityGetter) (Identity, error) { out, err := clt.GetCallerIdentity(ctx, nil) if err != nil { return nil, trace.Wrap(err) diff --git a/lib/cloud/aws/identity_test.go b/lib/cloud/aws/identity_test.go index 20f11f04204dd..747ef265b5daf 100644 --- a/lib/cloud/aws/identity_test.go +++ b/lib/cloud/aws/identity_test.go @@ -22,11 +22,9 @@ import ( "context" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/stretchr/testify/require" + + "github.com/gravitational/teleport/lib/cloud/mocks" ) // TestGetIdentity verifies parsing of AWS identity received from STS API. @@ -79,7 +77,7 @@ func TestGetIdentity(t *testing.T) { } for _, test := range tests { t.Run(test.description, func(t *testing.T) { - identity, err := GetIdentityWithClient(context.Background(), &stsMock{arn: test.inARN}) + identity, err := GetIdentityWithClient(context.Background(), &mocks.STSClient{ARN: test.inARN}) require.NoError(t, err) require.IsType(t, test.outIdentity, identity) require.Equal(t, test.outName, identity.GetName()) @@ -89,14 +87,3 @@ func TestGetIdentity(t *testing.T) { }) } } - -type stsMock struct { - stsiface.STSAPI - arn string -} - -func (m *stsMock) GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) { - return &sts.GetCallerIdentityOutput{ - Arn: aws.String(m.arn), - }, nil -} diff --git a/lib/cloud/aws/policy_statements.go b/lib/cloud/aws/policy_statements.go index d266bed81a088..4289774a70a26 100644 --- a/lib/cloud/aws/policy_statements.go +++ b/lib/cloud/aws/policy_statements.go @@ -21,7 +21,7 @@ package aws import ( "fmt" - "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go-v2/aws/arn" 
"github.com/gravitational/trace" "github.com/gravitational/teleport/api/types" diff --git a/lib/cloud/aws/policy_test.go b/lib/cloud/aws/policy_test.go index d9085a7f586a3..2efa2d0fcd3be 100644 --- a/lib/cloud/aws/policy_test.go +++ b/lib/cloud/aws/policy_test.go @@ -26,9 +26,9 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/iam" iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/google/go-cmp/cmp" "github.com/gravitational/trace" "github.com/stretchr/testify/require" diff --git a/lib/cloud/mocks/aws.go b/lib/cloud/mocks/aws.go deleted file mode 100644 index d48050dc5f373..0000000000000 --- a/lib/cloud/mocks/aws.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package mocks - -import ( - "context" - "net/http" - "net/url" - "slices" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/aws/aws-sdk-go/service/sts/stsiface" -) - -// STSClientV1 mocks AWS STS API for AWS SDK v1. 
-type STSClientV1 struct { - stsiface.STSAPI - ARN string - URL *url.URL - assumedRoleARNs []string - assumedRoleExternalIDs []string - mu sync.Mutex -} - -func (m *STSClientV1) GetAssumedRoleARNs() []string { - m.mu.Lock() - defer m.mu.Unlock() - return m.assumedRoleARNs -} - -func (m *STSClientV1) GetAssumedRoleExternalIDs() []string { - m.mu.Lock() - defer m.mu.Unlock() - return m.assumedRoleExternalIDs -} - -func (m *STSClientV1) ResetAssumeRoleHistory() { - m.mu.Lock() - defer m.mu.Unlock() - m.assumedRoleARNs = nil - m.assumedRoleExternalIDs = nil -} - -func (m *STSClientV1) GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) { - return &sts.GetCallerIdentityOutput{ - Arn: aws.String(m.ARN), - }, nil -} - -func (m *STSClientV1) AssumeRole(in *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { - return m.AssumeRoleWithContext(context.Background(), in) -} - -func (m *STSClientV1) AssumeRoleWithContext(ctx aws.Context, in *sts.AssumeRoleInput, _ ...request.Option) (*sts.AssumeRoleOutput, error) { - m.mu.Lock() - defer m.mu.Unlock() - if !slices.Contains(m.assumedRoleARNs, aws.StringValue(in.RoleArn)) { - m.assumedRoleARNs = append(m.assumedRoleARNs, aws.StringValue(in.RoleArn)) - m.assumedRoleExternalIDs = append(m.assumedRoleExternalIDs, aws.StringValue(in.ExternalId)) - } - expiry := time.Now().Add(60 * time.Minute) - return &sts.AssumeRoleOutput{ - Credentials: &sts.Credentials{ - AccessKeyId: aws.String("FAKEACCESSKEYID"), - SecretAccessKey: aws.String("secret"), - SessionToken: aws.String("token"), - Expiration: &expiry, - }, - }, nil -} - -func (m *STSClientV1) GetCallerIdentityRequest(req *sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) { - return &request.Request{ - HTTPRequest: &http.Request{ - Header: http.Header{}, - URL: m.URL, - }, - Operation: &request.Operation{ - Name: "GetCallerIdentity", - HTTPMethod: "POST", - HTTPPath: "/", - }, - 
Handlers: request.Handlers{}, - }, nil -} diff --git a/lib/cloud/mocks/aws_rds.go b/lib/cloud/mocks/aws_rds.go index 9338b8330dc5f..ae023e1db787a 100644 --- a/lib/cloud/mocks/aws_rds.go +++ b/lib/cloud/mocks/aws_rds.go @@ -22,11 +22,11 @@ import ( "context" "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/rds" rdsv2 "github.com/aws/aws-sdk-go-v2/service/rds" rdstypes "github.com/aws/aws-sdk-go-v2/service/rds/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/google/uuid" "github.com/gravitational/trace" @@ -55,19 +55,19 @@ func (c *RDSClient) DescribeDBInstances(_ context.Context, input *rdsv2.Describe if err != nil { return nil, trace.Wrap(err) } - if aws.StringValue(input.DBInstanceIdentifier) == "" { + if aws.ToString(input.DBInstanceIdentifier) == "" { return &rdsv2.DescribeDBInstancesOutput{ DBInstances: instances, }, nil } for _, instance := range instances { - if aws.StringValue(instance.DBInstanceIdentifier) == aws.StringValue(input.DBInstanceIdentifier) { + if aws.ToString(instance.DBInstanceIdentifier) == aws.ToString(input.DBInstanceIdentifier) { return &rdsv2.DescribeDBInstancesOutput{ DBInstances: []rdstypes.DBInstance{instance}, }, nil } } - return nil, trace.NotFound("instance %v not found", aws.StringValue(input.DBInstanceIdentifier)) + return nil, trace.NotFound("instance %v not found", aws.ToString(input.DBInstanceIdentifier)) } func (c *RDSClient) DescribeDBClusters(_ context.Context, input *rdsv2.DescribeDBClustersInput, _ ...func(*rdsv2.Options)) (*rdsv2.DescribeDBClustersOutput, error) { @@ -82,19 +82,19 @@ func (c *RDSClient) DescribeDBClusters(_ context.Context, input *rdsv2.DescribeD if err != nil { return nil, trace.Wrap(err) } - if aws.StringValue(input.DBClusterIdentifier) == "" { + if aws.ToString(input.DBClusterIdentifier) == "" { return &rdsv2.DescribeDBClustersOutput{ DBClusters: clusters, }, nil } for _, cluster := range 
clusters { - if aws.StringValue(cluster.DBClusterIdentifier) == aws.StringValue(input.DBClusterIdentifier) { + if aws.ToString(cluster.DBClusterIdentifier) == aws.ToString(input.DBClusterIdentifier) { return &rdsv2.DescribeDBClustersOutput{ DBClusters: []rdstypes.DBCluster{cluster}, }, nil } } - return nil, trace.NotFound("cluster %v not found", aws.StringValue(input.DBClusterIdentifier)) + return nil, trace.NotFound("cluster %v not found", aws.ToString(input.DBClusterIdentifier)) } func (c *RDSClient) ModifyDBInstance(ctx context.Context, input *rdsv2.ModifyDBInstanceInput, optFns ...func(*rdsv2.Options)) (*rdsv2.ModifyDBInstanceOutput, error) { @@ -103,8 +103,8 @@ func (c *RDSClient) ModifyDBInstance(ctx context.Context, input *rdsv2.ModifyDBI } for i, instance := range c.DBInstances { - if aws.StringValue(instance.DBInstanceIdentifier) == aws.StringValue(input.DBInstanceIdentifier) { - if aws.BoolValue(input.EnableIAMDatabaseAuthentication) { + if aws.ToString(instance.DBInstanceIdentifier) == aws.ToString(input.DBInstanceIdentifier) { + if aws.ToBool(input.EnableIAMDatabaseAuthentication) { c.DBInstances[i].IAMDatabaseAuthenticationEnabled = aws.Bool(true) } return &rdsv2.ModifyDBInstanceOutput{ @@ -112,7 +112,7 @@ func (c *RDSClient) ModifyDBInstance(ctx context.Context, input *rdsv2.ModifyDBI }, nil } } - return nil, trace.NotFound("instance %v not found", aws.StringValue(input.DBInstanceIdentifier)) + return nil, trace.NotFound("instance %v not found", aws.ToString(input.DBInstanceIdentifier)) } func (c *RDSClient) ModifyDBCluster(ctx context.Context, input *rdsv2.ModifyDBClusterInput, optFns ...func(*rdsv2.Options)) (*rdsv2.ModifyDBClusterOutput, error) { @@ -121,8 +121,8 @@ func (c *RDSClient) ModifyDBCluster(ctx context.Context, input *rdsv2.ModifyDBCl } for i, cluster := range c.DBClusters { - if aws.StringValue(cluster.DBClusterIdentifier) == aws.StringValue(input.DBClusterIdentifier) { - if aws.BoolValue(input.EnableIAMDatabaseAuthentication) { + if 
aws.ToString(cluster.DBClusterIdentifier) == aws.ToString(input.DBClusterIdentifier) { + if aws.ToBool(input.EnableIAMDatabaseAuthentication) { c.DBClusters[i].IAMDatabaseAuthenticationEnabled = aws.Bool(true) } return &rdsv2.ModifyDBClusterOutput{ @@ -130,7 +130,7 @@ func (c *RDSClient) ModifyDBCluster(ctx context.Context, input *rdsv2.ModifyDBCl }, nil } } - return nil, trace.NotFound("cluster %v not found", aws.StringValue(input.DBClusterIdentifier)) + return nil, trace.NotFound("cluster %v not found", aws.ToString(input.DBClusterIdentifier)) } func (c *RDSClient) DescribeDBProxies(_ context.Context, input *rdsv2.DescribeDBProxiesInput, _ ...func(*rdsv2.Options)) (*rdsv2.DescribeDBProxiesOutput, error) { @@ -138,19 +138,19 @@ func (c *RDSClient) DescribeDBProxies(_ context.Context, input *rdsv2.DescribeDB return nil, trace.AccessDenied("unauthorized") } - if aws.StringValue(input.DBProxyName) == "" { + if aws.ToString(input.DBProxyName) == "" { return &rdsv2.DescribeDBProxiesOutput{ DBProxies: c.DBProxies, }, nil } for _, dbProxy := range c.DBProxies { - if aws.StringValue(dbProxy.DBProxyName) == aws.StringValue(input.DBProxyName) { + if aws.ToString(dbProxy.DBProxyName) == aws.ToString(input.DBProxyName) { return &rdsv2.DescribeDBProxiesOutput{ DBProxies: []rdstypes.DBProxy{dbProxy}, }, nil } } - return nil, trace.NotFound("proxy %v not found", aws.StringValue(input.DBProxyName)) + return nil, trace.NotFound("proxy %v not found", aws.ToString(input.DBProxyName)) } func (c *RDSClient) DescribeDBProxyEndpoints(_ context.Context, input *rdsv2.DescribeDBProxyEndpointsInput, _ ...func(*rdsv2.Options)) (*rdsv2.DescribeDBProxyEndpointsOutput, error) { @@ -158,8 +158,8 @@ func (c *RDSClient) DescribeDBProxyEndpoints(_ context.Context, input *rdsv2.Des return nil, trace.AccessDenied("unauthorized") } - inputProxyName := aws.StringValue(input.DBProxyName) - inputProxyEndpointName := aws.StringValue(input.DBProxyEndpointName) + inputProxyName := 
aws.ToString(input.DBProxyName) + inputProxyEndpointName := aws.ToString(input.DBProxyEndpointName) if inputProxyName == "" && inputProxyEndpointName == "" { return &rdsv2.DescribeDBProxyEndpointsOutput{ @@ -170,19 +170,19 @@ func (c *RDSClient) DescribeDBProxyEndpoints(_ context.Context, input *rdsv2.Des var endpoints []rdstypes.DBProxyEndpoint for _, dbProxyEndpoiont := range c.DBProxyEndpoints { if inputProxyEndpointName != "" && - inputProxyEndpointName != aws.StringValue(dbProxyEndpoiont.DBProxyEndpointName) { + inputProxyEndpointName != aws.ToString(dbProxyEndpoiont.DBProxyEndpointName) { continue } if inputProxyName != "" && - inputProxyName != aws.StringValue(dbProxyEndpoiont.DBProxyName) { + inputProxyName != aws.ToString(dbProxyEndpoiont.DBProxyName) { continue } endpoints = append(endpoints, dbProxyEndpoiont) } if len(endpoints) == 0 { - return nil, trace.NotFound("proxy endpoint %v not found", aws.StringValue(input.DBProxyEndpointName)) + return nil, trace.NotFound("proxy endpoint %v not found", aws.ToString(input.DBProxyEndpointName)) } return &rdsv2.DescribeDBProxyEndpointsOutput{DBProxyEndpoints: endpoints}, nil } @@ -198,10 +198,10 @@ func checkEngineFilters(filters []rdstypes.Filter, engineVersions []rdstypes.DBE } recognizedEngines := make(map[string]struct{}) for _, e := range engineVersions { - recognizedEngines[aws.StringValue(e.Engine)] = struct{}{} + recognizedEngines[aws.ToString(e.Engine)] = struct{}{} } for _, f := range filters { - if aws.StringValue(f.Name) != "engine" { + if aws.ToString(f.Name) != "engine" { continue } for _, v := range f.Values { @@ -261,7 +261,7 @@ func clusterIdentifierFilterSet(filters []rdstypes.Filter) map[string]struct{} { func filterValues(filters []rdstypes.Filter, filterKey string) map[string]struct{} { out := make(map[string]struct{}) for _, f := range filters { - if aws.StringValue(f.Name) != filterKey { + if aws.ToString(f.Name) != filterKey { continue } for _, v := range f.Values { @@ -273,19 +273,19 @@ 
func filterValues(filters []rdstypes.Filter, filterKey string) map[string]struct // instanceEngineMatches returns whether an RDS DBInstance engine matches any engine name in a filter set. func instanceEngineMatches(instance rdstypes.DBInstance, filterSet map[string]struct{}) bool { - _, ok := filterSet[aws.StringValue(instance.Engine)] + _, ok := filterSet[aws.ToString(instance.Engine)] return ok } // instanceClusterIDMatches returns whether an RDS DBInstance ClusterID matches any ClusterID in a filter set. func instanceClusterIDMatches(instance rdstypes.DBInstance, filterSet map[string]struct{}) bool { - _, ok := filterSet[aws.StringValue(instance.DBClusterIdentifier)] + _, ok := filterSet[aws.ToString(instance.DBClusterIdentifier)] return ok } // clusterEngineMatches returns whether an RDS DBCluster engine matches any engine name in a filter set. func clusterEngineMatches(cluster rdstypes.DBCluster, filterSet map[string]struct{}) bool { - _, ok := filterSet[aws.StringValue(cluster.Engine)] + _, ok := filterSet[aws.ToString(cluster.Engine)] return ok } @@ -340,7 +340,7 @@ func WithRDSClusterReader(cluster *rdstypes.DBCluster) { func WithRDSClusterCustomEndpoint(name string) func(*rdstypes.DBCluster) { return func(cluster *rdstypes.DBCluster) { - parsed, _ := arn.Parse(aws.StringValue(cluster.DBClusterArn)) + parsed, _ := arn.Parse(aws.ToString(cluster.DBClusterArn)) cluster.CustomEndpoints = append(cluster.CustomEndpoints, fmt.Sprintf("%v.cluster-custom-aabbccdd.%v.rds.amazonaws.com", name, parsed.Region), ) diff --git a/lib/cloud/mocks/aws_redshift_serverless.go b/lib/cloud/mocks/aws_redshift_serverless.go index c22afed51af4c..3720f5c0e9302 100644 --- a/lib/cloud/mocks/aws_redshift_serverless.go +++ b/lib/cloud/mocks/aws_redshift_serverless.go @@ -23,9 +23,9 @@ import ( "fmt" "time" + "github.com/aws/aws-sdk-go-v2/aws" rss "github.com/aws/aws-sdk-go-v2/service/redshiftserverless" rsstypes "github.com/aws/aws-sdk-go-v2/service/redshiftserverless/types" - 
"github.com/aws/aws-sdk-go/aws" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" ) @@ -44,13 +44,13 @@ func (m RedshiftServerlessClient) GetWorkgroup(_ context.Context, input *rss.Get } for _, workgroup := range m.Workgroups { - if aws.StringValue(workgroup.WorkgroupName) == aws.StringValue(input.WorkgroupName) { + if aws.ToString(workgroup.WorkgroupName) == aws.ToString(input.WorkgroupName) { return &rss.GetWorkgroupOutput{ Workgroup: &workgroup, }, nil } } - return nil, trace.NotFound("workgroup %q not found", aws.StringValue(input.WorkgroupName)) + return nil, trace.NotFound("workgroup %q not found", aws.ToString(input.WorkgroupName)) } func (m RedshiftServerlessClient) GetEndpointAccess(_ context.Context, input *rss.GetEndpointAccessInput, _ ...func(*rss.Options)) (*rss.GetEndpointAccessOutput, error) { @@ -58,13 +58,13 @@ func (m RedshiftServerlessClient) GetEndpointAccess(_ context.Context, input *rs return nil, trace.AccessDenied("unauthorized") } for _, endpoint := range m.Endpoints { - if aws.StringValue(endpoint.EndpointName) == aws.StringValue(input.EndpointName) { + if aws.ToString(endpoint.EndpointName) == aws.ToString(input.EndpointName) { return &rss.GetEndpointAccessOutput{ Endpoint: &endpoint, }, nil } } - return nil, trace.NotFound("endpoint %q not found", aws.StringValue(input.EndpointName)) + return nil, trace.NotFound("endpoint %q not found", aws.ToString(input.EndpointName)) } func (m RedshiftServerlessClient) ListWorkgroups(_ context.Context, input *rss.ListWorkgroupsInput, _ ...func(*rss.Options)) (*rss.ListWorkgroupsOutput, error) { @@ -93,7 +93,7 @@ func (m RedshiftServerlessClient) ListTagsForResource(_ context.Context, input * return &rss.ListTagsForResourceOutput{}, nil } return &rss.ListTagsForResourceOutput{ - Tags: m.TagsByARN[aws.StringValue(input.ResourceArn)], + Tags: m.TagsByARN[aws.ToString(input.ResourceArn)], }, nil } diff --git a/lib/cloud/mocks/aws_sts.go b/lib/cloud/mocks/aws_sts.go index 
09ae2796d194d..56a5e8459cfd3 100644 --- a/lib/cloud/mocks/aws_sts.go +++ b/lib/cloud/mocks/aws_sts.go @@ -20,7 +20,9 @@ package mocks import ( "context" + "net/url" "slices" + "sync" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -32,19 +34,13 @@ import ( "github.com/gravitational/teleport/lib/cloud/awsconfig" ) -// STSClient mocks the AWS STS API for AWS SDK v1 and v2. -// Callers can use it in tests for both the v1 and v2 interfaces. -// This is useful when some services still use SDK v1 while others use v2 SDK, -// so that all assumed roles can be recorded in one place. -// For example: -// -// clt := &STSClient{} -// a.stsClientV1 = &clt.STSClientV1 -// b.stsClientV2 = clt -// ... -// gotRoles := clt.GetAssumedRoleARNs() // returns roles that were assumed with either v1 or v2 client. +// STSClient mocks the AWS STS API for AWS SDK v2. type STSClient struct { - STSClientV1 + ARN string + URL *url.URL + assumedRoleARNs []string + assumedRoleExternalIDs []string + mu sync.Mutex Unauth bool // credentialProvider is only set when a chain of assumed roles is used. 
@@ -117,8 +113,8 @@ func (m *STSClient) record(roleARN, externalID string) { m.recordFn(roleARN, externalID) return } - m.STSClientV1.mu.Lock() - defer m.STSClientV1.mu.Unlock() + m.mu.Lock() + defer m.mu.Unlock() if !slices.Contains(m.assumedRoleARNs, roleARN) { m.assumedRoleARNs = append(m.assumedRoleARNs, roleARN) m.assumedRoleExternalIDs = append(m.assumedRoleExternalIDs, externalID) @@ -141,3 +137,22 @@ func newAssumeRoleClientProviderFunc(base *STSClient) awsconfig.STSClientProvide return base } } + +func (m *STSClient) GetAssumedRoleARNs() []string { + m.mu.Lock() + defer m.mu.Unlock() + return m.assumedRoleARNs +} + +func (m *STSClient) GetAssumedRoleExternalIDs() []string { + m.mu.Lock() + defer m.mu.Unlock() + return m.assumedRoleExternalIDs +} + +func (m *STSClient) ResetAssumeRoleHistory() { + m.mu.Lock() + defer m.mu.Unlock() + m.assumedRoleARNs = nil + m.assumedRoleExternalIDs = nil +} diff --git a/lib/configurators/aws/aws.go b/lib/configurators/aws/aws.go index 6673b1fbe8519..c6ec812ded8ac 100644 --- a/lib/configurators/aws/aws.go +++ b/lib/configurators/aws/aws.go @@ -400,7 +400,7 @@ func (c *ConfiguratorConfig) CheckAndSetDefaults() error { }) } if c.Identity == nil { - c.Identity, err = awslib.GetIdentityWithClientV2(context.Background(), c.stsClient) + c.Identity, err = awslib.GetIdentityWithClient(context.Background(), c.stsClient) if err != nil { return trace.Wrap(err) } diff --git a/lib/integrations/awsoidc/eks_enroll_clusters.go b/lib/integrations/awsoidc/eks_enroll_clusters.go index 66f647e7c1248..a38acf38360cf 100644 --- a/lib/integrations/awsoidc/eks_enroll_clusters.go +++ b/lib/integrations/awsoidc/eks_enroll_clusters.go @@ -539,7 +539,7 @@ func maybeAddAccessEntry(ctx context.Context, log *slog.Logger, clusterName, rol _, err = clt.CreateAccessEntry(ctx, createAccessEntryReq) if err != nil { - convertedError := awslib.ConvertRequestFailureErrorV2(err) + convertedError := awslib.ConvertRequestFailureError(err) if 
!trace.IsAccessDenied(convertedError) { return false, trace.Wrap(err) } diff --git a/lib/integrations/awsoidc/listdeployeddatabaseservice.go b/lib/integrations/awsoidc/listdeployeddatabaseservice.go index 4ac05bdbe2c3b..c15138a64a1e4 100644 --- a/lib/integrations/awsoidc/listdeployeddatabaseservice.go +++ b/lib/integrations/awsoidc/listdeployeddatabaseservice.go @@ -140,7 +140,7 @@ func ListDeployedDatabaseServices(ctx context.Context, clt ListDeployedDatabaseS listServicesOutput, err := clt.ListServices(ctx, listServicesInput) if err != nil { - convertedError := awslib.ConvertRequestFailureErrorV2(err) + convertedError := awslib.ConvertRequestFailureError(err) if trace.IsNotFound(convertedError) { return &ListDeployedDatabaseServicesResponse{}, nil } diff --git a/lib/srv/db/cloud/aws.go b/lib/srv/db/cloud/aws.go index 091f066cebe47..fe303654f7933 100644 --- a/lib/srv/db/cloud/aws.go +++ b/lib/srv/db/cloud/aws.go @@ -351,7 +351,7 @@ func (r *rdsDBConfigurator) enableIAMAuth(ctx context.Context, db types.Database EnableIAMDatabaseAuthentication: aws.Bool(true), ApplyImmediately: aws.Bool(true), }) - return awslib.ConvertRequestFailureErrorV2(err) + return awslib.ConvertRequestFailureError(err) } if meta.RDS.InstanceID != "" { _, err = clt.ModifyDBInstance(ctx, &rds.ModifyDBInstanceInput{ @@ -359,7 +359,7 @@ func (r *rdsDBConfigurator) enableIAMAuth(ctx context.Context, db types.Database EnableIAMDatabaseAuthentication: aws.Bool(true), ApplyImmediately: aws.Bool(true), }) - return awslib.ConvertRequestFailureErrorV2(err) + return awslib.ConvertRequestFailureError(err) } return nil } diff --git a/lib/srv/db/cloud/iam.go b/lib/srv/db/cloud/iam.go index dfea3893469bc..7baf562101e1f 100644 --- a/lib/srv/db/cloud/iam.go +++ b/lib/srv/db/cloud/iam.go @@ -20,12 +20,11 @@ package cloud import ( "context" - "errors" "log/slog" + "strings" "sync" "time" - "github.com/aws/aws-sdk-go/aws/credentials" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" @@ -264,7 
+263,11 @@ func (c *IAM) getAWSIdentity(ctx context.Context, database types.Database) (awsl return nil, trace.Wrap(err) } clt := c.cfg.awsClients.getSTSClient(awsCfg) - awsIdentity, err := awslib.GetIdentityWithClientV2(ctx, clt) + _, err = awsCfg.Credentials.Retrieve(ctx) + if err != nil { + return nil, trace.Wrap(err, "failed to retrieve credentials") + } + awsIdentity, err := awslib.GetIdentityWithClient(ctx, clt) if err != nil { return nil, trace.Wrap(err) } @@ -298,9 +301,10 @@ func (c *IAM) processTask(ctx context.Context, task iamTask) error { configurator, err := c.getAWSConfigurator(ctx, task.database) if err != nil { c.iamPolicyStatus.Store(task.database.GetName(), types.IAMPolicyStatus_IAM_POLICY_STATUS_FAILED) - if errors.Is(trace.Unwrap(err), credentials.ErrNoValidProvidersFoundInChain) { - c.logger.WarnContext(ctx, "No AWS credentials provider, Skipping IAM task for database", + if strings.Contains(err.Error(), "failed to retrieve credentials") { + c.logger.WarnContext(ctx, "Failed to load AWS IAM configurator, skipping IAM task for database", "database", task.database.GetName(), + "error", err, ) return nil } diff --git a/lib/srv/db/cloud/iam_test.go b/lib/srv/db/cloud/iam_test.go index cae979ddaa360..23fd39378f060 100644 --- a/lib/srv/db/cloud/iam_test.go +++ b/lib/srv/db/cloud/iam_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/service/iam" iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" rdstypes "github.com/aws/aws-sdk-go-v2/service/rds/types" @@ -33,6 +34,7 @@ import ( "github.com/gravitational/teleport/api/types" "github.com/gravitational/teleport/lib/auth/authclient" + "github.com/gravitational/teleport/lib/cloud/awsconfig" "github.com/gravitational/teleport/lib/cloud/mocks" "github.com/gravitational/teleport/lib/defaults" "github.com/gravitational/teleport/lib/services" @@ -58,9 +60,7 @@ func TestAWSIAM(t *testing.T) { // Configure 
mocks. stsClient := &mocks.STSClient{ - STSClientV1: mocks.STSClientV1{ - ARN: "arn:aws:iam::123456789012:role/test-role", - }, + ARN: "arn:aws:iam::123456789012:role/test-role", } clt := &mocks.RDSClient{ @@ -152,9 +152,10 @@ func TestAWSIAM(t *testing.T) { } configurator, err := NewIAM(ctx, IAMConfig{ AccessPoint: &mockAccessPoint{}, - AWSConfigProvider: &mocks.AWSConfigProvider{ - STSClient: stsClient, - }, + AWSConfigProvider: withStaticCredentials( + &mocks.AWSConfigProvider{ + STSClient: stsClient, + }), HostID: "host-id", onProcessedTask: func(iamTask, error) { taskChan <- struct{}{} @@ -289,9 +290,7 @@ func TestAWSIAMNoPermissions(t *testing.T) { // Create unauthorized mocks for AWS services. stsClient := &mocks.STSClient{ - STSClientV1: mocks.STSClientV1{ - ARN: "arn:aws:iam::123456789012:role/test-role", - }, + ARN: "arn:aws:iam::123456789012:role/test-role", } tests := []struct { name string @@ -361,9 +360,10 @@ func TestAWSIAMNoPermissions(t *testing.T) { configurator, err := NewIAM(ctx, IAMConfig{ AccessPoint: &mockAccessPoint{}, HostID: "host-id", - AWSConfigProvider: &mocks.AWSConfigProvider{ - STSClient: stsClient, - }, + AWSConfigProvider: withStaticCredentials( + &mocks.AWSConfigProvider{ + STSClient: stsClient, + }), awsClients: test.awsClients, }) require.NoError(t, err) @@ -425,3 +425,15 @@ func (m *mockAccessPoint) AcquireSemaphore(ctx context.Context, params types.Acq func (m *mockAccessPoint) CancelSemaphoreLease(ctx context.Context, lease types.SemaphoreLease) error { return nil } + +func withStaticCredentials(p awsconfig.Provider) awsconfig.Provider { + return awsconfig.ProviderFunc( + func(ctx context.Context, region string, optFns ...awsconfig.OptionsFn) (aws.Config, error) { + cfg, err := p.GetConfig(ctx, region, optFns...) 
+ if err != nil { + return aws.Config{}, trace.Wrap(err) + } + cfg.Credentials = credentials.NewStaticCredentialsProvider("FAKE_ID", "FAKE_KEY", "FAKE_TOKEN") + return cfg, nil + }) +} diff --git a/lib/srv/db/cloud/resource_checker_credentials.go b/lib/srv/db/cloud/resource_checker_credentials.go index 1902a2d886ffe..1e3854d32bd97 100644 --- a/lib/srv/db/cloud/resource_checker_credentials.go +++ b/lib/srv/db/cloud/resource_checker_credentials.go @@ -121,7 +121,7 @@ func (c *credentialsChecker) getAWSIdentity(ctx context.Context, meta *types.AWS return nil, trace.Wrap(err) } client := c.awsClients.getSTSClient(awsCfg) - return aws.GetIdentityWithClientV2(ctx, client) + return aws.GetIdentityWithClient(ctx, client) }) return identity, trace.Wrap(err) } diff --git a/lib/srv/db/cloud/users/elasticache.go b/lib/srv/db/cloud/users/elasticache.go index 4e46bb5e4ab3d..6bbc2ecd27659 100644 --- a/lib/srv/db/cloud/users/elasticache.go +++ b/lib/srv/db/cloud/users/elasticache.go @@ -164,7 +164,7 @@ func (f *elastiCacheFetcher) getUsersForRegion(ctx context.Context, region strin for pager.HasMorePages() { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libaws.ConvertRequestFailureError(err)) } users = append(users, page.Users...) 
} @@ -185,7 +185,7 @@ func (f *elastiCacheFetcher) getUserTags(ctx context.Context, user *ectypes.User ResourceName: user.ARN, }) if err != nil { - return nil, trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libaws.ConvertRequestFailureError(err)) } return output.TagList, nil } @@ -256,7 +256,7 @@ func (r *elastiCacheUserResource) ModifyUserPassword(ctx context.Context, oldPas NoPasswordRequired: aws.Bool(len(passwords) == 0), } if _, err := r.client.ModifyUser(ctx, input); err != nil { - return trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return trace.Wrap(libaws.ConvertRequestFailureError(err)) } return nil } diff --git a/lib/srv/db/cloud/users/memorydb.go b/lib/srv/db/cloud/users/memorydb.go index 6b4c09b3561a7..ee41122c497cd 100644 --- a/lib/srv/db/cloud/users/memorydb.go +++ b/lib/srv/db/cloud/users/memorydb.go @@ -163,7 +163,7 @@ func (f *memoryDBFetcher) getUsersForRegion(ctx context.Context, region string, for i := 0; i < common.MaxPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libaws.ConvertRequestFailureError(err)) } users = append(users, page.Users...) 
} @@ -185,7 +185,7 @@ func (f *memoryDBFetcher) getUserTags(ctx context.Context, user *memorydbtypes.U ResourceArn: user.ARN, }) if err != nil { - return nil, trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libaws.ConvertRequestFailureError(err)) } return output.TagList, nil } @@ -245,7 +245,7 @@ func (r *memoryDBUserResource) ModifyUserPassword(ctx context.Context, oldPasswo input.AuthenticationMode.Passwords = append(input.AuthenticationMode.Passwords, newPassword) if _, err := r.client.UpdateUser(ctx, input); err != nil { - return trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return trace.Wrap(libaws.ConvertRequestFailureError(err)) } return nil } diff --git a/lib/srv/db/common/auth.go b/lib/srv/db/common/auth.go index 92b5c7a785c89..9fdd254521bca 100644 --- a/lib/srv/db/common/auth.go +++ b/lib/srv/db/common/auth.go @@ -35,12 +35,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" rdsauth "github.com/aws/aws-sdk-go-v2/feature/rds/auth" "github.com/aws/aws-sdk-go-v2/service/redshift" rss "github.com/aws/aws-sdk-go-v2/service/redshiftserverless" "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go/aws/credentials" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" "golang.org/x/oauth2" @@ -63,12 +62,18 @@ import ( "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" awsutils "github.com/gravitational/teleport/lib/utils/aws" - "github.com/gravitational/teleport/lib/utils/aws/migration" ) -// azureVirtualMachineCacheTTL is the default TTL for Azure virtual machine -// cache entries. -const azureVirtualMachineCacheTTL = 5 * time.Minute +const ( + // azureVirtualMachineCacheTTL is the default TTL for Azure virtual machine + // cache entries. 
+ azureVirtualMachineCacheTTL = 5 * time.Minute + + // emptyPayloadHash is the SHA-256 for an empty element (as in echo -n | sha256sum). + // PresignHTTP requires the hash of the body, but when there is no body we hash the empty string. + // https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html + emptyPayloadHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) // Auth defines interface for creating auth tokens and TLS configurations. type Auth interface { @@ -659,14 +664,14 @@ func (a *dbAuth) GetElastiCacheRedisToken(ctx context.Context, database types.Da tokenReq := &awsRedisIAMTokenRequest{ // For IAM-enabled ElastiCache users, the username and user id properties must be identical. // https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth-iam.html#auth-iam-limits - userID: databaseUser, - targetID: meta.ElastiCache.ReplicationGroupID, - serviceName: "elasticache", - region: meta.Region, - credentials: migration.NewCredentialsAdapter(awsCfg.Credentials), - clock: a.cfg.Clock, - } - token, err := tokenReq.toSignedRequestURI() + userID: databaseUser, + targetID: meta.ElastiCache.ReplicationGroupID, + serviceName: "elasticache", + region: meta.Region, + credProvider: awsCfg.Credentials, + clock: a.cfg.Clock, + } + token, err := tokenReq.toSignedRequestURI(ctx) return token, trace.Wrap(err) } @@ -685,14 +690,14 @@ func (a *dbAuth) GetMemoryDBToken(ctx context.Context, database types.Database, "database_user", databaseUser, ) tokenReq := &awsRedisIAMTokenRequest{ - userID: databaseUser, - targetID: meta.MemoryDB.ClusterName, - serviceName: "memorydb", - region: meta.Region, - credentials: migration.NewCredentialsAdapter(awsCfg.Credentials), - clock: a.cfg.Clock, - } - token, err := tokenReq.toSignedRequestURI() + userID: databaseUser, + targetID: meta.MemoryDB.ClusterName, + serviceName: "memorydb", + region: meta.Region, + credProvider: awsCfg.Credentials, + clock: a.cfg.Clock, + } + token, err := 
tokenReq.toSignedRequestURI(ctx) return token, trace.Wrap(err) } @@ -1158,7 +1163,7 @@ func (a *dbAuth) buildAWSRoleARNFromDatabaseUser(ctx context.Context, database t } clt := a.cfg.awsClients.getSTSClient(awsCfg) - identity, err := awslib.GetIdentityWithClientV2(ctx, clt) + identity, err := awslib.GetIdentityWithClient(ctx, clt) if err != nil { return "", trace.Wrap(err) } @@ -1270,8 +1275,8 @@ type awsRedisIAMTokenRequest struct { targetID string // region is the AWS region. region string - // credentials are used to presign with AWS SigV4. - credentials *credentials.Credentials + // credProvider are used to presign with AWS SigV4. + credProvider aws.CredentialsProvider // clock is the clock implementation. clock clockwork.Clock // serviceName is the AWS service name used for signing. @@ -1289,8 +1294,8 @@ func (r *awsRedisIAMTokenRequest) checkAndSetDefaults() error { if r.region == "" { return trace.BadParameter("missing region") } - if r.credentials == nil { - return trace.BadParameter("missing credentials") + if r.credProvider == nil { + return trace.BadParameter("missing credentials provider") } if r.serviceName == "" { return trace.BadParameter("missing service name") @@ -1304,7 +1309,7 @@ func (r *awsRedisIAMTokenRequest) checkAndSetDefaults() error { // toSignedRequestURI creates a new AWS SigV4 pre-signed request URI. // This pre-signed request URI can then be used to authenticate as an // ElastiCache Redis or MemoryDB user. 
-func (r *awsRedisIAMTokenRequest) toSignedRequestURI() (string, error) { +func (r *awsRedisIAMTokenRequest) toSignedRequestURI(ctx context.Context) (string, error) { if err := r.checkAndSetDefaults(); err != nil { return "", trace.Wrap(err) } @@ -1312,17 +1317,16 @@ func (r *awsRedisIAMTokenRequest) toSignedRequestURI() (string, error) { if err != nil { return "", trace.Wrap(err) } - s := v4.NewSigner(r.credentials) - _, err = s.Presign(req, nil, r.serviceName, r.region, time.Minute*15, r.clock.Now()) + signer := v4.NewSigner() + creds, err := r.credProvider.Retrieve(ctx) if err != nil { return "", trace.Wrap(err) } - res := url.URL{ - Host: req.URL.Host, - Path: "/", - RawQuery: req.URL.RawQuery, + signedURI, _, err := signer.PresignHTTP(ctx, creds, req, emptyPayloadHash, r.serviceName, r.region, r.clock.Now()) + if err != nil { + return "", trace.Wrap(err) } - return strings.TrimPrefix(res.String(), "//"), nil + return strings.TrimPrefix(signedURI, "http://"), nil } // getSignableRequest creates a new request suitable for pre-signing with SigV4. diff --git a/lib/srv/db/common/auth_test.go b/lib/srv/db/common/auth_test.go index 3fb9645b9dddf..499e2737f4f0f 100644 --- a/lib/srv/db/common/auth_test.go +++ b/lib/srv/db/common/auth_test.go @@ -659,10 +659,8 @@ func TestGetAWSIAMCreds(t *testing.T) { "username is partial role ARN": { db: newMongoAtlasDatabase(t, types.AWS{}), stsMock: &mocks.STSClient{ - STSClientV1: mocks.STSClientV1{ - // This is the role returned by the STS GetCallerIdentity. - ARN: "arn:aws:iam::222222222222:role/teleport-service-role", - }, + // This is the role returned by the STS GetCallerIdentity. 
+ ARN: "arn:aws:iam::222222222222:role/teleport-service-role", }, username: "role/role-name", expectedAssumedRoles: []string{"arn:aws:iam::222222222222:role/role-name"}, @@ -682,9 +680,7 @@ func TestGetAWSIAMCreds(t *testing.T) { AssumeRoleARN: "arn:aws:iam::222222222222:role/teleport-service-role-external", }), stsMock: &mocks.STSClient{ - STSClientV1: mocks.STSClientV1{ - ARN: "arn:aws:iam::111111111111:role/teleport-service-role", - }, + ARN: "arn:aws:iam::111111111111:role/teleport-service-role", }, username: "role/role-name", expectedAssumedRoles: []string{ diff --git a/lib/srv/db/common/errors.go b/lib/srv/db/common/errors.go index 64d2980d65c1b..ef3bbb79f87a3 100644 --- a/lib/srv/db/common/errors.go +++ b/lib/srv/db/common/errors.go @@ -26,7 +26,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/go-mysql-org/go-mysql/mysql" "github.com/gravitational/trace" "github.com/jackc/pgconn" @@ -66,16 +65,13 @@ func ConvertError(err error) error { } var googleAPIErr *googleapi.Error - var awsRequestFailureErr awserr.RequestFailure - var awsRequestFailureErrV2 *awshttp.ResponseError + var awsRequestFailureErr *awshttp.ResponseError var azResponseErr *azcore.ResponseError var pgError *pgconn.PgError var myError *mysql.MyError switch err := trace.Unwrap(err); { case errors.As(err, &googleAPIErr): return convertGCPError(googleAPIErr) - case errors.As(err, &awsRequestFailureErrV2): - return awslib.ConvertRequestFailureErrorV2(awsRequestFailureErrV2) case errors.As(err, &awsRequestFailureErr): return awslib.ConvertRequestFailureError(awsRequestFailureErr) case errors.As(err, &azResponseErr): diff --git a/lib/srv/db/redis/engine.go b/lib/srv/db/redis/engine.go index 00142a4fdd92d..5644c452a0fe0 100644 --- a/lib/srv/db/redis/engine.go +++ b/lib/srv/db/redis/engine.go @@ -456,7 +456,7 @@ func (e *Engine) checkElastiCacheUserIAMAuthIsEnabled(ctx 
context.Context, awsMe input := elasticache.DescribeUsersInput{UserId: aws.String(username)} out, err := client.DescribeUsers(ctx, &input) if err != nil { - return false, trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return false, trace.Wrap(libaws.ConvertRequestFailureError(err)) } if len(out.Users) < 1 || out.Users[0].Authentication == nil { return false, nil @@ -477,7 +477,7 @@ func (e *Engine) checkMemoryDBUserIAMAuthIsEnabled(ctx context.Context, awsMeta input := memorydb.DescribeUsersInput{UserName: aws.String(username)} out, err := client.DescribeUsers(ctx, &input) if err != nil { - return false, trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return false, trace.Wrap(libaws.ConvertRequestFailureError(err)) } if len(out.Users) < 1 || out.Users[0].Authentication == nil { return false, nil diff --git a/lib/srv/db/secrets/aws_secrets_manager.go b/lib/srv/db/secrets/aws_secrets_manager.go index 70c96a326370d..81c1b867a768a 100644 --- a/lib/srv/db/secrets/aws_secrets_manager.go +++ b/lib/srv/db/secrets/aws_secrets_manager.go @@ -325,7 +325,7 @@ func convertSecretsManagerError(err error) error { } // Match by status code. 
- return trace.Wrap(libaws.ConvertRequestFailureErrorV2(err)) + return trace.Wrap(libaws.ConvertRequestFailureError(err)) } const ( diff --git a/lib/srv/discovery/common/renaming_test.go b/lib/srv/discovery/common/renaming_test.go index 7bb64f9f01bab..ae24023554742 100644 --- a/lib/srv/discovery/common/renaming_test.go +++ b/lib/srv/discovery/common/renaming_test.go @@ -27,9 +27,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/mysql/armmysqlflexibleservers" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redis/armredis/v3" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/redisenterprise/armredisenterprise" + "github.com/aws/aws-sdk-go-v2/aws" ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" rdstypes "github.com/aws/aws-sdk-go-v2/service/rds/types" - "github.com/aws/aws-sdk-go/aws" "github.com/google/uuid" "github.com/stretchr/testify/require" @@ -505,7 +505,7 @@ func makeEKSKubeCluster(t *testing.T, name, region, accountID, overrideLabel str overrideLabel: name, }, } - kubeCluster, err := NewKubeClusterFromAWSEKS(aws.StringValue(eksCluster.Name), aws.StringValue(eksCluster.Arn), eksCluster.Tags) + kubeCluster, err := NewKubeClusterFromAWSEKS(aws.ToString(eksCluster.Name), aws.ToString(eksCluster.Arn), eksCluster.Tags) require.NoError(t, err) require.True(t, kubeCluster.IsAWS()) return kubeCluster diff --git a/lib/srv/discovery/common/server.go b/lib/srv/discovery/common/server.go index 78893f88aed5a..a13db6f29217a 100644 --- a/lib/srv/discovery/common/server.go +++ b/lib/srv/discovery/common/server.go @@ -21,7 +21,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - ec2v1 "github.com/aws/aws-sdk-go/service/ec2" "github.com/gravitational/trace" "github.com/gravitational/teleport/api/types" @@ -76,30 +75,3 @@ func NewAWSNodeFromEC2Instance(instance ec2types.Instance, awsCloudMetadata *typ return server, nil } - -// NewAWSNodeFromEC2v1Instance creates a Node resource from 
an EC2 Instance. -// It has a pre-populated spec which contains info that is not available in the ec2.Instance object. -// Uses AWS SDK Go V1 -func NewAWSNodeFromEC2v1Instance(instance ec2v1.Instance, awsCloudMetadata *types.AWSInfo) (types.Server, error) { - server, err := NewAWSNodeFromEC2Instance(ec2InstanceV1ToV2(instance), awsCloudMetadata) - return server, trace.Wrap(err) -} - -func ec2InstanceV1ToV2(instance ec2v1.Instance) ec2types.Instance { - tags := make([]ec2types.Tag, 0, len(instance.Tags)) - for _, tag := range instance.Tags { - tags = append(tags, ec2types.Tag{ - Key: tag.Key, - Value: tag.Value, - }) - } - - return ec2types.Instance{ - InstanceId: instance.InstanceId, - VpcId: instance.VpcId, - SubnetId: instance.SubnetId, - PrivateIpAddress: instance.PrivateIpAddress, - PrivateDnsName: instance.PrivateDnsName, - Tags: tags, - } -} diff --git a/lib/srv/discovery/common/server_test.go b/lib/srv/discovery/common/server_test.go index 77eaa89ccddca..95de601ff95a6 100644 --- a/lib/srv/discovery/common/server_test.go +++ b/lib/srv/discovery/common/server_test.go @@ -23,7 +23,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - ec2v1 "github.com/aws/aws-sdk-go/service/ec2" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/gravitational/trace" @@ -281,254 +280,3 @@ func TestNewAWSNodeFromEC2Instance(t *testing.T) { }) } } - -func TestNewAWSNodeFromEC2v1Instance(t *testing.T) { - isBadParameterErr := func(tt require.TestingT, err error, i ...any) { - require.True(tt, trace.IsBadParameter(err), "expected bad parameter, got %v", err) - } - - makeEC2Instance := func(fn func(*ec2v1.Instance)) ec2v1.Instance { - s := ec2v1.Instance{ - PrivateDnsName: aws.String("my-private-dns.compute.aws"), - InstanceId: aws.String("i-123456789abcedf"), - VpcId: aws.String("vpc-abcd"), - SubnetId: aws.String("subnet-123"), - PrivateIpAddress: aws.String("172.31.1.1"), - Tags: []*ec2v1.Tag{ - 
{ - Key: aws.String("MyTag"), - Value: aws.String("MyTagValue"), - }, - }, - } - fn(&s) - return s - } - - for _, tt := range []struct { - name string - ec2Instance ec2v1.Instance - awsCloudMetadata *types.AWSInfo - errCheck require.ErrorAssertionFunc - expectedServer types.Server - }{ - { - name: "valid", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) {}), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: require.NoError, - expectedServer: &types.ServerV2{ - Kind: "node", - Version: "v2", - SubKind: "openssh-ec2-ice", - Metadata: types.Metadata{ - Labels: map[string]string{ - "account-id": "123456789012", - "region": "us-east-1", - "MyTag": "MyTagValue", - "teleport.dev/instance-id": "i-123456789abcedf", - "teleport.dev/account-id": "123456789012", - }, - Namespace: "default", - }, - Spec: types.ServerSpecV2{ - Addr: "172.31.1.1:22", - Hostname: "my-private-dns.compute.aws", - CloudMetadata: &types.CloudMetadata{ - AWS: &types.AWSInfo{ - AccountID: "123456789012", - InstanceID: "i-123456789abcedf", - Region: "us-east-1", - VPCID: "vpc-abcd", - SubnetID: "subnet-123", - Integration: "myintegration", - }, - }, - }, - }, - }, - - { - name: "valid with hostname override by tag", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) { - i.Tags = append(i.Tags, &ec2v1.Tag{ - Key: aws.String("TeleportHostname"), - Value: aws.String("my-custom-hostname"), - }) - }), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: require.NoError, - expectedServer: &types.ServerV2{ - Kind: "node", - Version: "v2", - SubKind: "openssh-ec2-ice", - Metadata: types.Metadata{ - Labels: map[string]string{ - "account-id": "123456789012", - "region": "us-east-1", - "MyTag": "MyTagValue", - "TeleportHostname": "my-custom-hostname", - "teleport.dev/instance-id": "i-123456789abcedf", - "teleport.dev/account-id": 
"123456789012", - }, - Namespace: "default", - }, - Spec: types.ServerSpecV2{ - Addr: "172.31.1.1:22", - Hostname: "my-custom-hostname", - CloudMetadata: &types.CloudMetadata{ - AWS: &types.AWSInfo{ - AccountID: "123456789012", - InstanceID: "i-123456789abcedf", - Region: "us-east-1", - VPCID: "vpc-abcd", - SubnetID: "subnet-123", - Integration: "myintegration", - }, - }, - }, - }, - }, - { - name: "instance metadata generated labels are not replaced by instance tags", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) { - i.Tags = append(i.Tags, &ec2v1.Tag{ - Key: aws.String("region"), - Value: aws.String("evil"), - }) - i.Tags = append(i.Tags, &ec2v1.Tag{ - Key: aws.String("account-id"), - Value: aws.String("evil"), - }) - }), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: require.NoError, - expectedServer: &types.ServerV2{ - Kind: "node", - Version: "v2", - SubKind: "openssh-ec2-ice", - Metadata: types.Metadata{ - Labels: map[string]string{ - "account-id": "123456789012", - "region": "us-east-1", - "MyTag": "MyTagValue", - "teleport.dev/instance-id": "i-123456789abcedf", - "teleport.dev/account-id": "123456789012", - }, - Namespace: "default", - }, - Spec: types.ServerSpecV2{ - Addr: "172.31.1.1:22", - Hostname: "my-private-dns.compute.aws", - CloudMetadata: &types.CloudMetadata{ - AWS: &types.AWSInfo{ - AccountID: "123456789012", - InstanceID: "i-123456789abcedf", - Region: "us-east-1", - VPCID: "vpc-abcd", - SubnetID: "subnet-123", - Integration: "myintegration", - }, - }, - }, - }, - }, - { - name: "missing ec2 private dns name", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) { - i.PrivateDnsName = nil - }), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: isBadParameterErr, - }, - { - name: "missing ec2 instance id", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) 
{ - i.InstanceId = nil - }), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: isBadParameterErr, - }, - { - name: "missing ec2 vpc id", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) { - i.VpcId = nil - }), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: isBadParameterErr, - }, - { - name: "missing ec2 private ip address", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) { - i.PrivateDnsName = nil - }), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: isBadParameterErr, - }, - { - name: "missing account id", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) {}), - awsCloudMetadata: &types.AWSInfo{ - Region: "us-east-1", - Integration: "myintegration", - }, - errCheck: isBadParameterErr, - }, - { - name: "missing region", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) {}), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Integration: "myintegration", - }, - errCheck: isBadParameterErr, - }, - { - name: "missing integration name", - ec2Instance: makeEC2Instance(func(i *ec2v1.Instance) {}), - awsCloudMetadata: &types.AWSInfo{ - AccountID: "123456789012", - Region: "us-east-1", - }, - errCheck: isBadParameterErr, - }, - } { - t.Run(tt.name, func(t *testing.T) { - s, err := NewAWSNodeFromEC2v1Instance(tt.ec2Instance, tt.awsCloudMetadata) - tt.errCheck(t, err) - if err != nil { - return - } - - require.Empty(t, cmp.Diff(tt.expectedServer, s, cmpopts.IgnoreFields(types.ServerV2{}, "Metadata.Name"))) - }) - } -} diff --git a/lib/srv/discovery/fetchers/aws-sync/aws-sync.go b/lib/srv/discovery/fetchers/aws-sync/aws-sync.go index 146637659bba7..6567158fbbe9a 100644 --- a/lib/srv/discovery/fetchers/aws-sync/aws-sync.go +++ b/lib/srv/discovery/fetchers/aws-sync/aws-sync.go 
@@ -362,8 +362,8 @@ func (a *Fetcher) poll(ctx context.Context, features Features) (*Resources, erro return result, trace.NewAggregate(errs...) } -// getAWSOptions returns a list of options to be used when -// creating AWS clients with the v2 sdk. +// getAWSOptions returns a list of options to be used when creating AWS clients +// with the v2 sdk. func (a *Fetcher) getAWSOptions() []awsconfig.OptionsFn { opts := []awsconfig.OptionsFn{ awsconfig.WithCredentialsMaybeIntegration(a.Config.Integration), diff --git a/lib/srv/discovery/fetchers/db/aws_docdb.go b/lib/srv/discovery/fetchers/db/aws_docdb.go index ef1920d83d6b8..8632df542d7fb 100644 --- a/lib/srv/discovery/fetchers/db/aws_docdb.go +++ b/lib/srv/discovery/fetchers/db/aws_docdb.go @@ -21,9 +21,9 @@ package db import ( "context" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/rds" rdstypes "github.com/aws/aws-sdk-go-v2/service/rds/types" - "github.com/aws/aws-sdk-go/aws" "github.com/gravitational/trace" "github.com/gravitational/teleport/api/types" @@ -64,22 +64,22 @@ func (f *rdsDocumentDBFetcher) GetDatabases(ctx context.Context, cfg *awsFetcher for _, cluster := range clusters { if !libcloudaws.IsDocumentDBClusterSupported(&cluster) { cfg.Logger.DebugContext(ctx, "DocumentDB cluster doesn't support IAM authentication. Skipping.", - "cluster", aws.StringValue(cluster.DBClusterIdentifier), - "engine_version", aws.StringValue(cluster.EngineVersion)) + "cluster", aws.ToString(cluster.DBClusterIdentifier), + "engine_version", aws.ToString(cluster.EngineVersion)) continue } if !libcloudaws.IsDBClusterAvailable(cluster.Status, cluster.DBClusterIdentifier) { cfg.Logger.DebugContext(ctx, "DocumentDB cluster is not available. 
Skipping.", - "cluster", aws.StringValue(cluster.DBClusterIdentifier), - "status", aws.StringValue(cluster.Status)) + "cluster", aws.ToString(cluster.DBClusterIdentifier), + "status", aws.ToString(cluster.Status)) continue } dbs, err := common.NewDatabasesFromDocumentDBCluster(&cluster) if err != nil { cfg.Logger.WarnContext(ctx, "Could not convert DocumentDB cluster to database resources.", - "cluster", aws.StringValue(cluster.DBClusterIdentifier), + "cluster", aws.ToString(cluster.DBClusterIdentifier), "error", err) } databases = append(databases, dbs...) @@ -101,7 +101,7 @@ func (f *rdsDocumentDBFetcher) getAllDBClusters(ctx context.Context, clt RDSClie for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } clusters = append(clusters, page.DBClusters...) } diff --git a/lib/srv/discovery/fetchers/db/aws_elasticache.go b/lib/srv/discovery/fetchers/db/aws_elasticache.go index b200e732e9b79..c5e417ed7c0f4 100644 --- a/lib/srv/discovery/fetchers/db/aws_elasticache.go +++ b/lib/srv/discovery/fetchers/db/aws_elasticache.go @@ -156,7 +156,7 @@ func getElastiCacheClusters(ctx context.Context, client ElastiCacheClient) ([]ec for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } out = append(out, page.ReplicationGroups...) 
} @@ -176,7 +176,7 @@ func getElastiCacheNodes(ctx context.Context, client ElastiCacheClient) ([]ectyp for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } // There are three types of ectypes.CacheCluster: // 1) a Memcache cluster. @@ -204,7 +204,7 @@ func getElastiCacheSubnetGroups(ctx context.Context, client ElastiCacheClient) ( for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } out = append(out, page.CacheSubnetGroups...) } @@ -219,7 +219,7 @@ func getElastiCacheResourceTags(ctx context.Context, client ElastiCacheClient, r } output, err := client.ListTagsForResource(ctx, input) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } return output.TagList, nil diff --git a/lib/srv/discovery/fetchers/db/aws_memorydb.go b/lib/srv/discovery/fetchers/db/aws_memorydb.go index d4ab6189c1e9b..92398f3191b74 100644 --- a/lib/srv/discovery/fetchers/db/aws_memorydb.go +++ b/lib/srv/discovery/fetchers/db/aws_memorydb.go @@ -140,7 +140,7 @@ func getMemoryDBClusters(ctx context.Context, client MemoryDBClient) ([]memorydb for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } out = append(out, page.Clusters...) 
} @@ -157,7 +157,7 @@ func getMemoryDBSubnetGroups(ctx context.Context, client MemoryDBClient) ([]memo for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } out = append(out, page.SubnetGroups...) } diff --git a/lib/srv/discovery/fetchers/db/aws_opensearch.go b/lib/srv/discovery/fetchers/db/aws_opensearch.go index cb92f78da8b61..cd9db40b4dfc1 100644 --- a/lib/srv/discovery/fetchers/db/aws_opensearch.go +++ b/lib/srv/discovery/fetchers/db/aws_opensearch.go @@ -111,7 +111,7 @@ func (f *openSearchPlugin) GetDatabases(ctx context.Context, cfg *awsFetcherConf func getOpenSearchDomains(ctx context.Context, client OpenSearchClient) ([]opensearchtypes.DomainStatus, error) { names, err := client.ListDomainNames(ctx, &opensearch.ListDomainNamesInput{}) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } req := &opensearch.DescribeDomainsInput{} @@ -132,7 +132,7 @@ func getOpenSearchDomains(ctx context.Context, client OpenSearchClient) ([]opens func getOpenSearchResourceTags(ctx context.Context, client OpenSearchClient, resourceARN *string) ([]opensearchtypes.Tag, error) { output, err := client.ListTags(ctx, &opensearch.ListTagsInput{ARN: resourceARN}) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } return output.TagList, nil diff --git a/lib/srv/discovery/fetchers/db/aws_rds.go b/lib/srv/discovery/fetchers/db/aws_rds.go index 1b438873c8726..1dbe6c0380da1 100644 --- a/lib/srv/discovery/fetchers/db/aws_rds.go +++ b/lib/srv/discovery/fetchers/db/aws_rds.go @@ -137,7 +137,7 @@ func getAllDBInstancesWithFilters(ctx context.Context, clt RDSClient, maxPages i out = 
instances return nil }) - return out, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return out, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } // newRDSAuroraClustersFetcher returns a new AWS fetcher for RDS Aurora @@ -231,7 +231,7 @@ func getAllDBClusters(ctx context.Context, clt RDSClient, maxPages int, logger * out = clusters return nil }) - return out, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return out, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } // rdsInstanceEngines returns engines to make sure DescribeDBInstances call returns diff --git a/lib/srv/discovery/fetchers/db/aws_rds_proxy.go b/lib/srv/discovery/fetchers/db/aws_rds_proxy.go index 59adf7f7f5b88..d297434e17200 100644 --- a/lib/srv/discovery/fetchers/db/aws_rds_proxy.go +++ b/lib/srv/discovery/fetchers/db/aws_rds_proxy.go @@ -145,7 +145,7 @@ func getRDSProxies(ctx context.Context, clt RDSClient, maxPages int) ([]rdstypes for i := 0; i < maxPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } rdsProxies = append(rdsProxies, page.DBProxies...) 
} @@ -165,7 +165,7 @@ func getRDSProxyCustomEndpoints(ctx context.Context, clt RDSClient, maxPages int for i := 0; i < maxPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } for _, customEndpoint := range page.DBProxyEndpoints { customEndpointsByProxyName[aws.ToString(customEndpoint.DBProxyName)] = append(customEndpointsByProxyName[aws.ToString(customEndpoint.DBProxyName)], customEndpoint) @@ -180,7 +180,7 @@ func listRDSResourceTags(ctx context.Context, clt RDSClient, resourceName *strin ResourceName: resourceName, }) if err != nil { - return nil, trace.Wrap(libcloudaws.ConvertRequestFailureErrorV2(err)) + return nil, trace.Wrap(libcloudaws.ConvertRequestFailureError(err)) } return output.TagList, nil } diff --git a/lib/srv/discovery/fetchers/db/aws_redshift.go b/lib/srv/discovery/fetchers/db/aws_redshift.go index ccfa726e36e9e..351d438bf68e1 100644 --- a/lib/srv/discovery/fetchers/db/aws_redshift.go +++ b/lib/srv/discovery/fetchers/db/aws_redshift.go @@ -100,7 +100,7 @@ func getRedshiftClusters(ctx context.Context, clt RedshiftClient) ([]redshifttyp for pageNum := 0; pageNum < maxAWSPages && pager.HasMorePages(); pageNum++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, libcloudaws.ConvertRequestFailureErrorV2(err) + return nil, libcloudaws.ConvertRequestFailureError(err) } clusters = append(clusters, page.Clusters...) 
} diff --git a/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go b/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go index 81dfb6cc27468..02cc5528e9683 100644 --- a/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go +++ b/lib/srv/discovery/fetchers/db/aws_redshift_serverless.go @@ -197,7 +197,7 @@ func getRSSWorkgroups(ctx context.Context, clt RSSClient) ([]rsstypes.Workgroup, for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, libcloudaws.ConvertRequestFailureErrorV2(err) + return nil, libcloudaws.ConvertRequestFailureError(err) } out = append(out, page.Workgroups...) } @@ -215,7 +215,7 @@ func getRSSVPCEndpoints(ctx context.Context, clt RSSClient) ([]rsstypes.Endpoint for i := 0; i < maxAWSPages && pager.HasMorePages(); i++ { page, err := pager.NextPage(ctx) if err != nil { - return nil, libcloudaws.ConvertRequestFailureErrorV2(err) + return nil, libcloudaws.ConvertRequestFailureError(err) } out = append(out, page.Endpoints...) 
} diff --git a/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go b/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go index bf657836520c6..1f1261b9e7491 100644 --- a/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go +++ b/lib/srv/discovery/fetchers/db/aws_redshift_serverless_test.go @@ -21,8 +21,8 @@ package db import ( "testing" + "github.com/aws/aws-sdk-go-v2/aws" rsstypes "github.com/aws/aws-sdk-go-v2/service/redshiftserverless/types" - "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/api/types" @@ -39,8 +39,8 @@ func TestRedshiftServerlessFetcher(t *testing.T) { endpointProd, endpointProdDB := makeRedshiftServerlessEndpoint(t, workgroupProd, "endpoint1", "us-east-1", envProdLabels) endpointDev, endpointProdDev := makeRedshiftServerlessEndpoint(t, workgroupDev, "endpoint2", "us-east-1", envDevLabels) tagsByARN := map[string][]rsstypes.Tag{ - aws.StringValue(workgroupProd.WorkgroupArn): awstesthelpers.LabelsToRedshiftServerlessTags(envProdLabels), - aws.StringValue(workgroupDev.WorkgroupArn): awstesthelpers.LabelsToRedshiftServerlessTags(envDevLabels), + aws.ToString(workgroupProd.WorkgroupArn): awstesthelpers.LabelsToRedshiftServerlessTags(envProdLabels), + aws.ToString(workgroupDev.WorkgroupArn): awstesthelpers.LabelsToRedshiftServerlessTags(envDevLabels), } workgroupNotAvailable := mocks.RedshiftServerlessWorkgroup("wg-creating", "us-east-1") From f2401d54c3db373cd85f508390c4bd3a08bcf2da Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 30 Jan 2025 18:41:47 -0500 Subject: [PATCH 15/28] Stop using default aws http client in access plugins (#51683) Updates the servicenow and opsgenie plugins to create an http client manually like all other access plugins. In addition to unifying client creation, this allows us to remove the dependency on the legacy aws sdk. 
--- integrations/access/opsgenie/client.go | 21 ++++++++++++++------- integrations/access/servicenow/client.go | 20 ++++++++++++++------ 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/integrations/access/opsgenie/client.go b/integrations/access/opsgenie/client.go index 2c8cdaec09a33..191f0be4670e5 100644 --- a/integrations/access/opsgenie/client.go +++ b/integrations/access/opsgenie/client.go @@ -27,7 +27,6 @@ import ( "text/template" "time" - "github.com/aws/aws-sdk-go/aws/defaults" "github.com/go-resty/resty/v2" "github.com/google/uuid" "github.com/gravitational/trace" @@ -115,12 +114,20 @@ func (cfg *ClientConfig) CheckAndSetDefaults() error { // NewClient creates a new Opsgenie client for managing alerts. func NewClient(conf ClientConfig) (*Client, error) { - client := resty.NewWithClient(defaults.Config().HTTPClient) - client.SetTransport(&http.Transport{ - Proxy: http.ProxyFromEnvironment, - }) - client.SetHeader("Authorization", "GenieKey "+conf.APIKey) - client.SetBaseURL(conf.APIEndpoint) + const ( + maxConns = 100 + clientTimeout = 10 * time.Second + ) + + client := resty.NewWithClient(&http.Client{ + Timeout: clientTimeout, + Transport: &http.Transport{ + MaxConnsPerHost: maxConns, + MaxIdleConnsPerHost: maxConns, + Proxy: http.ProxyFromEnvironment, + }}). + SetHeader("Authorization", "GenieKey "+conf.APIKey). 
+ SetBaseURL(conf.APIEndpoint) return &Client{ client: client, ClientConfig: conf, diff --git a/integrations/access/servicenow/client.go b/integrations/access/servicenow/client.go index 8c306c1efa4ee..857fae1913062 100644 --- a/integrations/access/servicenow/client.go +++ b/integrations/access/servicenow/client.go @@ -27,7 +27,6 @@ import ( "text/template" "time" - "github.com/aws/aws-sdk-go/aws/defaults" "github.com/go-resty/resty/v2" "github.com/gravitational/trace" @@ -101,10 +100,7 @@ func NewClient(conf ClientConfig) (*Client, error) { if err := conf.checkAndSetDefaults(); err != nil { return nil, trace.Wrap(err) } - client := resty.NewWithClient(defaults.Config().HTTPClient) - client.SetTransport(&http.Transport{ - Proxy: http.ProxyFromEnvironment, - }) + apiURL, err := url.Parse(conf.APIEndpoint) if err != nil { return nil, trace.Wrap(err) @@ -117,7 +113,19 @@ func NewClient(conf ClientConfig) (*Client, error) { apiURL.Scheme = "https" } - client.SetBaseURL(conf.APIEndpoint). + const ( + maxConns = 100 + clientTimeout = 10 * time.Second + ) + + client := resty.NewWithClient(&http.Client{ + Timeout: clientTimeout, + Transport: &http.Transport{ + MaxConnsPerHost: maxConns, + MaxIdleConnsPerHost: maxConns, + Proxy: http.ProxyFromEnvironment, + }}). + SetBaseURL(apiURL.String()). SetHeader("Content-Type", "application/json"). SetHeader("Accept", "application/json"). SetBasicAuth(conf.Username, conf.APIToken) From 2f9bd2af8f4e39faf7b9aec1da1eb430f41e9261 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Thu, 30 Jan 2025 18:42:28 -0500 Subject: [PATCH 16/28] Remove unused aws-sdk-go S3 metrics wrapper (#51674) The wrapper is no longer used since all S3 code has been converted to use aws-sdk-go-v2. 
--- lib/events/s3sessions/s3handler.go | 6 +- lib/observability/metrics/s3/manager.go | 74 ------------------------- lib/observability/metrics/s3/s3.go | 27 +++------ 3 files changed, 12 insertions(+), 95 deletions(-) delete mode 100644 lib/observability/metrics/s3/manager.go diff --git a/lib/events/s3sessions/s3handler.go b/lib/events/s3sessions/s3handler.go index 6b9da90ca8db6..c4f48ff203f73 100644 --- a/lib/events/s3sessions/s3handler.go +++ b/lib/events/s3sessions/s3handler.go @@ -48,6 +48,7 @@ import ( "github.com/gravitational/teleport/lib/events" "github.com/gravitational/teleport/lib/modules" awsmetrics "github.com/gravitational/teleport/lib/observability/metrics/aws" + s3metrics "github.com/gravitational/teleport/lib/observability/metrics/s3" "github.com/gravitational/teleport/lib/session" awsutils "github.com/gravitational/teleport/lib/utils/aws" "github.com/gravitational/teleport/lib/utils/aws/endpoint" @@ -197,7 +198,10 @@ func NewHandler(ctx context.Context, cfg Config) (*Handler, error) { opts = append(opts, config.WithCredentialsProvider(cfg.CredentialsProvider)) } - opts = append(opts, config.WithAPIOptions(awsmetrics.MetricsMiddleware())) + opts = append(opts, + config.WithAPIOptions(awsmetrics.MetricsMiddleware()), + config.WithAPIOptions(s3metrics.MetricsMiddleware()), + ) resolver, err := endpoint.NewLoggingResolver( s3.NewDefaultEndpointResolverV2(), diff --git a/lib/observability/metrics/s3/manager.go b/lib/observability/metrics/s3/manager.go deleted file mode 100644 index 324141f6b7b2d..0000000000000 --- a/lib/observability/metrics/s3/manager.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Teleport - * Copyright (C) 2023 Gravitational, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Affero General Public License for more details. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . - */ - -package s3 - -import ( - "context" - "io" - "time" - - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface" - "github.com/gravitational/trace" - - "github.com/gravitational/teleport/lib/observability/metrics" -) - -type UploadAPIMetrics struct { - s3manageriface.UploaderAPI -} - -func NewUploadAPIMetrics(api s3manageriface.UploaderAPI) (*UploadAPIMetrics, error) { - if err := metrics.RegisterPrometheusCollectors(s3Collectors...); err != nil { - return nil, trace.Wrap(err) - } - - return &UploadAPIMetrics{UploaderAPI: api}, nil -} - -func (m *UploadAPIMetrics) UploadWithContext(ctx context.Context, input *s3manager.UploadInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) { - start := time.Now() - output, err := m.UploaderAPI.UploadWithContext(ctx, input, opts...) - - recordMetrics("upload", err, time.Since(start).Seconds()) - - return output, err -} - -type DownloadAPIMetrics struct { - s3manageriface.DownloaderAPI -} - -func NewDownloadAPIMetrics(api s3manageriface.DownloaderAPI) (*DownloadAPIMetrics, error) { - if err := metrics.RegisterPrometheusCollectors(s3Collectors...); err != nil { - return nil, trace.Wrap(err) - } - - return &DownloadAPIMetrics{DownloaderAPI: api}, nil -} - -func (m *DownloadAPIMetrics) DownloadWithContext(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, opts ...func(*s3manager.Downloader)) (int64, error) { - start := time.Now() - n, err := m.DownloaderAPI.DownloadWithContext(ctx, w, input, opts...) 
- - recordMetrics("download", err, time.Since(start).Seconds()) - - return n, err -} diff --git a/lib/observability/metrics/s3/s3.go b/lib/observability/metrics/s3/s3.go index a803fc587078b..eb738f47ee97c 100644 --- a/lib/observability/metrics/s3/s3.go +++ b/lib/observability/metrics/s3/s3.go @@ -25,6 +25,8 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/smithy-go/middleware" "github.com/prometheus/client_golang/prometheus" + + "github.com/gravitational/teleport/lib/observability/metrics" ) var ( @@ -52,24 +54,10 @@ var ( }, []string{"operation"}, ) - - s3Collectors = []prometheus.Collector{ - apiRequestsTotal, - apiRequests, - apiRequestLatencies, - } ) -// recordMetrics updates the set of s3 api metrics -func recordMetrics(operation string, err error, latency float64) { - apiRequestLatencies.WithLabelValues(operation).Observe(latency) - apiRequestsTotal.WithLabelValues(operation).Inc() - - result := "success" - if err != nil { - result = "error" - } - apiRequests.WithLabelValues(operation, result).Inc() +func init() { + _ = metrics.RegisterPrometheusCollectors(apiRequests, apiRequestsTotal, apiRequestLatencies) } // MetricsMiddleware returns middleware that can be used to capture @@ -97,13 +85,12 @@ func MetricsMiddleware() []func(stack *middleware.Stack) error { } then := ctx.Value(timestampKey{}).(time.Time) - service := awsmiddleware.GetServiceID(ctx) operation := awsmiddleware.GetOperationName(ctx) latency := time.Since(then).Seconds() - apiRequestsTotal.WithLabelValues(service, operation).Inc() - apiRequestLatencies.WithLabelValues(service, operation).Observe(latency) - apiRequests.WithLabelValues(service, operation, result).Inc() + apiRequestsTotal.WithLabelValues(operation).Inc() + apiRequestLatencies.WithLabelValues(operation).Observe(latency) + apiRequests.WithLabelValues(operation, result).Inc() return out, md, err }), middleware.After) From f18259727d4d49456a49c9065fb343085993ffbe Mon Sep 17 00:00:00 2001 From: 
Marco Dinis Date: Fri, 31 Jan 2025 09:02:17 +0000 Subject: [PATCH 17/28] Allow Regions query when listing ECS Services (#51585) Listing ECS Services requires an AWS Region. Instead of guessing the AWS Regions, this PR allows the API Client to send a list of AWS Regions which are used call the ECS APIs --- lib/web/integrations_awsoidc.go | 31 +++++++++++++++++++- lib/web/integrations_awsoidc_test.go | 43 ++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/lib/web/integrations_awsoidc.go b/lib/web/integrations_awsoidc.go index 2dcdb01dcd4a0..ce20aaff25c6c 100644 --- a/lib/web/integrations_awsoidc.go +++ b/lib/web/integrations_awsoidc.go @@ -275,7 +275,7 @@ func (h *Handler) awsOIDCListDeployedDatabaseService(w http.ResponseWriter, r *h return nil, trace.BadParameter("an integration name is required") } - regions, err := fetchRelevantAWSRegions(ctx, clt, clt.DiscoveryConfigClient()) + regions, err := regionsForListingDeployedDatabaseService(ctx, r, clt, clt.DiscoveryConfigClient()) if err != nil { return nil, trace.Wrap(err) } @@ -290,6 +290,35 @@ func (h *Handler) awsOIDCListDeployedDatabaseService(w http.ResponseWriter, r *h }, nil } +func extractAWSRegionsFromQuery(r *http.Request) ([]string, error) { + var ret []string + for _, region := range r.URL.Query()["regions"] { + if err := aws.IsValidRegion(region); err != nil { + return nil, trace.BadParameter("invalid region %s", region) + } + ret = append(ret, region) + } + + return ret, nil +} + +func regionsForListingDeployedDatabaseService(ctx context.Context, r *http.Request, authClient databaseGetter, discoveryConfigsClient discoveryConfigLister) ([]string, error) { + if r.URL.Query().Has("regions") { + regions, err := extractAWSRegionsFromQuery(r) + if err != nil { + return nil, trace.Wrap(err) + } + return regions, err + } + + regions, err := fetchRelevantAWSRegions(ctx, authClient, discoveryConfigsClient) + if err != nil { + return nil, trace.Wrap(err) + } + + return 
regions, nil +} + type databaseGetter interface { GetResources(ctx context.Context, req *proto.ListResourcesRequest) (*proto.ListResourcesResponse, error) GetDatabases(context.Context) ([]types.Database, error) diff --git a/lib/web/integrations_awsoidc_test.go b/lib/web/integrations_awsoidc_test.go index c8c7f4a0cf765..62c3ca8ce692b 100644 --- a/lib/web/integrations_awsoidc_test.go +++ b/lib/web/integrations_awsoidc_test.go @@ -22,6 +22,7 @@ import ( "context" "encoding/json" "fmt" + "net/http" "net/url" "strconv" "strings" @@ -1326,6 +1327,48 @@ func dummyDeployedDatabaseServices(count int, command []string) []*integrationv1 return ret } +func TestRegionsForListingDeployedDatabaseService(t *testing.T) { + ctx := context.Background() + + t.Run("regions query param is used instead of parsing internal resources", func(t *testing.T) { + clt := &mockRelevantAWSRegionsClient{ + databaseServices: &proto.ListResourcesResponse{ + Resources: []*proto.PaginatedResource{}, + }, + databases: make([]types.Database, 0), + discoveryConfigs: make([]*discoveryconfig.DiscoveryConfig, 0), + } + r := http.Request{ + URL: &url.URL{RawQuery: "regions=us-east-1®ions=us-east-2"}, + } + gotRegions, err := regionsForListingDeployedDatabaseService(ctx, &r, clt, clt) + require.NoError(t, err) + require.ElementsMatch(t, []string{"us-east-1", "us-east-2"}, gotRegions) + }) + + t.Run("fallbacks to internal resources when query param is not present", func(t *testing.T) { + clt := &mockRelevantAWSRegionsClient{ + databaseServices: &proto.ListResourcesResponse{ + Resources: []*proto.PaginatedResource{{Resource: &proto.PaginatedResource_DatabaseService{ + DatabaseService: &types.DatabaseServiceV1{Spec: types.DatabaseServiceSpecV1{ + ResourceMatchers: []*types.DatabaseResourceMatcher{ + {Labels: &types.Labels{"region": []string{"us-east-1"}}}, + {Labels: &types.Labels{"region": []string{"us-east-2"}}}, + }, + }}, + }}}, + }, + databases: make([]types.Database, 0), + discoveryConfigs: 
make([]*discoveryconfig.DiscoveryConfig, 0), + } + r := http.Request{ + URL: &url.URL{}, + } + gotRegions, err := regionsForListingDeployedDatabaseService(ctx, &r, clt, clt) + require.NoError(t, err) + require.ElementsMatch(t, []string{"us-east-1", "us-east-2"}, gotRegions) + }) +} func TestFetchRelevantAWSRegions(t *testing.T) { ctx := context.Background() From 88e1cd402e7264de230ae3dece2cc37e5e20ade1 Mon Sep 17 00:00:00 2001 From: Pawel Kopiczko Date: Fri, 31 Jan 2025 09:35:23 +0000 Subject: [PATCH 18/28] Revert "Add temporary protobuf liter exception" (#51699) This reverts commit 6fc264ae2ca5e0d00947015a72dcc49a14bd9326 from PR #51637. --- buf.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/buf.yaml b/buf.yaml index ccdcedcce111e..8f01bbe2dea21 100644 --- a/buf.yaml +++ b/buf.yaml @@ -84,8 +84,6 @@ breaking: ignore: # TODO(codingllama): Remove ignore once the PDP API is stable. - api/proto/teleport/decision/v1alpha1 - # TODO(kopiczko) remove after https://github.com/gravitational/teleport/pull/51637 is merged - - api/proto/teleport/okta/v1/okta_service.proto ignore_only: RESERVED_ENUM_NO_DELETE: - api/proto/teleport/legacy/types/types.proto From 1edb69677b2e601e321df1fbafc23743ff94625f Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Fri, 31 Jan 2025 06:34:17 -0500 Subject: [PATCH 19/28] Convert examples/teleport-usage to aws-sdk-go-v2 (#51677) * Convert examples/teleport-usage to aws-sdk-go-v2 * fix: apply fips to global config --- examples/teleport-usage/go.mod | 18 +++++++- examples/teleport-usage/go.sum | 36 +++++++++++++++- examples/teleport-usage/main.go | 76 +++++++++++++-------------------- 3 files changed, 80 insertions(+), 50 deletions(-) diff --git a/examples/teleport-usage/go.mod b/examples/teleport-usage/go.mod index 9c009bebf497a..cdf1098b60673 100644 --- a/examples/teleport-usage/go.mod +++ b/examples/teleport-usage/go.mod @@ -3,11 +3,27 @@ module usage-script go 1.22 require ( - 
github.com/aws/aws-sdk-go v1.47.4 + github.com/aws/aws-sdk-go-v2 v1.35.0 + github.com/aws/aws-sdk-go-v2/config v1.29.3 + github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.17.0 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.39.7 github.com/stretchr/testify v1.8.3 ) require ( + github.com/aws/aws-sdk-go-v2/credentials v1.17.56 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.24.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 // indirect + github.com/aws/smithy-go v1.22.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/examples/teleport-usage/go.sum b/examples/teleport-usage/go.sum index ba1d2e24403b0..3a30d379e0bf3 100644 --- a/examples/teleport-usage/go.sum +++ b/examples/teleport-usage/go.sum @@ -1,5 +1,37 @@ -github.com/aws/aws-sdk-go v1.47.4 h1:IyhNbmPt+5ldi5HNzv7ZnXiqSglDMaJiZlzj4Yq3qnk= -github.com/aws/aws-sdk-go v1.47.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.35.0 h1:jTPxEJyzjSuuz0wB+302hr8Eu9KUI+Zv8zlujMGJpVI= +github.com/aws/aws-sdk-go-v2 v1.35.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM= +github.com/aws/aws-sdk-go-v2/config v1.29.3 
h1:a5Ucjxe6iV+LHEBmYA9w40rT5aGxWybx/4l/O/fvJlE= +github.com/aws/aws-sdk-go-v2/config v1.29.3/go.mod h1:pt9z1x12zDiDb4iFLrxoeAKLVCU/Gp9DL/5BnwlY77o= +github.com/aws/aws-sdk-go-v2/credentials v1.17.56 h1:JKMBreKudV+ozx6rZJLvEtiexv48aEdhdC7mXUw9MLs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.56/go.mod h1:S3xRjIHD8HHFgMTz4L56q/7IldfNtGL9JjH/vP3U6DA= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.17.0 h1:OljitD0YIY2qkKpHChC+CMjKywEsqDLhUlHOI2AseXQ= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.17.0/go.mod h1:bcffXfieyW3VfH02hxx6MBuCU9UOBRguc4iS7mV7V9E= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 h1:XMBqBEuZLf8yxtH+mU/uUDyQbN4iD/xv9h6he2+lzhw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26/go.mod h1:d0+wQ/3CYGPuHEfBTPpQdfUX7gjk0/Lxs5Q6KzdEGY8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 h1:+7AzSGNhHoY53di13lvztf9Dyd/9ofzoYGBllkWp3a0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30/go.mod h1:Jxd/FrCny99yURiQiMywgXvBhd7tmgdv6KdlUTNzMSo= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 h1:Ex06eY6I5rO7IX0HalGfa5nGjpBoOsS1Qm3xfjkuszs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30/go.mod h1:AvyEMA9QcX59kFhVizBpIBpEMThUTXssuJe+emBdcGM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.39.7 h1:JFLdDS6ZGKoZii7O+9IBsuvCnvW2vSbseNBji8OKEo8= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.39.7/go.mod h1:8blEsG2cwaS8BK1YiWSEWFwmVav7i7EJk5swid5Vhcw= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.24.17 h1:jPqYzzklr/WkOk5imqvgpm4MkGLoXs6daKsoQSQiSrg= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.24.17/go.mod h1:DRtG2Ux6Ba26Q+bt/ef7gHa10ilrfqobnAAnmBIPnuk= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 
h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.11 h1:f36sb0FYLZui8mzV6o8DxkUyvOdZfkemyCPTGDJdWhE= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.11/go.mod h1:MaBbVwqDmlH9ytOOcERyVQ+Z6nvWkEdRy0k44m3MYkE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 h1:5JKQ2J3BBW4ovy6A/5Lwx9SpA6IzgH8jB3bquGZ1NUw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11/go.mod h1:VShCk7rfCzK/b9U1aSkzLwcOoaDlYna16482QqEavis= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 h1:q4pOAKxypbFoUJzOpgo939bF50qb4DgYshiDfcsdN0M= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.13/go.mod h1:G/0PTg7+vQT42ictQGjJhixzTcVZtHFvrN/OeTXrRfQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 h1:4sGSGshSSfO1vrcXruPick3ioSf8nhhD6nuB2ni37P4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12/go.mod h1:NHpu/pLOelViA4qxkAFH10VLqh+XeLhZfXDaFyMVgSs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 h1:RIXOjp7Dp4siCYJRwBHUcBdVgOWflSJGlq4ZhMI5Ta0= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.11/go.mod h1:ZR17k9bPKPR8u0IkyA6xVsjr56doNQ4ZB1fs7abYBfE= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/examples/teleport-usage/main.go b/examples/teleport-usage/main.go index 0e9a12a5720d3..45a97b733539a 100644 --- a/examples/teleport-usage/main.go +++ b/examples/teleport-usage/main.go @@ -17,6 +17,7 @@ limitations under the License. 
package main import ( + "context" "crypto/sha256" "errors" "fmt" @@ -29,13 +30,11 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/endpoints" - awsrequest "github.com/aws/aws-sdk-go/aws/request" - awssession "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" ) const ( @@ -60,39 +59,33 @@ func main() { fmt.Println("Gathering data, this may take a moment") - // Assume a base read capacity of 25 units per second to start off. - // If this is too high and we encounter throttling that could impede Teleport, it will be adjusted automatically. - limiter := newAdaptiveRateLimiter(25) + ctx := context.Background() + + configOpts := []func(*config.LoadOptions) error{config.WithRegion(params.awsRegion)} // Check the package name for one of the boring primitives. If the package // path is from BoringCrypto, we know this binary was compiled using // `GOEXPERIMENT=boringcrypto`. hash := sha256.New() - useFIPSEndpoint := endpoints.FIPSEndpointStateUnset if reflect.TypeOf(hash).Elem().PkgPath() == "crypto/internal/boring" { - useFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + configOpts = append(configOpts, config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled)) } - // create an AWS session using default SDK behavior, i.e. 
it will interpret - // the environment and ~/.aws directory just like an AWS CLI tool would: - session, err := awssession.NewSessionWithOptions(awssession.Options{ - SharedConfigState: awssession.SharedConfigEnable, - Config: aws.Config{ - Retryer: limiter, - Region: aws.String(params.awsRegion), - CredentialsChainVerboseErrors: aws.Bool(true), - UseFIPSEndpoint: useFIPSEndpoint, - }, - }) + awsConfig, err := config.LoadDefaultConfig(ctx, configOpts...) if err != nil { log.Fatal(err) } - // Reduce internal retry count so throttling errors bubble up to our rate limiter with less delay. - svc := dynamodb.New(session) + // Assume a base read capacity of 25 units per second to start off. + // If this is too high and we encounter throttling that could impede Teleport, it will be adjusted automatically. + limiter := newAdaptiveRateLimiter(25) + + svc := dynamodb.NewFromConfig(awsConfig, func(o *dynamodb.Options) { + o.Retryer = aws.NopRetryer{} + }) for _, date := range daysBetween(params.startDate, params.startDate.Add(scanDuration)) { - err := scanDay(svc, limiter, params.tableName, date, state) + err := scanDay(ctx, svc, limiter, params.tableName, date, state) if err != nil { log.Fatal(err) } @@ -123,7 +116,7 @@ func displayProductResults(name string, users map[string]struct{}, showUsers boo } // scanDay scans a single day of events from the audit log table. 
-func scanDay(svc *dynamodb.DynamoDB, limiter *adaptiveRateLimiter, tableName string, date string, state *trackedState) error { +func scanDay(ctx context.Context, svc dynamodb.QueryAPIClient, limiter *adaptiveRateLimiter, tableName string, date string, state *trackedState) error { attributes := map[string]interface{}{ ":date": date, ":e1": "session.start", @@ -133,31 +126,32 @@ func scanDay(svc *dynamodb.DynamoDB, limiter *adaptiveRateLimiter, tableName str ":e5": "kube.request", } - attributeValues, err := dynamodbattribute.MarshalMap(attributes) + attributeValues, err := attributevalue.MarshalMap(attributes) if err != nil { return err } - var paginationKey map[string]*dynamodb.AttributeValue + var paginationKey map[string]types.AttributeValue pageCount := 1 outer: for { fmt.Printf(" scanning date %v page %v...\n", date, pageCount) - scanOut, err := svc.Query(&dynamodb.QueryInput{ + scanOut, err := svc.Query(ctx, &dynamodb.QueryInput{ TableName: aws.String(tableName), IndexName: aws.String(indexName), KeyConditionExpression: aws.String("CreatedAtDate = :date"), ExpressionAttributeValues: attributeValues, FilterExpression: aws.String("EventType IN (:e1, :e2, :e3, :e4, :e5)"), ExclusiveStartKey: paginationKey, - ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), + ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal, // We limit the number of items returned to the current capacity to minimize any usage spikes // that could affect Teleport as RCUs may be consumed for multiple seconds if the response is large, slowing down Teleport significantly. 
- Limit: aws.Int64(int64(limiter.currentCapacity())), + Limit: aws.Int32(int32(limiter.currentCapacity())), }) if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException { + var throughputExceededError *types.ProvisionedThroughputExceededException + if errors.As(err, &throughputExceededError) { fmt.Println(" throttled by DynamoDB, adjusting request rate...") limiter.reportThrottleError() continue outer @@ -191,10 +185,10 @@ type event struct { } // applies a set of scanned raw events onto the tracked state. -func reduceEvents(rawEvents []map[string]*dynamodb.AttributeValue, state *trackedState) error { +func reduceEvents(rawEvents []map[string]types.AttributeValue, state *trackedState) error { for _, rawEvent := range rawEvents { var event event - err := dynamodbattribute.UnmarshalMap(rawEvent, &event) + err := attributevalue.UnmarshalMap(rawEvent, &event) if err != nil { log.Fatal(err) } @@ -354,18 +348,6 @@ func (a *adaptiveRateLimiter) currentCapacity() float64 { return a.permitCapacity } -func (a *adaptiveRateLimiter) RetryRules(r *awsrequest.Request) time.Duration { - return 0 -} - -func (a *adaptiveRateLimiter) ShouldRetry(*awsrequest.Request) bool { - return false -} - -func (a *adaptiveRateLimiter) MaxRetries() int { - return 0 -} - func newAdaptiveRateLimiter(permitsPerSecond float64) *adaptiveRateLimiter { fmt.Printf(" setting initial read rate to %v RCUs\n", int(permitsPerSecond)) return &adaptiveRateLimiter{ From a20a7630ebdc77cac9e47d45c93573855443b0ce Mon Sep 17 00:00:00 2001 From: Steven Martin Date: Fri, 31 Jan 2025 06:39:39 -0500 Subject: [PATCH 20/28] update tsh github login command description (#51605) --- tool/tsh/common/git_login.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tool/tsh/common/git_login.go b/tool/tsh/common/git_login.go index fde5546471cd2..335a5b76fef36 100644 --- a/tool/tsh/common/git_login.go +++ b/tool/tsh/common/git_login.go @@ -37,7 
+37,7 @@ type gitLoginCommand struct { func newGitLoginCommand(parent *kingpin.CmdClause) *gitLoginCommand { cmd := &gitLoginCommand{ - CmdClause: parent.Command("login", "Opens a browser and retrieves your login from GitHub"), + CmdClause: parent.Command("login", "Opens a browser and retrieves your login from GitHub."), } // TODO(greedy52) make "github-org" optional. Most likely there is only a From 724457ad34f70659d3c6ef9805d37c711e8190b1 Mon Sep 17 00:00:00 2001 From: Edoardo Spadolini Date: Fri, 31 Jan 2025 12:43:10 +0100 Subject: [PATCH 21/28] Stable UNIX users: docs (#51662) * Stable user UID addition to the host user creation guide * tctl stable-unix-users-ls docs * CAP resource reference * auth service reference * Uncomment defaulted field in fileconf * Clarify conditions for stable uids --- .../guides/host-user-creation.mdx | 39 +++++++++++++++++++ .../config-reference/auth-service.yaml | 10 +++++ docs/pages/reference/cli/tctl.mdx | 30 ++++++++++++++ docs/pages/reference/resources.mdx | 9 +++++ 4 files changed, 88 insertions(+) diff --git a/docs/pages/enroll-resources/server-access/guides/host-user-creation.mdx b/docs/pages/enroll-resources/server-access/guides/host-user-creation.mdx index ef6a00aecce44..5593563ed6258 100644 --- a/docs/pages/enroll-resources/server-access/guides/host-user-creation.mdx +++ b/docs/pages/enroll-resources/server-access/guides/host-user-creation.mdx @@ -218,6 +218,45 @@ If multiple entries are specified in the `host_user_uid` or `host_user_gid` only +For Teleport users that don't have a `host_user_uid` trait, starting from Teleport 17.3, it's possible to configure the cluster to assign the same UID for any given username for automatically created host users across all Teleport SSH instances. 
+ +Edit your cluster authentication preference using the following command: + +```code +$ tctl edit cluster_auth_preference +``` + +In your editor, ensure that your `cluster_auth_preference` includes a `stable_unix_user_config` field similar to the following: + +```yaml +kind: cluster_auth_preference +version: v2 +metadata: + name: cluster-auth-preference +spec: + # ... + stable_unix_user_config: + enabled: true + first_uid: 90000 + last_uid: 95000 +``` + +The range of UIDs between `first_uid` and `last_uid` (inclusive) will be used by Teleport to assign UIDs for new users, and you should make sure it's not a range of UIDs already in use by other services or workloads across your fleet of servers. While UID allocations in Linux can vary, we recommend sticking to one of the available ranges listed in the [systemd documentation for "Users, Groups, UIDs and GIDs on systemd Systems"](https://github.com/systemd/systemd/blob/main/docs/UIDS-GIDS.md). + +Once enabled, all Teleport SSH instances (of version 17.3 and newer) will use the UID provided by the control plane when automatically creating a new host user, if no other UID is specified by the user trait. + + + +Teleport will only use the UID defined by the control plane if the `create_host_user_mode` is `keep` (and not `insecure-drop`), and only for automatically created host users that were created after the feature was enabled. The UID will not be applied to users that already exist, and it will not take effect for static host users. + + + +You can inspect the list of assigned UIDs with the following command: + +```code +$ tctl stable-unix-users ls +``` + ### Step 3/4. Configure sudoers on your Linux servers Teleport host user creation leverages the `sudoers.d` directory for new users. 
diff --git a/docs/pages/includes/config-reference/auth-service.yaml b/docs/pages/includes/config-reference/auth-service.yaml index 668071408f7a3..483bbf711c342 100644 --- a/docs/pages/includes/config-reference/auth-service.yaml +++ b/docs/pages/includes/config-reference/auth-service.yaml @@ -266,6 +266,16 @@ auth_service: # "14h30m", "1h" etc. default_session_ttl: 12h + stable_unix_user_config: + # If set to true, SSH instances will use the same UID for each given + # username when automatically creating users. Defaults to false. + enabled: false + + # The range of UIDs (including both ends) used for automatic UID + # assignment. Ignored if enabled is set to false. + #first_uid: 90000 + #last_uid: 95000 + # IP and the port to bind to. Other Teleport Nodes will be connecting to # this port (AKA "Auth API" or "Cluster API") to validate client # certificates diff --git a/docs/pages/reference/cli/tctl.mdx b/docs/pages/reference/cli/tctl.mdx index 020a6da5708f3..5d02f797ca253 100644 --- a/docs/pages/reference/cli/tctl.mdx +++ b/docs/pages/reference/cli/tctl.mdx @@ -1549,6 +1549,36 @@ $ tctl get saml/your-connector-name --with-secrets | tctl sso test Make sure to include `--with-secrets` flag, or the exported auth connector will not be testable. +## tctl stable-unix-users ls + +List the stored usernames and UIDs for automatically created users: + +```code +$ tctl stable-unix-users ls +``` + +### Arguments + +This command accepts no arguments. 
+ +### Flags + +| Name | Default Value(s) | Allowed Value(s) | Description | +| - | - | - | - | +| `--format` | `text` | `json` or `text` | Output format | + +### Examples + +```code +$ tctl stable-unix-users ls +Username UID +-------- ----- +alice 90000 +bob 90002 +carol 90003 +dan 90001 +``` + ## tctl status Report cluster and Certificate Authority status: diff --git a/docs/pages/reference/resources.mdx b/docs/pages/reference/resources.mdx index abbc9242bf96f..8dc55641b2202 100644 --- a/docs/pages/reference/resources.mdx +++ b/docs/pages/reference/resources.mdx @@ -308,6 +308,15 @@ spec: # Possible values: "local", "oidc", "saml" and "github" type: local + stable_unix_user_config: + # If set to true, SSH instances will use the same UID for each given + # username when automatically creating users. + enabled: false + + # The range of UIDs (including both ends) used for automatic UID assignment. + first_uid: 90000 + last_uid: 95000 + version: v2 ``` From 33dce486838c0f1787075ffd82459a7cc6fa7f5f Mon Sep 17 00:00:00 2001 From: Gabriel Corado Date: Fri, 31 Jan 2025 09:54:45 -0300 Subject: [PATCH 22/28] feat(usagereporter): add user agent to database session start usage event (#51591) --- lib/usagereporter/teleport/audit.go | 1 + lib/usagereporter/teleport/audit_test.go | 39 ++++++++++++++++++++++++ lib/usagereporter/teleport/types.go | 1 + 3 files changed, 41 insertions(+) diff --git a/lib/usagereporter/teleport/audit.go b/lib/usagereporter/teleport/audit.go index a1108374bb3fc..b492b40a56855 100644 --- a/lib/usagereporter/teleport/audit.go +++ b/lib/usagereporter/teleport/audit.go @@ -110,6 +110,7 @@ func ConvertAuditEvent(event apievents.AuditEvent) Anonymizable { DbType: e.DatabaseType, DbProtocol: e.DatabaseProtocol, DbOrigin: e.DatabaseOrigin, + UserAgent: e.UserAgent, }, UserKind: prehogUserKindFromEventKind(e.UserKind), } diff --git a/lib/usagereporter/teleport/audit_test.go b/lib/usagereporter/teleport/audit_test.go index 982b046b54d1a..4a63f10b13615 100644 
--- a/lib/usagereporter/teleport/audit_test.go +++ b/lib/usagereporter/teleport/audit_test.go @@ -238,6 +238,45 @@ func TestConvertAuditEvent(t *testing.T) { }, }, }, + { + desc: "DatabaseSessionStart", + event: &apievents.DatabaseSessionStart{ + UserMetadata: apievents.UserMetadata{User: "alice"}, + DatabaseMetadata: apievents.DatabaseMetadata{ + DatabaseService: "postgres-local", + DatabaseProtocol: "postgres", + DatabaseName: "postgres", + DatabaseUser: "alice", + DatabaseType: "self-hosted", + DatabaseOrigin: "config-file", + }, + ClientMetadata: apievents.ClientMetadata{UserAgent: "psql"}, + }, + expected: &SessionStartEvent{ + SessionType: string(types.DatabaseSessionKind), + Database: &prehogv1a.SessionStartDatabaseMetadata{ + DbType: "self-hosted", + DbProtocol: "postgres", + DbOrigin: "config-file", + UserAgent: "psql", + }, + UserName: "alice", + }, + expectedAnonymized: &prehogv1a.SubmitEventRequest{ + Event: &prehogv1a.SubmitEventRequest_SessionStartV2{ + SessionStartV2: &prehogv1a.SessionStartEvent{ + SessionType: string(types.DatabaseSessionKind), + Database: &prehogv1a.SessionStartDatabaseMetadata{ + DbType: "self-hosted", + DbProtocol: "postgres", + DbOrigin: "config-file", + UserAgent: "psql", + }, + UserName: anonymizer.AnonymizeString("alice"), + }, + }, + }, + }, } for _, tt := range cases { diff --git a/lib/usagereporter/teleport/types.go b/lib/usagereporter/teleport/types.go index 3b783f73b55e1..57ac851f8792c 100644 --- a/lib/usagereporter/teleport/types.go +++ b/lib/usagereporter/teleport/types.go @@ -105,6 +105,7 @@ func (u *SessionStartEvent) Anonymize(a utils.Anonymizer) prehogv1a.SubmitEventR DbType: u.Database.DbType, DbProtocol: u.Database.DbProtocol, DbOrigin: u.Database.DbOrigin, + UserAgent: u.Database.UserAgent, } } if u.Desktop != nil { From 38e28dd3758b34625ccc45292056ae7920ebe3eb Mon Sep 17 00:00:00 2001 From: Steven Martin Date: Fri, 31 Jan 2025 08:00:31 -0500 Subject: [PATCH 23/28] docs: update github integration (#51678) --- 
.../admin-guides/management/guides/github-integration.mdx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/pages/admin-guides/management/guides/github-integration.mdx b/docs/pages/admin-guides/management/guides/github-integration.mdx index 6b95d6fa17e60..89ab6d9db02de 100644 --- a/docs/pages/admin-guides/management/guides/github-integration.mdx +++ b/docs/pages/admin-guides/management/guides/github-integration.mdx @@ -53,7 +53,7 @@ Go to "OAuth Apps" under "Developer Settings" of your organization's settings. Click on "New OAuth App". Fill in the details. Use the following for "Authentication callback URL": -``` +```code https:///v1/webapi/github/ ``` @@ -86,7 +86,7 @@ previous step. To create the resource with `tctl`, run: ```code -$ tctl create -f github_integration.yaml +$ tctl create github_integration.yaml ``` Once the integration resource is created, export the CA to be used for GitHub: @@ -117,7 +117,7 @@ spec: To create the resource with `tctl`, run: ```code -$ tctl create -f git_server.yaml +$ tctl create git_server.yaml ``` The user role must have `github_permissions` configured to allow access to your From 797936d9e26df1aa03d3ea99748de5942069db6a Mon Sep 17 00:00:00 2001 From: Bartosz Leper Date: Fri, 31 Jan 2025 15:00:13 +0100 Subject: [PATCH 24/28] Support more fields in the role editor (#51458) * Add more fields to the role editor * Review --- lib/services/presets.go | 32 +++++++++ .../StandardEditor/AccessRules.test.tsx | 2 + .../RoleEditor/StandardEditor/AccessRules.tsx | 28 +++++++- .../StandardEditor/Resources.test.tsx | 45 +++++++++++-- .../RoleEditor/StandardEditor/Resources.tsx | 66 ++++++++++++------- .../StandardEditor/standardmodel.test.ts | 39 +++++++++++ .../StandardEditor/standardmodel.ts | 39 ++++++++++- .../StandardEditor/validation.test.ts | 13 +++- .../RoleEditor/StandardEditor/validation.ts | 1 + .../teleport/src/services/resources/types.ts | 3 + 10 files changed, 232 insertions(+), 36 deletions(-) diff 
--git a/lib/services/presets.go b/lib/services/presets.go index 1cf2a918a388d..ff0d4993f6f40 100644 --- a/lib/services/presets.go +++ b/lib/services/presets.go @@ -104,6 +104,10 @@ func NewSystemAutomaticAccessBotUser() types.User { // NewPresetEditorRole returns a new pre-defined role for cluster // editors who can edit cluster configuration resources. func NewPresetEditorRole() types.Role { + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. role := &types.RoleV6{ Kind: types.KindRole, Version: types.V7, @@ -116,6 +120,10 @@ func NewPresetEditorRole() types.Role { }, }, Spec: types.RoleSpecV6{ + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. Options: types.RoleOptions{ CertificateFormat: constants.CertificateFormatStandard, MaxSessionTTL: types.NewDuration(apidefaults.MaxCertDuration), @@ -133,6 +141,10 @@ func NewPresetEditorRole() types.Role { Desktop: types.NewBoolOption(false), }, }, + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. Allow: types.RoleConditions{ Namespaces: []string{apidefaults.Namespace}, Rules: []types.Rule{ @@ -208,6 +220,10 @@ func NewPresetEditorRole() types.Role { // NewPresetAccessRole creates a role for users who are allowed to initiate // interactive sessions. func NewPresetAccessRole() types.Role { + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. 
This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. role := &types.RoleV6{ Kind: types.KindRole, Version: types.V7, @@ -220,6 +236,10 @@ func NewPresetAccessRole() types.Role { }, }, Spec: types.RoleSpecV6{ + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. Options: types.RoleOptions{ CertificateFormat: constants.CertificateFormatStandard, MaxSessionTTL: types.NewDuration(apidefaults.MaxCertDuration), @@ -235,6 +255,10 @@ func NewPresetAccessRole() types.Role { BPF: apidefaults.EnhancedEvents(), RecordSession: &types.RecordSession{Desktop: types.NewBoolOption(true)}, }, + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. Allow: types.RoleConditions{ Namespaces: []string{apidefaults.Namespace}, NodeLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, @@ -270,6 +294,10 @@ func NewPresetAccessRole() types.Role { }, }, } + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. role.SetLogins(types.Allow, []string{teleport.TraitInternalLoginsVariable}) role.SetWindowsLogins(types.Allow, []string{teleport.TraitInternalWindowsLoginsVariable}) role.SetKubeUsers(types.Allow, []string{teleport.TraitInternalKubeUsersVariable}) @@ -284,6 +312,10 @@ func NewPresetAccessRole() types.Role { // auditor - someone who can review cluster events and replay sessions, // but can't initiate interactive sessions or modify configuration. 
func NewPresetAuditorRole() types.Role { + // IMPORTANT: Before adding new defaults, please make sure that the + // underlying field is supported by the standard role editor UI. This role + // should be editable with a rich UI, without requiring the user to dive into + // YAML. role := &types.RoleV6{ Kind: types.KindRole, Version: types.V7, diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.test.tsx b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.test.tsx index 0eabc61fd90db..95d96d6b13ece 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.test.tsx +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.test.tsx @@ -58,6 +58,7 @@ describe('AccessRules', () => { 'list', 'read', ]); + await user.type(screen.getByLabelText('Filter'), 'some-filter'); expect(modelRef).toHaveBeenLastCalledWith([ { id: expect.any(String), @@ -69,6 +70,7 @@ describe('AccessRules', () => { { label: 'list', value: 'list' }, { label: 'read', value: 'read' }, ], + where: 'some-filter', }, ] as RuleModel[]); }); diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.tsx b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.tsx index a477d87523a16..18c3baa2d14cd 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.tsx +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/AccessRules.tsx @@ -18,12 +18,14 @@ import { memo } from 'react'; import { components, MultiValueProps } from 'react-select'; -import styled from 'styled-components'; +import styled, { useTheme } from 'styled-components'; import { ButtonSecondary } from 'design/Button'; import Flex from 'design/Flex'; import { Plus } from 'design/Icon'; +import Text from 'design/Text'; import { HoverTooltip } from 'design/Tooltip'; +import FieldInput from 'shared/components/FieldInput'; import { FieldSelect, FieldSelectCreatable, @@ -78,7 +80,8 @@ const 
AccessRule = memo(function AccessRule({ validation, dispatch, }: SectionPropsWithDispatch) { - const { id, resources, verbs } = value; + const { id, resources, verbs, where } = value; + const theme = useTheme(); function setRule(rule: RuleModel) { dispatch({ type: 'set-access-rule', payload: rule }); } @@ -112,6 +115,27 @@ const AccessRule = memo(function AccessRule({ value={verbs} onChange={v => setRule({ ...value, verbs: v })} rule={precomputed(validation.fields.verbs)} + /> + + Optional condition that further limits the list of resources + affected by this rule, expressed using the{' '} + + Teleport predicate language + + + } + tooltipSticky + disabled={isProcessing} + value={where} + onChange={e => setRule({ ...value, where: e.target.value })} mb={0} /> diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.test.tsx b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.test.tsx index 62ff7b15997bc..61f1be419af87 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.test.tsx +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.test.tsx @@ -141,6 +141,13 @@ describe('KubernetesAccessSection', () => { await user.type(screen.getByPlaceholderText('label key'), 'some-key'); await user.type(screen.getByPlaceholderText('label value'), 'some-value'); + await selectEvent.create(screen.getByLabelText('Users'), 'joe', { + createOptionText: 'User: joe', + }); + await selectEvent.create(screen.getByLabelText('Users'), 'mary', { + createOptionText: 'User: mary', + }); + await user.click(screen.getByRole('button', { name: 'Add a Resource' })); expect( reactSelectValueContainer(screen.getByLabelText('Kind')) @@ -178,6 +185,10 @@ describe('KubernetesAccessSection', () => { roleVersion: 'v7', }, ], + users: [ + expect.objectContaining({ value: 'joe' }), + expect.objectContaining({ value: 'mary' }), + ], roleVersion: 'v7', } as KubernetesAccess); }); @@ -391,9 +402,12 @@ 
describe('DatabaseAccessSection', () => { test('editing', async () => { const { user, onChange } = setup(); - await user.click(screen.getByRole('button', { name: 'Add a Label' })); - await user.type(screen.getByPlaceholderText('label key'), 'env'); - await user.type(screen.getByPlaceholderText('label value'), 'prod'); + + const labels = within(screen.getByRole('group', { name: 'Labels' })); + await user.click(labels.getByRole('button', { name: 'Add a Label' })); + await user.type(labels.getByPlaceholderText('label key'), 'env'); + await user.type(labels.getByPlaceholderText('label value'), 'prod'); + await selectEvent.create(screen.getByLabelText('Database Names'), 'stuff', { createOptionText: 'Database Name: stuff', }); @@ -403,6 +417,16 @@ describe('DatabaseAccessSection', () => { await selectEvent.create(screen.getByLabelText('Database Roles'), 'admin', { createOptionText: 'Database Role: admin', }); + + const dbServiceLabels = within( + screen.getByRole('group', { name: 'Database Service Labels' }) + ); + await user.click( + dbServiceLabels.getByRole('button', { name: 'Add a Label' }) + ); + await user.type(dbServiceLabels.getByPlaceholderText('label key'), 'foo'); + await user.type(dbServiceLabels.getByPlaceholderText('label value'), 'bar'); + expect(onChange).toHaveBeenLastCalledWith({ kind: 'db', labels: [{ name: 'env', value: 'prod' }], @@ -418,18 +442,29 @@ describe('DatabaseAccessSection', () => { expect.objectContaining({ value: '{{internal.db_users}}' }), expect.objectContaining({ label: 'mary', value: 'mary' }), ], + dbServiceLabels: [{ name: 'foo', value: 'bar' }], } as DatabaseAccess); }); test('validation', async () => { const { user, validator } = setup(); - await user.click(screen.getByRole('button', { name: 'Add a Label' })); + const labels = within(screen.getByRole('group', { name: 'Labels' })); + await user.click(labels.getByRole('button', { name: 'Add a Label' })); + const dbServiceLabelsGroup = within( + screen.getByRole('group', { name: 
'Database Service Labels' }) + ); + await user.click( + dbServiceLabelsGroup.getByRole('button', { name: 'Add a Label' }) + ); await selectEvent.create(screen.getByLabelText('Database Roles'), '*', { createOptionText: 'Database Role: *', }); act(() => validator.validate()); expect( - screen.getByPlaceholderText('label key') + labels.getByPlaceholderText('label key') + ).toHaveAccessibleDescription('required'); + expect( + dbServiceLabelsGroup.getByPlaceholderText('label key') ).toHaveAccessibleDescription('required'); expect( screen.getByText('Wildcard is not allowed in database roles') diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.tsx b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.tsx index 5b59d78b35b51..c85c6f515211f 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.tsx +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/Resources.tsx @@ -25,7 +25,7 @@ import ButtonIcon from 'design/ButtonIcon'; import Flex from 'design/Flex'; import { Add, Plus, Trash } from 'design/Icon'; import { Mark } from 'design/Mark'; -import Text, { H4 } from 'design/Text'; +import { H4 } from 'design/Text'; import FieldInput from 'shared/components/FieldInput'; import { FieldMultiInput } from 'shared/components/FieldMultiInput/FieldMultiInput'; import { @@ -232,10 +232,8 @@ export function ServerAccessSection({ }: SectionProps) { return ( <> - - Labels - onChange?.({ ...value, labels })} @@ -244,6 +242,7 @@ export function ServerAccessSection({ `Login: ${label}`} components={{ @@ -271,6 +270,7 @@ export function KubernetesAccessSection({ `Group: ${label}`} components={{ @@ -281,10 +281,22 @@ export function KubernetesAccessSection({ onChange={groups => onChange?.({ ...value, groups })} /> - - Labels - + `User: ${label}`} + components={{ + DropdownIndicator: null, + }} + openMenuOnClick={false} + value={value.users} + onChange={users => onChange?.({ ...value, users })} + /> + ) { return 
( - - - Labels - - onChange?.({ ...value, labels })} - rule={precomputed(validation.fields.labels)} - /> - + onChange?.({ ...value, labels })} + rule={precomputed(validation.fields.labels)} + /> - - Labels - onChange?.({ ...value, labels })} @@ -491,6 +498,7 @@ export function DatabaseAccessSection({ List of database names that this role is allowed to connect to. @@ -509,6 +517,7 @@ export function DatabaseAccessSection({ List of database users that this role is allowed to connect as. @@ -527,6 +536,7 @@ export function DatabaseAccessSection({ `Database Role: ${label}`} @@ -537,7 +547,14 @@ export function DatabaseAccessSection({ value={value.roles} onChange={roles => onChange?.({ ...value, roles })} rule={precomputed(validation.fields.roles)} - mb={0} + /> + onChange?.({ ...value, dbServiceLabels })} + rule={precomputed(validation.fields.dbServiceLabels)} /> ); @@ -552,10 +569,8 @@ export function WindowsDesktopAccessSection({ return ( <> - - Labels - onChange?.({ ...value, labels })} @@ -565,6 +580,7 @@ export function WindowsDesktopAccessSection({ `Login: ${label}`} diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.test.ts b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.test.ts index 91d96b1638086..397722ff14b73 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.test.ts +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.test.ts @@ -197,6 +197,7 @@ describe.each<{ name: string; role: Role; model: RoleEditorModel }>([ db_names: ['stuff', 'knickknacks'], db_users: ['joe', 'mary'], db_roles: ['admin', 'auditor'], + db_service_labels: { foo: 'bar' }, }, }, }, @@ -218,6 +219,7 @@ describe.each<{ name: string; role: Role; model: RoleEditorModel }>([ { label: 'admin', value: 'admin' }, { label: 'auditor', value: 'auditor' }, ], + dbServiceLabels: [{ name: 'foo', value: 'bar' }], }, ], }, @@ -440,6 +442,7 @@ describe('roleToRoleEditorModel', () => { 
groups: [], labels: [], resources: [], + users: [], roleVersion: defaultRoleVersion, }); @@ -868,6 +871,7 @@ describe('roleToRoleEditorModel', () => { name: 'some-node', }, ], + kubernetes_users: ['alice', 'bob'], }, }, }) @@ -902,6 +906,10 @@ describe('roleToRoleEditorModel', () => { roleVersion: defaultRoleVersion, }, ], + users: [ + { label: 'alice', value: 'alice' }, + { label: 'bob', value: 'bob' }, + ], roleVersion: defaultRoleVersion, }, ], @@ -948,6 +956,11 @@ describe('roleToRoleEditorModel', () => { verbs: ['read', 'list'], }, { resources: [ResourceKind.Lock], verbs: ['create'] }, + { + resources: [ResourceKind.Session], + verbs: ['read', 'list'], + where: 'contains(session.participants, user.metadata.name)', + }, ], }, }, @@ -962,11 +975,19 @@ describe('roleToRoleEditorModel', () => { resourceKindOptionsMap.get(ResourceKind.DatabaseService), ], verbs: [verbOptionsMap.get('read'), verbOptionsMap.get('list')], + where: '', }, { id: expect.any(String), resources: [resourceKindOptionsMap.get(ResourceKind.Lock)], verbs: [verbOptionsMap.get('create')], + where: '', + }, + { + id: expect.any(String), + resources: [resourceKindOptionsMap.get(ResourceKind.Session)], + verbs: [verbOptionsMap.get('read'), verbOptionsMap.get('list')], + where: 'contains(session.participants, user.metadata.name)', }, ], } as RoleEditorModel); @@ -1042,6 +1063,10 @@ describe('roleEditorModelToRole', () => { roleVersion: defaultRoleVersion, }, ], + users: [ + { label: 'alice', value: 'alice' }, + { label: 'bob', value: 'bob' }, + ], roleVersion: defaultRoleVersion, }, ], @@ -1067,6 +1092,7 @@ describe('roleEditorModelToRole', () => { verbs: [], }, ], + kubernetes_users: ['alice', 'bob'], }, }, } as Role); @@ -1084,11 +1110,19 @@ describe('roleEditorModelToRole', () => { resourceKindOptionsMap.get(ResourceKind.DatabaseService), ], verbs: [verbOptionsMap.get('read'), verbOptionsMap.get('list')], + where: '', }, { id: 'dummy-id-2', resources: 
[resourceKindOptionsMap.get(ResourceKind.Lock)], verbs: [verbOptionsMap.get('create')], + where: '', + }, + { + id: expect.any(String), + resources: [resourceKindOptionsMap.get(ResourceKind.Session)], + verbs: [verbOptionsMap.get('read'), verbOptionsMap.get('list')], + where: 'contains(session.participants, user.metadata.name)', }, ], }) @@ -1100,6 +1134,11 @@ describe('roleEditorModelToRole', () => { rules: [ { resources: ['user', 'db_service'], verbs: ['read', 'list'] }, { resources: ['lock'], verbs: ['create'] }, + { + resources: ['session'], + verbs: ['read', 'list'], + where: 'contains(session.participants, user.metadata.name)', + }, ], }, }, diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.ts b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.ts index e0814f0731089..11a2d1fcc9326 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.ts +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/standardmodel.ts @@ -114,6 +114,7 @@ export type KubernetesAccess = ResourceAccessBase<'kube_cluster'> & { groups: readonly Option[]; labels: UILabel[]; resources: KubernetesResourceModel[]; + users: readonly Option[]; /** * Version of the role that owns this section. 
Required to propagate it to @@ -265,6 +266,7 @@ export type DatabaseAccess = ResourceAccessBase<'db'> & { names: readonly Option[]; users: readonly Option[]; roles: readonly Option[]; + dbServiceLabels: UILabel[]; }; export type WindowsDesktopAccess = ResourceAccessBase<'windows_desktop'> & { @@ -283,6 +285,7 @@ export type RuleModel = { */ resources: readonly ResourceKindOption[]; verbs: readonly VerbOption[]; + where: string; }; export type OptionsModel = { @@ -453,6 +456,7 @@ export function newResourceAccess( groups: [stringToOption('{{internal.kubernetes_groups}}')], labels: [], resources: [], + users: [], roleVersion, }; case 'app': @@ -470,6 +474,7 @@ export function newResourceAccess( names: [stringToOption('{{internal.db_names}}')], users: [stringToOption('{{internal.db_users}}')], roles: [stringToOption('{{internal.db_roles}}')], + dbServiceLabels: [], }; case 'windows_desktop': return { @@ -500,6 +505,7 @@ export function newRuleModel(): RuleModel { id: crypto.randomUUID(), resources: [], verbs: [], + where: '', }; } @@ -579,9 +585,11 @@ function roleConditionsToModel( db_names, db_users, db_roles, + db_service_labels, windows_desktop_labels, windows_desktop_logins, + kubernetes_users, rules, @@ -606,12 +614,21 @@ function roleConditionsToModel( model: kubeResourcesModel, requiresReset: kubernetesResourcesRequireReset, } = kubernetesResourcesToModel(kubernetes_resources, roleVersion); - if (someNonEmpty(kubeGroupsModel, kubeLabelsModel, kubeResourcesModel)) { + const kubeUsersModel = stringsToOptions(kubernetes_users ?? []); + if ( + someNonEmpty( + kubeGroupsModel, + kubeLabelsModel, + kubeResourcesModel, + kubeUsersModel + ) + ) { resources.push({ kind: 'kube_cluster', groups: kubeGroupsModel, labels: kubeLabelsModel, resources: kubeResourcesModel, + users: kubeUsersModel, roleVersion, }); } @@ -641,13 +658,23 @@ function roleConditionsToModel( const dbNamesModel = db_names ?? []; const dbUsersModel = db_users ?? []; const dbRolesModel = db_roles ?? 
[]; - if (someNonEmpty(dbLabelsModel, dbNamesModel, dbUsersModel, dbRolesModel)) { + const dbServiceLabelsModel = labelsToModel(db_service_labels); + if ( + someNonEmpty( + dbLabelsModel, + dbNamesModel, + dbUsersModel, + dbRolesModel, + dbServiceLabelsModel + ) + ) { resources.push({ kind: 'db', labels: dbLabelsModel, names: stringsToOptions(dbNamesModel), users: stringsToOptions(dbUsersModel), roles: stringsToOptions(dbRolesModel), + dbServiceLabels: dbServiceLabelsModel, }); } @@ -761,7 +788,7 @@ function rulesToModel(rules: Rule[]): { } function ruleToModel(rule: Rule): { model: RuleModel; requiresReset: boolean } { - const { resources = [], verbs = [], ...unsupported } = rule; + const { resources = [], verbs = [], where = '', ...unsupported } = rule; const resourcesModel = resources.map( k => resourceKindOptionsMap.get(k) ?? { label: k, value: k } ); @@ -774,6 +801,7 @@ function ruleToModel(rule: Rule): { model: RuleModel; requiresReset: boolean } { id: crypto.randomUUID(), resources: resourcesModel, verbs: knownVerbsModel, + where, }, requiresReset, }; @@ -970,6 +998,7 @@ export function roleEditorModelToRole(roleModel: RoleEditorModel): Role { verbs: optionsToStrings(verbs), }) ); + role.spec.allow.kubernetes_users = optionsToStrings(res.users); break; case 'app': @@ -984,6 +1013,9 @@ export function roleEditorModelToRole(roleModel: RoleEditorModel): Role { role.spec.allow.db_names = optionsToStrings(res.names); role.spec.allow.db_users = optionsToStrings(res.users); role.spec.allow.db_roles = optionsToStrings(res.roles); + role.spec.allow.db_service_labels = labelsModelToLabels( + res.dbServiceLabels + ); break; case 'windows_desktop': @@ -1002,6 +1034,7 @@ export function roleEditorModelToRole(roleModel: RoleEditorModel): Role { role.spec.allow.rules = roleModel.rules.map(role => ({ resources: role.resources.map(r => r.value), verbs: role.verbs.map(v => v.value), + where: role.where || undefined, })); } diff --git 
a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts index afcdce5eceaa6..829b725bcca5c 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.test.ts @@ -66,6 +66,7 @@ describe('validateRoleEditorModel', () => { kind: 'kube_cluster', labels: [{ name: 'foo', value: 'bar' }], groups: [], + users: [], resources: [ { id: 'dummy-id', @@ -96,6 +97,7 @@ describe('validateRoleEditorModel', () => { roles: [{ label: 'some-role', value: 'some-role' }], names: [], users: [], + dbServiceLabels: [{ name: 'asdf', value: 'qwer' }], }, { kind: 'windows_desktop', @@ -108,6 +110,7 @@ describe('validateRoleEditorModel', () => { id: 'dummy-id', resources: [{ label: ResourceKind.Node, value: ResourceKind.Node }], verbs: [{ label: '*', value: '*' }], + where: '', }, ]; const result = validateRoleEditorModel(model, undefined, undefined); @@ -146,6 +149,7 @@ describe('validateRoleEditorModel', () => { kind: 'kube_cluster', groups: [], labels: [], + users: [], resources: [ { ...newKubernetesResourceModel(defaultRoleVersion), @@ -178,6 +182,7 @@ describe('validateRoleEditorModel', () => { kind: 'kube_cluster', groups: [], labels: [], + users: [], roleVersion, resources: [ { @@ -214,6 +219,7 @@ describe('validateRoleEditorModel', () => { id: 'dummy-id', resources: [], verbs: [{ label: '*', value: '*' }], + where: '', }, ]; const result = validateRoleEditorModel(model, undefined, undefined); @@ -243,7 +249,12 @@ describe('validateResourceAccess', () => { describe('validateAccessRule', () => { it('reuses previously computed results', () => { - const rule: RuleModel = { id: 'some-id', resources: [], verbs: [] }; + const rule: RuleModel = { + id: 'some-id', + resources: [], + verbs: [], + where: '', + }; const result1 = validateAccessRule(rule, undefined, undefined); const result2 = 
validateAccessRule(rule, rule, result1); expect(result2).toBe(result1); diff --git a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts index a1292e06e5e7c..13857647b461f 100644 --- a/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts +++ b/web/packages/teleport/src/Roles/RoleEditor/StandardEditor/validation.ts @@ -287,6 +287,7 @@ export type AppAccessValidationResult = RuleSetValidationResult< const databaseAccessValidationRules = { labels: nonEmptyLabels, roles: noWildcardOptions('Wildcard is not allowed in database roles'), + dbServiceLabels: nonEmptyLabels, }; export type DatabaseAccessValidationResult = RuleSetValidationResult< typeof databaseAccessValidationRules diff --git a/web/packages/teleport/src/services/resources/types.ts b/web/packages/teleport/src/services/resources/types.ts index e5b4219338838..9a5d315045de3 100644 --- a/web/packages/teleport/src/services/resources/types.ts +++ b/web/packages/teleport/src/services/resources/types.ts @@ -80,6 +80,7 @@ export type RoleConditions = { kubernetes_groups?: string[]; kubernetes_labels?: Labels; kubernetes_resources?: KubernetesResource[]; + kubernetes_users?: string[]; app_labels?: Labels; aws_role_arns?: string[]; @@ -90,6 +91,7 @@ export type RoleConditions = { db_names?: string[]; db_users?: string[]; db_roles?: string[]; + db_service_labels?: Labels; windows_desktop_labels?: Labels; windows_desktop_logins?: string[]; @@ -163,6 +165,7 @@ export type KubernetesVerb = export type Rule = { resources?: ResourceKind[]; verbs?: Verb[]; + where?: string; }; export enum ResourceKind { From 2c1280c0d5c543b0332a5b2fd2251920f60c99b9 Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:09:27 -0500 Subject: [PATCH 25/28] Improve web ui host dialing for file transfers (#51447) Converts web file transfers to use the same in process dialing 
that web sessions use. This eliminates extra latency by avoiding an SSH dial from the proxy to the proxy as a result of no longer using client.TeleportClient to establish the connection to the target host. Closes https://github.com/gravitational/teleport/issues/24419. --- lib/web/files.go | 82 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 77 insertions(+), 5 deletions(-) diff --git a/lib/web/files.go b/lib/web/files.go index 048236d320449..83c4c4004959d 100644 --- a/lib/web/files.go +++ b/lib/web/files.go @@ -28,15 +28,21 @@ import ( "github.com/julienschmidt/httprouter" "golang.org/x/crypto/ssh" + "github.com/gravitational/teleport" "github.com/gravitational/teleport/api/client/proto" "github.com/gravitational/teleport/api/defaults" + apidefaults "github.com/gravitational/teleport/api/defaults" "github.com/gravitational/teleport/api/utils/keys" "github.com/gravitational/teleport/api/utils/sshutils" + "github.com/gravitational/teleport/lib/agentless" "github.com/gravitational/teleport/lib/auth/authclient" "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/modules" "github.com/gravitational/teleport/lib/multiplexer" "github.com/gravitational/teleport/lib/reversetunnelclient" "github.com/gravitational/teleport/lib/sshutils/sftp" + "github.com/gravitational/teleport/lib/teleagent" + "github.com/gravitational/teleport/lib/utils" ) // fileTransferRequest describes HTTP file transfer request @@ -144,8 +150,7 @@ func (h *Handler) transferFile(w http.ResponseWriter, r *http.Request, p httprou } if req.mfaResponse != "" { - err = ft.issueSingleUseCert(mfaResponse, r, tc) - if err != nil { + if err = ft.issueSingleUseCert(mfaResponse, r, tc); err != nil { return nil, trace.Wrap(err) } } @@ -155,17 +160,84 @@ func (h *Handler) transferFile(w http.ResponseWriter, r *http.Request, p httprou ctx = context.WithValue(ctx, sftp.ModeratedSessionID, req.moderatedSessionID) } - cl, err := tc.ConnectToCluster(ctx) + 
accessPoint, err := site.CachingAccessPoint() if err != nil { + h.logger.DebugContext(r.Context(), "Unable to get auth access point", "error", err) return nil, trace.Wrap(err) } - defer cl.Close() - err = tc.TransferFiles(ctx, cl, req.login, req.serverID+":0", cfg) + accessChecker, err := sctx.GetUserAccessChecker() if err != nil { + return nil, trace.Wrap(err) + } + + getAgent := func() (teleagent.Agent, error) { + return teleagent.NopCloser(tc.LocalAgent()), nil + } + cert, err := sctx.GetSSHCertificate() + if err != nil { + return nil, trace.Wrap(err) + } + signer := agentless.SignerFromSSHCertificate(cert, h.auth.accessPoint, tc.SiteName, tc.Username) + + conn, err := h.cfg.Router.DialHost( + ctx, + &utils.NetAddr{Addr: r.RemoteAddr}, + &h.cfg.ProxyWebAddr, + req.serverID, + "0", + tc.SiteName, + accessChecker, + getAgent, + signer, + ) + if err != nil { + if errors.Is(err, teleport.ErrNodeIsAmbiguous) { + const message = "error: ambiguous host could match multiple nodes\n\nHint: try addressing the node by unique id (ex: user@node-id)\n" + return nil, trace.NotFound(message) + } + + return nil, trace.Wrap(err) + } + + dialTimeout := apidefaults.DefaultIOTimeout + if netConfig, err := accessPoint.GetClusterNetworkingConfig(ctx); err != nil { + h.logger.DebugContext(r.Context(), "Unable to fetch cluster networking config", "error", err) + } else { + dialTimeout = netConfig.GetSSHDialTimeout() + } + + sshConfig := &ssh.ClientConfig{ + User: tc.HostLogin, + Auth: tc.AuthMethods, + HostKeyCallback: tc.HostKeyCallback, + Timeout: dialTimeout, + } + + nodeClient, err := client.NewNodeClient( + ctx, + sshConfig, + conn, + req.serverID+":0", + req.serverID, + tc, + modules.GetModules().IsBoringBinary(), + ) + if err != nil { + // The close error is ignored instead of using [trace.NewAggregate] because + // aggregate errors do not allow error inspection with things like [trace.IsAccessDenied]. 
+ _ = conn.Close() + + return nil, trace.Wrap(err) + } + + defer nodeClient.Close() + + if err := nodeClient.TransferFiles(ctx, cfg); err != nil { if errors.As(err, new(*sftp.NonRecursiveDirectoryTransferError)) { return nil, trace.Errorf("transferring directories through the Web UI is not supported at the moment, please use tsh scp -r") } + return nil, trace.Wrap(err) } From a5398327627afb896aa06aff637f325e55f6d277 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Fri, 31 Jan 2025 09:49:31 -0500 Subject: [PATCH 26/28] Ensure proxy version getter adds the leading 'v' (#51687) --- lib/automaticupgrades/version/proxy.go | 13 ++- lib/automaticupgrades/version/proxy_test.go | 116 ++++++++++++++++++++ 2 files changed, 124 insertions(+), 5 deletions(-) create mode 100644 lib/automaticupgrades/version/proxy_test.go diff --git a/lib/automaticupgrades/version/proxy.go b/lib/automaticupgrades/version/proxy.go index db55123dd529e..c7626a7b16b87 100644 --- a/lib/automaticupgrades/version/proxy.go +++ b/lib/automaticupgrades/version/proxy.go @@ -28,11 +28,15 @@ import ( "github.com/gravitational/teleport/lib/automaticupgrades/constants" ) +type Finder interface { + Find() (*webclient.PingResponse, error) +} + type proxyVersionClient struct { - client *webclient.ReusableClient + client Finder } -func (b *proxyVersionClient) Get(ctx context.Context) (string, error) { +func (b *proxyVersionClient) Get(_ context.Context) (string, error) { resp, err := b.client.Find() if err != nil { return "", trace.Wrap(err) @@ -41,7 +45,7 @@ func (b *proxyVersionClient) Get(ctx context.Context) (string, error) { if resp.AutoUpdate.AgentVersion == "" { return "", trace.NotImplemented("proxy does not seem to implement RFD-184") } - return resp.AutoUpdate.AgentVersion, nil + return EnsureSemver(resp.AutoUpdate.AgentVersion) } // ProxyVersionGetter gets the target version from the Teleport Proxy Service /find endpoint, as @@ -60,8 +64,7 @@ func (g ProxyVersionGetter) Name() string { // GetVersion 
implements Getter func (g ProxyVersionGetter) GetVersion(ctx context.Context) (string, error) { - result, err := g.cachedGetter(ctx) - return result, trace.Wrap(err) + return g.cachedGetter(ctx) } // NewProxyVersionGetter creates a ProxyVersionGetter from a webclient. diff --git a/lib/automaticupgrades/version/proxy_test.go b/lib/automaticupgrades/version/proxy_test.go new file mode 100644 index 0000000000000..2360f271c25a1 --- /dev/null +++ b/lib/automaticupgrades/version/proxy_test.go @@ -0,0 +1,116 @@ +/* + * Teleport + * Copyright (C) 2025 Gravitational, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +package version + +import ( + "context" + "testing" + + "github.com/gravitational/trace" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/gravitational/teleport/api/client/webclient" +) + +type mockWebClient struct { + mock.Mock +} + +func (m *mockWebClient) Find() (*webclient.PingResponse, error) { + args := m.Called() + return args.Get(0).(*webclient.PingResponse), args.Error(1) +} + +func TestProxyVersionClient(t *testing.T) { + ctx := context.Background() + tests := []struct { + name string + pong *webclient.PingResponse + pongErr error + expectedVersion string + expectErr require.ErrorAssertionFunc + }{ + { + name: "semver without leading v", + pong: &webclient.PingResponse{ + AutoUpdate: webclient.AutoUpdateSettings{ + AgentVersion: "1.2.3", + }, + }, + expectedVersion: "v1.2.3", + expectErr: require.NoError, + }, + { + name: "semver with leading v", + pong: &webclient.PingResponse{ + AutoUpdate: webclient.AutoUpdateSettings{ + AgentVersion: "v1.2.3", + }, + }, + expectedVersion: "v1.2.3", + expectErr: require.NoError, + }, + { + name: "semver with prerelease and no leading v", + pong: &webclient.PingResponse{ + AutoUpdate: webclient.AutoUpdateSettings{ + AgentVersion: "1.2.3-dev.bartmoss.1", + }, + }, + expectedVersion: "v1.2.3-dev.bartmoss.1", + expectErr: require.NoError, + }, + { + name: "invalid semver", + pong: &webclient.PingResponse{ + AutoUpdate: webclient.AutoUpdateSettings{ + AgentVersion: "v", + }, + }, + expectedVersion: "", + expectErr: require.Error, + }, + { + name: "empty response", + pong: &webclient.PingResponse{}, + expectedVersion: "", + expectErr: func(t require.TestingT, err error, i ...interface{}) { + require.ErrorIs(t, err, trace.NotImplemented("proxy does not seem to implement RFD-184")) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test setup: create mock and load fixtures. 
+ webClient := &mockWebClient{} + webClient.On("Find").Once().Return(tt.pong, tt.pongErr) + + // Test execution. + clt := proxyVersionClient{client: webClient} + v, err := clt.Get(ctx) + + // Test validation. + tt.expectErr(t, err) + require.Equal(t, tt.expectedVersion, v) + webClient.AssertExpectations(t) + }) + } +} From 7b160600b8c4da59197417d967b70c895e73245a Mon Sep 17 00:00:00 2001 From: "STeve (Xin) Huang" Date: Fri, 31 Jan 2025 10:35:31 -0500 Subject: [PATCH 27/28] Fix misaligned command usage for 'help' commands of Teleport binaries (#51660) * Fix an issue command help is not aligned for help command * align when unknown command/subcommand --- lib/utils/cli.go | 37 +++++++++++++------- lib/utils/cli_test.go | 81 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 12 deletions(-) diff --git a/lib/utils/cli.go b/lib/utils/cli.go index 1c2e0c68928a7..38513f5bb6f40 100644 --- a/lib/utils/cli.go +++ b/lib/utils/cli.go @@ -318,25 +318,38 @@ func createUsageTemplate(opts ...func(*usageTemplateOptions)) string { // pre-parsing the arguments then applying any changes to the usage template if // necessary. func UpdateAppUsageTemplate(app *kingpin.Application, args []string) { - // If ParseContext fails, kingpin will not show usage so there is no need - // to update anything here. See app.Parse for more details. - context, err := app.ParseContext(args) - if err != nil { - return - } - app.UsageTemplate(createUsageTemplate( - withCommandPrintfWidth(app, context), + withCommandPrintfWidth(app, args), )) } -// withCommandPrintfWidth returns an usage template option that +// withCommandPrintfWidth returns a usage template option that // updates command printf width if longer than default. 
-func withCommandPrintfWidth(app *kingpin.Application, context *kingpin.ParseContext) func(*usageTemplateOptions) { +func withCommandPrintfWidth(app *kingpin.Application, args []string) func(*usageTemplateOptions) { return func(opt *usageTemplateOptions) { var commands []*kingpin.CmdModel - if context.SelectedCommand != nil { - commands = context.SelectedCommand.Model().FlattenedCommands() + + // When selected command is "help", skip the "help" arg + // so the intended command is selected for calculation. + if len(args) > 0 && args[0] == "help" { + args = args[1:] + } + + appContext, err := app.ParseContext(args) + switch { + case appContext == nil: + slog.WarnContext(context.Background(), "No application context found") + return + + // Note that ParseContext may return the current selected command that's + // causing the error. We should continue in those cases when appContext is + // not nil. + case err != nil: + slog.InfoContext(context.Background(), "Error parsing application context", "error", err) + } + + if appContext.SelectedCommand != nil { + commands = appContext.SelectedCommand.Model().FlattenedCommands() } else { commands = app.Model().FlattenedCommands() } diff --git a/lib/utils/cli_test.go b/lib/utils/cli_test.go index dcfccdeaab9cf..1c2e031c4d312 100644 --- a/lib/utils/cli_test.go +++ b/lib/utils/cli_test.go @@ -19,12 +19,14 @@ package utils import ( + "bytes" "crypto/x509" "fmt" "io" "log/slog" "testing" + "github.com/alecthomas/kingpin/v2" "github.com/gravitational/trace" "github.com/stretchr/testify/require" ) @@ -161,3 +163,82 @@ func TestAllowWhitespace(t *testing.T) { require.Equal(t, tt.out, AllowWhitespace(tt.in), fmt.Sprintf("test case %v", i)) } } + +func TestUpdateAppUsageTemplate(t *testing.T) { + makeApp := func(usageWriter io.Writer) *kingpin.Application { + app := InitCLIParser("TestUpdateAppUsageTemplate", "some help message") + app.UsageWriter(usageWriter) + app.Terminate(func(int) {}) + + app.Command("hello", "Hello.") + + create := 
app.Command("create", "Create.") + create.Command("box", "Box.") + create.Command("rocket", "Rocket.") + return app + } + + tests := []struct { + name string + inputArgs []string + outputContains string + }{ + { + name: "command width aligned for app help", + inputArgs: []string{}, + outputContains: ` +Commands: + help Show help. + hello Hello. + create box Box. + create rocket Rocket. +`, + }, + { + name: "command width aligned for command help", + inputArgs: []string{"create"}, + outputContains: ` +Commands: + create box Box. + create rocket Rocket. +`, + }, + { + name: "command width aligned for unknown command error", + inputArgs: []string{"unknown"}, + outputContains: ` +Commands: + help Show help. + hello Hello. + create box Box. + create rocket Rocket. +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Run("help flag", func(t *testing.T) { + var buffer bytes.Buffer + app := makeApp(&buffer) + args := append(tt.inputArgs, "--help") + UpdateAppUsageTemplate(app, args) + + app.Usage(args) + require.Contains(t, buffer.String(), tt.outputContains) + }) + + t.Run("help command", func(t *testing.T) { + var buffer bytes.Buffer + app := makeApp(&buffer) + args := append([]string{"help"}, tt.inputArgs...) + UpdateAppUsageTemplate(app, args) + + // HelpCommand is triggered on PreAction during Parse. + // See kingpin.Application.init for more details. + _, err := app.Parse(args) + require.NoError(t, err) + require.Contains(t, buffer.String(), tt.outputContains) + }) + }) + } +} From 6fa487296af9d6b56bf46e4122e0e93da489271f Mon Sep 17 00:00:00 2001 From: rosstimothy <39066650+rosstimothy@users.noreply.github.com> Date: Fri, 31 Jan 2025 10:53:33 -0500 Subject: [PATCH 28/28] Convert lib/services to use aws-sdk-go-v2 (#51706) There was only one usage of the legacy sdk, a call to arn.IsARN, which has a direct replacement in the v2 arn package. 
--- lib/services/role.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/services/role.go b/lib/services/role.go index 458662acf5305..e6327e2a1f4fd 100644 --- a/lib/services/role.go +++ b/lib/services/role.go @@ -31,7 +31,7 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/google/uuid" "github.com/gravitational/trace" jsoniter "github.com/json-iterator/go"