diff --git a/integration/helpers.go b/integration/helpers.go index d48975b765a56..e230a1815c4ec 100644 --- a/integration/helpers.go +++ b/integration/helpers.go @@ -253,6 +253,12 @@ func (i *TeleInstance) CreateEx(trustedSecrets []*InstanceSecrets, tconf *servic tconf = service.MakeDefaultConfig() } tconf.DataDir = dataDir + tconf.Auth.ClusterConfig, err = services.NewClusterConfig(services.ClusterConfigSpecV3{ + SessionRecording: services.RecordAtNode, + }) + if err != nil { + return trace.Wrap(err) + } tconf.Auth.ClusterName, err = services.NewClusterName(services.ClusterNameSpecV2{ ClusterName: i.Secrets.SiteName, }) diff --git a/lib/auth/api.go b/lib/auth/api.go index 503429e36814a..49b4dd8d85341 100644 --- a/lib/auth/api.go +++ b/lib/auth/api.go @@ -29,6 +29,9 @@ type AccessPoint interface { // server / certificate authority (CA) GetDomainName() (string, error) + // GetClusterConfig returns cluster level configuration. + GetClusterConfig() (services.ClusterConfig, error) + // GetNamespaces returns a list of namespaces GetNamespaces() ([]services.Namespace, error) diff --git a/lib/auth/apiserver.go b/lib/auth/apiserver.go index 4f0659276e3c3..fc00428263e18 100644 --- a/lib/auth/apiserver.go +++ b/lib/auth/apiserver.go @@ -151,6 +151,8 @@ func NewAPIServer(config *APIConfig) http.Handler { srv.DELETE("/:version/roles/:role", srv.withAuth(srv.deleteRole)) // cluster configuration + srv.GET("/:version/configuration", srv.withAuth(srv.getClusterConfig)) + srv.POST("/:version/configuration", srv.withAuth(srv.setClusterConfig)) srv.GET("/:version/configuration/name", srv.withAuth(srv.getClusterName)) srv.POST("/:version/configuration/name", srv.withAuth(srv.setClusterName)) srv.GET("/:version/configuration/static_tokens", srv.withAuth(srv.getStaticTokens)) @@ -1694,6 +1696,40 @@ func (s *APIServer) deleteRole(auth ClientI, w http.ResponseWriter, r *http.Requ return message(fmt.Sprintf("role '%v' deleted", role)), nil } +func (s *APIServer) getClusterConfig(auth ClientI, w http.ResponseWriter, r *http.Request, p httprouter.Params, version string) (interface{}, error) { + cc, err := auth.GetClusterConfig() + if err != nil { + return nil, trace.Wrap(err) + } + + return rawMessage(services.GetClusterConfigMarshaler().Marshal(cc, services.WithVersion(version))) +} + +type setClusterConfigReq struct { + ClusterConfig json.RawMessage `json:"cluster_config"` +} + +func (s *APIServer) setClusterConfig(auth ClientI, w http.ResponseWriter, r *http.Request, p httprouter.Params, version string) (interface{}, error) { + var req setClusterConfigReq + + err := httplib.ReadJSON(r, &req) + if err != nil { + return nil, trace.Wrap(err) + } + + cc, err := services.GetClusterConfigMarshaler().Unmarshal(req.ClusterConfig) + if err != nil { + return nil, trace.Wrap(err) + } + + err = auth.SetClusterConfig(cc) + if err != nil { + return nil, trace.Wrap(err) + } + + return message(fmt.Sprintf("cluster config set: %+v", cc)), nil +} + func (s *APIServer) getClusterName(auth ClientI, w http.ResponseWriter, r *http.Request, p httprouter.Params, version string) (interface{}, error) { cn, err := auth.GetClusterName() if err != nil { diff --git a/lib/auth/apiserver_test.go b/lib/auth/apiserver_test.go index 21f60c8dbae16..d038867ca05db 100644 --- a/lib/auth/apiserver_test.go +++ b/lib/auth/apiserver_test.go @@ -72,7 +72,7 @@ func (s *APISuite) SetUpTest(c *C) { s.bk, err = boltbk.New(backend.Params{"path": dir}) c.Assert(err, IsNil) - s.alog, err = events.NewAuditLog(dir) + s.alog, err = events.NewAuditLog(dir, true) 
c.Assert(err, IsNil) s.a = NewAuthServer(&InitConfig{ diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index 49858fb6a4002..9a212059f91d8 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -790,6 +790,25 @@ func (a *AuthWithRoles) DeleteRole(name string) error { return a.authServer.DeleteRole(name) } +// GetClusterConfig gets cluster level configuration. +func (a *AuthWithRoles) GetClusterConfig() (services.ClusterConfig, error) { + if err := a.action(defaults.Namespace, services.KindClusterConfig, services.VerbRead); err != nil { + return nil, trace.Wrap(err) + } + return a.authServer.GetClusterConfig() +} + +// SetClusterConfig sets cluster level configuration. +func (a *AuthWithRoles) SetClusterConfig(c services.ClusterConfig) error { + if err := a.action(defaults.Namespace, services.KindClusterConfig, services.VerbCreate); err != nil { + return trace.Wrap(err) + } + if err := a.action(defaults.Namespace, services.KindClusterConfig, services.VerbUpdate); err != nil { + return trace.Wrap(err) + } + return a.authServer.SetClusterConfig(c) +} + // GetClusterName gets the name of the cluster. func (a *AuthWithRoles) GetClusterName() (services.ClusterName, error) { if err := a.action(defaults.Namespace, services.KindClusterName, services.VerbRead); err != nil { diff --git a/lib/auth/clt.go b/lib/auth/clt.go index 177a2768a93c6..be7777553455c 100644 --- a/lib/auth/clt.go +++ b/lib/auth/clt.go @@ -1471,6 +1471,36 @@ func (c *Client) DeleteRole(name string) error { return trace.Wrap(err) } +// GetClusterConfig returns cluster level configuration information. +func (c *Client) GetClusterConfig() (services.ClusterConfig, error) { + out, err := c.Get(c.Endpoint("configuration"), url.Values{}) + if err != nil { + return nil, trace.Wrap(err) + } + + cc, err := services.GetClusterConfigMarshaler().Unmarshal(out.Bytes()) + if err != nil { + return nil, trace.Wrap(err) + } + + return cc, err +} + +// SetClusterConfig sets cluster level configuration information. +func (c *Client) SetClusterConfig(cc services.ClusterConfig) error { + data, err := services.GetClusterConfigMarshaler().Marshal(cc) + if err != nil { + return trace.Wrap(err) + } + + _, err = c.PostJSON(c.Endpoint("configuration"), &setClusterConfigReq{ClusterConfig: data}) + if err != nil { + return trace.Wrap(err) + } + + return nil +} + func (c *Client) GetClusterName() (services.ClusterName, error) { out, err := c.Get(c.Endpoint("configuration", "name"), url.Values{}) if err != nil { diff --git a/lib/auth/init.go b/lib/auth/init.go index 02d3728ae2838..5041dcf0f9850 100644 --- a/lib/auth/init.go +++ b/lib/auth/init.go @@ -102,6 +102,9 @@ type InitConfig struct { // AuthPreference defines the authentication type (local, oidc) and second // factor (off, otp, u2f) passed in from a configuration file. AuthPreference services.AuthPreference + + // ClusterConfig holds cluster level configuration. + ClusterConfig services.ClusterConfig } // Init instantiates and configures an instance of AuthServer @@ -151,6 +154,12 @@ func Init(cfg InitConfig) (*AuthServer, *Identity, error) { log.Infof("[INIT] Created Reverse Tunnel: %v", tunnel) } + err = asrv.SetClusterConfig(cfg.ClusterConfig) + if err != nil { + return nil, nil, trace.Wrap(err) + } + log.Infof("[INIT] Updating Cluster Configuration: %v", cfg.ClusterConfig) + // cluster name can only be set once. if it has already been set and we are // trying to update it to something else, hard fail. 
err = asrv.SetClusterName(cfg.ClusterName) diff --git a/lib/auth/permissions.go b/lib/auth/permissions.go index a2975511b2aa0..f0f3a3f7838b4 100644 --- a/lib/auth/permissions.go +++ b/lib/auth/permissions.go @@ -203,6 +203,7 @@ func GetCheckerForBuiltinRole(role teleport.Role) (services.AccessChecker, error services.NewRule(services.KindUser, services.RO()), services.NewRule(services.KindRole, services.RO()), services.NewRule(services.KindClusterAuthPreference, services.RO()), + services.NewRule(services.KindClusterConfig, services.RO()), services.NewRule(services.KindClusterName, services.RO()), services.NewRule(services.KindStaticTokens, services.RO()), services.NewRule(services.KindTunnelConnection, services.RW()), diff --git a/lib/auth/tun_test.go b/lib/auth/tun_test.go index 05a0900e34246..1d6dcc70dffdf 100644 --- a/lib/auth/tun_test.go +++ b/lib/auth/tun_test.go @@ -68,7 +68,7 @@ func (s *TunSuite) SetUpTest(c *C) { s.bk, err = dir.New(backend.Params{"path": s.dir}) c.Assert(err, IsNil) - s.alog, err = events.NewAuditLog(s.dir) + s.alog, err = events.NewAuditLog(s.dir, true) c.Assert(err, IsNil) s.sessionServer, err = session.New(s.bk) diff --git a/lib/config/configuration.go b/lib/config/configuration.go index 06c008d4aef69..bb584c82c5600 100644 --- a/lib/config/configuration.go +++ b/lib/config/configuration.go @@ -400,6 +400,12 @@ func ApplyFileConfig(fc *FileConfig, cfg *service.Config) error { log.Warnf(warningMessage) } } + // read in and set session recording + clusterConfig, err := fc.Auth.SessionRecording.Parse() + if err != nil { + return trace.Wrap(err) + } + cfg.Auth.ClusterConfig = clusterConfig // apply "ssh_service" section if fc.SSH.ListenAddress != "" { diff --git a/lib/config/fileconf.go b/lib/config/fileconf.go index d8da284596d1e..dbc7ed6995571 100644 --- a/lib/config/fileconf.go +++ b/lib/config/fileconf.go @@ -128,6 +128,7 @@ var ( "kex_algos": false, "mac_algos": false, "connector_name": false, + "session_recording": false, } ) @@ -450,24 +451,9 @@ func (s *Service) Disabled() bool { type Auth struct { Service `yaml:",inline"` - // DomainName is the name of the CA who manages this cluster - //DomainName string `yaml:"cluster_name,omitempty"` + // ClusterName is the name of the CA who manages this cluster ClusterName ClusterName `yaml:"cluster_name,omitempty"` - // TrustedClustersFile is a file path to a file containing public CA keys - // of clusters we trust. One key per line, those starting with '#' are comments - // TODO: THIS SETTING IS DEPRECATED - TrustedClusters []TrustedCluster `yaml:"trusted_clusters,omitempty"` - - // FOR INTERNAL USE: - // Authorities : 3rd party certificate authorities (CAs) this auth service trusts. - Authorities []Authority `yaml:"authorities,omitempty"` - - // FOR INTERNAL USE: - // ReverseTunnels is a list of SSH tunnels to 3rd party proxy services (used to talk - // to 3rd party auth servers we trust) - ReverseTunnels []ReverseTunnel `yaml:"reverse_tunnels,omitempty"` - // StaticTokens are pre-defined host provisioning tokens supplied via config file for // environments where paranoid security is not needed // @@ -479,17 +465,34 @@ type Auth struct { // type, second factor type, specific connector information, etc. Authentication *AuthenticationConfig `yaml:"authentication,omitempty"` + // SessionRecording determines where the session is recorded: node, proxy, or off. 
+ SessionRecording SessionRecording `yaml:"session_recording"` + + // FOR INTERNAL USE: + // Authorities : 3rd party certificate authorities (CAs) this auth service trusts. + Authorities []Authority `yaml:"authorities,omitempty"` + + // FOR INTERNAL USE: + // ReverseTunnels is a list of SSH tunnels to 3rd party proxy services (used to talk + // to 3rd party auth servers we trust) + ReverseTunnels []ReverseTunnel `yaml:"reverse_tunnels,omitempty"` + + // TrustedClustersFile is a file path to a file containing public CA keys + // of clusters we trust. One key per line, those starting with '#' are comments + // Deprecated: Remove in Teleport 2.4.1. + TrustedClusters []TrustedCluster `yaml:"trusted_clusters,omitempty"` + // OIDCConnectors is a list of trusted OpenID Connect Identity providers - // Deprecated: Use OIDC section in Authentication section instead. + // Deprecated: Remove in Teleport 2.4.1. OIDCConnectors []OIDCConnector `yaml:"oidc_connectors,omitempty"` // Configuration for "universal 2nd factor" - // Deprecated: Use U2F section in Authentication section instead. + // Deprecated: Remove in Teleport 2.4.1. U2F U2F `yaml:"u2f,omitempty"` // DynamicConfig determines when file configuration is pushed to the backend. Setting // it here overrides defaults. - // TODO: THIS SETTING IS DEPRECATED + // Deprecated: Remove in Teleport 2.4.1. DynamicConfig *bool `yaml:"dynamic_config,omitempty"` } @@ -616,6 +619,16 @@ func (u *UniversalSecondFactor) Parse() services.U2F { } } +// SessionRecording determines where the session is recorded: node, proxy, or off. +type SessionRecording string + +// Parse reads session_recording and creates a services.ClusterConfig. +func (s SessionRecording) Parse() (services.ClusterConfig, error) { + return services.NewClusterConfig(services.ClusterConfigSpecV3{ + SessionRecording: services.RecordingType(s), + }) +} + // SSH is 'ssh_service' section of the config file type SSH struct { Service `yaml:",inline"` diff --git a/lib/events/auditlog.go b/lib/events/auditlog.go index f74faba81bfa3..0786a099eef93 100644 --- a/lib/events/auditlog.go +++ b/lib/events/auditlog.go @@ -14,38 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -/* -Package events currently implements the audit log using a simple filesystem backend. -"Implements" means it implements events.IAuditLog interface (see events/api.go) - -The main log files are saved as: - /var/lib/teleport/log/.log - -Each session has its own session log stored as two files - /var/lib/teleport/log/.session.log - /var/lib/teleport/log/.session.bytes - -Where: - - .session.log (same events as in the main log, but related to the session) - - .session.bytes (recorded session bytes: PTY IO) - -The log file is rotated every 24 hours. The old files must be cleaned -up or archived by an external tool. 
- -Log file format: -utc_date,action,json_fields - -Common JSON fields -- user : teleport user -- login : server OS login, the user logged in as -- addr.local : server address:port -- addr.remote: connected client's address:port -- sid : session ID (GUID format) - -Examples: -2016-04-25 22:37:29 +0000 UTC,session.start,{"addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:35732","login":"root","sid":"4a9d97de-0b36-11e6-a0b3-d8cb8ae5080e","user":"vincent"} -2016-04-25 22:54:31 +0000 UTC,exec,{"addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:35949","command":"-bash -c ls /","login":"root","user":"vincent"} -*/ package events import ( @@ -60,7 +28,6 @@ import ( "sort" "strings" "sync" - "sync/atomic" "time" "github.com/gravitational/teleport/lib/defaults" @@ -109,7 +76,7 @@ type TimeSourceFunc func() time.Time // sessions. It implements IAuditLog type AuditLog struct { sync.Mutex - loggers map[session.ID]*SessionLogger + loggers map[session.ID]SessionLogger dataDir string // file is the current global event log file. As the time goes @@ -125,126 +92,27 @@ type AuditLog struct { // same as time.Now(), but helps with testing TimeSource TimeSourceFunc -} - -// BaseSessionLogger implements the common features of a session logger. The imporant -// property of the base logger is that it never fails and can be used as a fallback -// implementation behind more sophisticated loggers -type SessionLogger struct { - sync.Mutex - - sid session.ID - - // eventsFile stores logged events, just like the main logger, except - // these are all associated with this session - eventsFile *os.File - - // streamFile stores bytes from the session terminal I/O for replaying - streamFile *os.File - - // counter of how many bytes have been written during this session - writtenBytes int64 - - // same as time.Now(), but helps with testing - timeSource TimeSourceFunc - - createdTime time.Time -} - -// LogEvent logs an event associated with this session -func (sl *SessionLogger) LogEvent(fields EventFields) { - sl.logEvent(fields, time.Time{}) -} - -// LogEvent logs an event associated with this session -func (sl *SessionLogger) logEvent(fields EventFields, start time.Time) { - sl.Lock() - defer sl.Unlock() - - // add "bytes written" counter: - fields[SessionByteOffset] = atomic.LoadInt64(&sl.writtenBytes) - - // add "milliseconds since" timestamp: - var now time.Time - if start.IsZero() { - now = sl.timeSource().In(time.UTC).Round(time.Millisecond) - } else { - now = start.In(time.UTC).Round(time.Millisecond) - } - - fields[SessionEventTimestamp] = int(now.Sub(sl.createdTime).Nanoseconds() / 1000000) - fields[EventTime] = now - - line := eventToLine(fields) - if sl.eventsFile != nil { - _, err := fmt.Fprintln(sl.eventsFile, line) - if err != nil { - log.Error(err) - } - } -} - -// Close() is called when clients close on the requested "session writer". -// We ignore their requests because this writer (file) should be closed only -// when the session logger is closed -func (sl *SessionLogger) Close() error { - log.Infof("sessionLogger.Close(sid=%s)", sl.sid) - return nil + // recordSessions controls if sessions are recorded along with audit events. + recordSessions bool } -// Finalize is called by the session when it's closing. 
This is where we're -// releasing audit resources associated with the session -func (sl *SessionLogger) Finalize() error { - sl.Lock() - defer sl.Unlock() - if sl.streamFile != nil { - auditOpenFiles.Dec() - log.Infof("sessionLogger.Finalize(sid=%s)", sl.sid) - sl.streamFile.Close() - sl.eventsFile.Close() - sl.streamFile = nil - sl.eventsFile = nil - } - return nil -} - -// WriteChunk takes a stream of bytes (usually the output from a session terminal) -// and writes it into a "stream file", for future replay of interactive sessions. -func (sl *SessionLogger) WriteChunk(chunk *SessionChunk) (written int, err error) { - if sl.streamFile == nil { - err := trace.Errorf("session %v error: attempt to write to a closed file", sl.sid) - return 0, trace.Wrap(err) - } - if written, err = sl.streamFile.Write(chunk.Data); err != nil { - log.Error(err) - return written, trace.Wrap(err) - } - - // log this as a session event (but not more often than once a sec) - sl.logEvent(EventFields{ - EventType: SessionPrintEvent, - SessionPrintEventBytes: len(chunk.Data), - }, time.Unix(0, chunk.Time)) - - // increase the total lengh of the stream - atomic.AddInt64(&sl.writtenBytes, int64(len(chunk.Data))) - return written, nil -} - -// Creates and returns a new Audit Log oboject whish will store its logfiles -// in a given directory> -func NewAuditLog(dataDir string) (IAuditLog, error) { +// NewAuditLog creates and returns a new Audit Log object which will store its logfiles in +// a given directory. Session recording can be disabled by setting +// recordSessions to false. +func NewAuditLog(dataDir string, recordSessions bool) (IAuditLog, error) { // create a directory for session logs: sessionDir := filepath.Join(dataDir, SessionLogsDir) if err := os.MkdirAll(sessionDir, 0770); err != nil { return nil, trace.Wrap(err) } + al := &AuditLog{ - loggers: make(map[session.ID]*SessionLogger, 0), + loggers: make(map[session.ID]SessionLogger, 0), dataDir: dataDir, RotationPeriod: defaults.LogRotationPeriod, TimeSource: time.Now, + recordSessions: recordSessions, } if err := al.migrateSessions(); err != nil { return nil, trace.Wrap(err) @@ -278,8 +146,7 @@ func (l *AuditLog) migrateSessions() error { return nil } -// PostSessionSlice submits slice of session chunks -// to the audit log server +// PostSessionSlice submits slice of session chunks to the audit log server. func (l *AuditLog) PostSessionSlice(slice SessionSlice) error { if slice.Namespace == "" { return trace.BadParameter("missing parameter Namespace") @@ -356,7 +223,6 @@ func (l *AuditLog) GetSessionEvents(namespace string, sid session.ID, afterN int } logFile, err := os.OpenFile(l.sessionLogFn(namespace, sid), os.O_RDONLY, 0640) if err != nil { - log.Warn(err) // no file found? this means no events have been logged yet if os.IsNotExist(err) { return nil, nil @@ -620,7 +486,7 @@ func (l *AuditLog) sessionLogFn(namespace string, sid session.ID) string { // LoggerFor creates a logger for a specified session.
Session loggers allow // to group all events into special "session log files" for easier audit -func (l *AuditLog) LoggerFor(namespace string, sid session.ID) (sl *SessionLogger, err error) { +func (l *AuditLog) LoggerFor(namespace string, sid session.ID) (sl SessionLogger, err error) { l.Lock() defer l.Unlock() @@ -628,6 +494,12 @@ func (l *AuditLog) LoggerFor(namespace string, sid session.ID) (sl *SessionLogge return nil, trace.BadParameter("missing parameter namespace") } + // if we are not recording sessions, create a logger that discards all + // session data sent to it. + if l.recordSessions == false { + return &discardSessionLogger{}, nil + } + sl, ok := l.loggers[sid] if ok { return sl, nil @@ -650,7 +522,7 @@ func (l *AuditLog) LoggerFor(namespace string, sid session.ID) (sl *SessionLogge log.Error(err) return nil, trace.Wrap(err) } - sl = &SessionLogger{ + sl = &diskSessionLogger{ sid: sid, streamFile: fstream, eventsFile: fevents, diff --git a/lib/events/auditlog_test.go b/lib/events/auditlog_test.go index baa43b386d41b..8ae50ce15e0c2 100644 --- a/lib/events/auditlog_test.go +++ b/lib/events/auditlog_test.go @@ -30,8 +30,8 @@ func (a *AuditTestSuite) TearDownSuite(c *check.C) { // creates a file-based audit log and returns a proper *AuditLog pointer // instead of the usual IAuditLog interface -func (a *AuditTestSuite) makeLog(c *check.C, dataDir string) (*AuditLog, error) { - alog, err := NewAuditLog(dataDir) +func (a *AuditTestSuite) makeLog(c *check.C, dataDir string, recordSessions bool) (*AuditLog, error) { + alog, err := NewAuditLog(dataDir, recordSessions) if err != nil { return nil, trace.Wrap(err) } @@ -48,7 +48,7 @@ func (a *AuditTestSuite) SetUpSuite(c *check.C) { } func (a *AuditTestSuite) TestNew(c *check.C) { - alog, err := a.makeLog(c, a.dataDir) + alog, err := a.makeLog(c, a.dataDir, true) c.Assert(err, check.IsNil) // close twice: c.Assert(alog.Close(), check.IsNil) @@ -60,7 +60,7 @@ func (a *AuditTestSuite) TestComplexLogging(c *check.C) { os.RemoveAll(a.dataDir) // create audit log, write a couple of events into it, close it - alog, err := a.makeLog(c, a.dataDir) + alog, err := a.makeLog(c, a.dataDir, true) c.Assert(err, check.IsNil) alog.TimeSource = func() time.Time { return now } @@ -135,10 +135,44 @@ func (a *AuditTestSuite) TestComplexLogging(c *check.C) { c.Assert(found[0].GetString(EventLogin), check.Equals, "vincent") } +func (a *AuditTestSuite) TestSessionRecordingOff(c *check.C) { + now := time.Now().In(time.UTC).Round(time.Second) + os.RemoveAll(a.dataDir) + + // create audit log with session recording disabled + alog, err := a.makeLog(c, a.dataDir, false) + c.Assert(err, check.IsNil) + alog.TimeSource = func() time.Time { return now } + + // emit "session.start" event into the audit log for session "200" + err = alog.EmitAuditEvent(SessionStartEvent, EventFields{SessionEventID: "200", EventLogin: "doggy", EventNamespace: defaults.Namespace}) + c.Assert(err, check.IsNil) + + // type "hello" into session "200" + err = alog.PostSessionChunk(defaults.Namespace, "200", bytes.NewBufferString("hello")) + c.Assert(err, check.IsNil) + + // emit "session.end" event into the audit log for session "200" + err = alog.EmitAuditEvent(SessionEndEvent, EventFields{SessionEventID: "200", EventLogin: "doggy", EventNamespace: defaults.Namespace}) + c.Assert(err, check.IsNil) + + // get all events from the audit log, we should have two + found, err := alog.SearchEvents(now.Add(-time.Hour), now.Add(time.Hour), "") + c.Assert(err, check.IsNil) + c.Assert(found,
check.HasLen, 2) + c.Assert(found[0].GetString(EventLogin), check.Equals, "doggy") + c.Assert(found[1].GetString(EventLogin), check.Equals, "doggy") + + // inspect the session log for "200". it should be empty. + history, err := alog.GetSessionEvents(defaults.Namespace, "200", 0) + c.Assert(err, check.IsNil) + c.Assert(history, check.HasLen, 0) +} + func (a *AuditTestSuite) TestBasicLogging(c *check.C) { now := time.Now().In(time.UTC).Round(time.Second) // create audit log, write a couple of events into it, close it - alog, err := a.makeLog(c, a.dataDir) + alog, err := a.makeLog(c, a.dataDir, true) c.Assert(err, check.IsNil) alog.TimeSource = func() time.Time { return now } diff --git a/lib/events/discard.go b/lib/events/discard.go index 307c192a4f9e5..1af556642933d 100644 --- a/lib/events/discard.go +++ b/lib/events/discard.go @@ -37,3 +37,25 @@ func (d *DiscardAuditLog) SearchEvents(fromUTC, toUTC time.Time, query string) ( func (d *DiscardAuditLog) SearchSessionEvents(fromUTC time.Time, toUTC time.Time) ([]EventFields, error) { return make([]EventFields, 0), nil } + +// discardSessionLogger implements a session logger that does nothing. It +// discards all events and chunks written to it. It is used when session +// recording has been disabled. +type discardSessionLogger struct { +} + +func (d *discardSessionLogger) LogEvent(fields EventFields) { + return +} + +func (d *discardSessionLogger) Close() error { + return nil +} + +func (d *discardSessionLogger) Finalize() error { + return nil +} + +func (d *discardSessionLogger) WriteChunk(chunk *SessionChunk) (written int, err error) { + return 0, nil +} diff --git a/lib/events/doc.go b/lib/events/doc.go new file mode 100644 index 0000000000000..bd24a1953cfa5 --- /dev/null +++ b/lib/events/doc.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package events currently implements the audit log using a simple filesystem backend. +"Implements" means it implements events.IAuditLog interface (see events/api.go) + +The main log files are saved as: + /var/lib/teleport/log/.log + +Each session has its own session log stored as two files + /var/lib/teleport/log/.session.log + /var/lib/teleport/log/.session.bytes + +Where: + - .session.log (same events as in the main log, but related to the session) + - .session.bytes (recorded session bytes: PTY IO) + +The log file is rotated every 24 hours. The old files must be cleaned +up or archived by an external tool. 
+ +Log file format: +utc_date,action,json_fields + +Common JSON fields +- user : teleport user +- login : server OS login, the user logged in as +- addr.local : server address:port +- addr.remote: connected client's address:port +- sid : session ID (GUID format) + +Examples: +2016-04-25 22:37:29 +0000 UTC,session.start,{"addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:35732","login":"root","sid":"4a9d97de-0b36-11e6-a0b3-d8cb8ae5080e","user":"vincent"} +2016-04-25 22:54:31 +0000 UTC,exec,{"addr.local":"127.0.0.1:3022","addr.remote":"127.0.0.1:35949","command":"-bash -c ls /","login":"root","user":"vincent"} +*/ +package events diff --git a/lib/events/sessionlog.go b/lib/events/sessionlog.go new file mode 100644 index 0000000000000..0da811ffb120d --- /dev/null +++ b/lib/events/sessionlog.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "fmt" + "os" + "sync" + "sync/atomic" + "time" + + "github.com/gravitational/teleport/lib/session" + "github.com/gravitational/trace" + + log "github.com/sirupsen/logrus" ) + +// SessionLogger is an interface that all session loggers must implement. +type SessionLogger interface { + // LogEvent logs events associated with this session. + LogEvent(fields EventFields) + + // Close is called when clients close on the requested "session writer". + // We ignore their requests because this writer (file) should be closed only + // when the session logger is closed. + Close() error + + // Finalize is called by the session when it's closing. This is where we're + // releasing audit resources associated with the session + Finalize() error + + // WriteChunk takes a stream of bytes (usually the output from a session + // terminal) and writes it into a "stream file", for future replay of + // interactive sessions. + WriteChunk(chunk *SessionChunk) (written int, err error) +} + +// diskSessionLogger implements a disk based session logger. The important +// property of the disk based logger is that it never fails and can be used as +// a fallback implementation behind more sophisticated loggers.
+type diskSessionLogger struct { + sync.Mutex + + sid session.ID + + // eventsFile stores logged events, just like the main logger, except + // these are all associated with this session + eventsFile *os.File + + // streamFile stores bytes from the session terminal I/O for replaying + streamFile *os.File + + // counter of how many bytes have been written during this session + writtenBytes int64 + + // same as time.Now(), but helps with testing + timeSource TimeSourceFunc + + createdTime time.Time +} + +// LogEvent logs an event associated with this session +func (sl *diskSessionLogger) LogEvent(fields EventFields) { + sl.logEvent(fields, time.Time{}) +} + +// logEvent logs an event associated with this session +func (sl *diskSessionLogger) logEvent(fields EventFields, start time.Time) { + sl.Lock() + defer sl.Unlock() + + // add "bytes written" counter: + fields[SessionByteOffset] = atomic.LoadInt64(&sl.writtenBytes) + + // add "milliseconds since" timestamp: + var now time.Time + if start.IsZero() { + now = sl.timeSource().In(time.UTC).Round(time.Millisecond) + } else { + now = start.In(time.UTC).Round(time.Millisecond) + } + + fields[SessionEventTimestamp] = int(now.Sub(sl.createdTime).Nanoseconds() / 1000000) + fields[EventTime] = now + + line := eventToLine(fields) + + if sl.eventsFile != nil { + _, err := fmt.Fprintln(sl.eventsFile, line) + if err != nil { + log.Error(err) + } + } +} + +// Close is called when clients close on the requested "session writer". +// We ignore their requests because this writer (file) should be closed only +// when the session logger is closed +func (sl *diskSessionLogger) Close() error { + log.Infof("sessionLogger.Close(sid=%s)", sl.sid) + return nil +} + +// Finalize is called by the session when it's closing. This is where we're +// releasing audit resources associated with the session +func (sl *diskSessionLogger) Finalize() error { + sl.Lock() + defer sl.Unlock() + if sl.streamFile != nil { + auditOpenFiles.Dec() + log.Infof("sessionLogger.Finalize(sid=%s)", sl.sid) + sl.streamFile.Close() + sl.eventsFile.Close() + sl.streamFile = nil + sl.eventsFile = nil + } + return nil +} + +// WriteChunk takes a stream of bytes (usually the output from a session terminal) +// and writes it into a "stream file", for future replay of interactive sessions. +func (sl *diskSessionLogger) WriteChunk(chunk *SessionChunk) (written int, err error) { + if sl.streamFile == nil { + err := trace.Errorf("session %v error: attempt to write to a closed file", sl.sid) + return 0, trace.Wrap(err) + } + if written, err = sl.streamFile.Write(chunk.Data); err != nil { + log.Error(err) + return written, trace.Wrap(err) + } + + // log this as a session event (but not more often than once a sec) + sl.logEvent(EventFields{ + EventType: SessionPrintEvent, + SessionPrintEventBytes: len(chunk.Data), + }, time.Unix(0, chunk.Time)) + + // increase the total length of the stream + atomic.AddInt64(&sl.writtenBytes, int64(len(chunk.Data))) + return written, nil +} diff --git a/lib/service/cfg.go b/lib/service/cfg.go index b7b5d47728d34..036df4881d032 100644 --- a/lib/service/cfg.go +++ b/lib/service/cfg.go @@ -239,6 +239,9 @@ type AuthConfig struct { // Roles is a set of roles to pre-provision for this cluster Roles []services.Role + // ClusterConfig stores cluster level configuration. + ClusterConfig services.ClusterConfig + // ClusterName is a name that identifies this authority and all // host nodes in the cluster that will share this authority domain name // as a base name, e.g.
if authority domain name is example.com, diff --git a/lib/service/service.go b/lib/service/service.go index 573be7ed98c71..926f9b864ed9e 100644 --- a/lib/service/service.go +++ b/lib/service/service.go @@ -303,13 +303,29 @@ func (process *TeleportProcess) initAuthService(authority auth.Authority) error } // create the audit log, which will be consuming (and recording) all events - // and record sessions + // and recording all sessions. var auditLog events.IAuditLog if cfg.Auth.NoAudit { + // this is for teleconsole auditLog = &events.DiscardAuditLog{} - log.Warn("the audit and session recording are turned off") + + warningMessage := "Warning: Teleport audit and session recording have been " + + "turned off. This is dangerous, you will not be able to view audit events " + + "or save and playback recorded sessions." + log.Warn(warningMessage) } else { - auditLog, err = events.NewAuditLog(filepath.Join(cfg.DataDir, "log")) + // check if session recording has been disabled. note, we will continue + // logging audit events, we just won't record sessions. + recordSessions := true + if cfg.Auth.ClusterConfig.GetSessionRecording() == services.RecordOff { + recordSessions = false + + warningMessage := "Warning: Teleport session recording has been turned off. " + + "This is dangerous, you will not be able to save and playback sessions." + log.Warn(warningMessage) + } + + auditLog, err = events.NewAuditLog(filepath.Join(cfg.DataDir, "log"), recordSessions) if err != nil { return trace.Wrap(err) } @@ -319,6 +335,7 @@ func (process *TeleportProcess) initAuthService(authority auth.Authority) error authServer, identity, err := auth.Init(auth.InitConfig{ Backend: b, Authority: authority, + ClusterConfig: cfg.Auth.ClusterConfig, ClusterName: cfg.Auth.ClusterName, AuthServiceName: cfg.Hostname, DataDir: cfg.DataDir, @@ -647,9 +664,8 @@ func (process *TeleportProcess) initProxy() error { return trace.Wrap(err) } } - myRole := teleport.RoleProxy - process.RegisterWithAuthServer(process.Config.Token, myRole, ProxyIdentityEvent) + process.RegisterWithAuthServer(process.Config.Token, teleport.RoleProxy, ProxyIdentityEvent) process.RegisterFunc(func() error { eventsC := make(chan Event) process.WaitForEvent(ProxyIdentityEvent, eventsC, make(chan struct{})) diff --git a/lib/services/clusterconfig.go b/lib/services/clusterconfig.go new file mode 100644 index 0000000000000..533eb48ab73ae --- /dev/null +++ b/lib/services/clusterconfig.go @@ -0,0 +1,246 @@ +/* +Copyright 2017 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package services + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/gravitational/teleport/lib/defaults" + "github.com/gravitational/teleport/lib/utils" + + "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" ) + +// ClusterConfig defines cluster level configuration. This is a configuration +// resource, never create more than one instance of it. +type ClusterConfig interface { + // Resource provides common resource properties.
+ Resource + + // GetSessionRecording gets where the session is being recorded. + GetSessionRecording() RecordingType + + // SetSessionRecording sets where the session is recorded. + SetSessionRecording(RecordingType) + + // CheckAndSetDefaults checks and sets default values for missing fields. + CheckAndSetDefaults() error +} + +// NewClusterConfig is a convenience wrapper to create a ClusterConfig resource. +func NewClusterConfig(spec ClusterConfigSpecV3) (ClusterConfig, error) { + cc := ClusterConfigV3{ + Kind: KindClusterConfig, + Version: V3, + Metadata: Metadata{ + Name: MetaNameClusterConfig, + Namespace: defaults.Namespace, + }, + Spec: spec, + } + if err := cc.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + return &cc, nil +} + +// ClusterConfigV3 implements the ClusterConfig interface. +type ClusterConfigV3 struct { + // Kind is a resource kind - always resource. + Kind string `json:"kind"` + + // Version is a resource version. + Version string `json:"version"` + + // Metadata is metadata about the resource. + Metadata Metadata `json:"metadata"` + + // Spec is the specification of the resource. + Spec ClusterConfigSpecV3 `json:"spec"` +} + +// RecordingType holds where the session will be recorded. +type RecordingType string + +const ( + // RecordAtNode is the default. Sessions are recorded at Teleport nodes. + RecordAtNode RecordingType = "node" + + // RecordAtProxy enables the recording proxy which intercepts and records all sessions. + RecordAtProxy RecordingType = "proxy" + + // RecordOff is used to disable session recording completely. + RecordOff RecordingType = "off" ) + +// ClusterConfigSpecV3 is the actual data we care about for ClusterConfig. +type ClusterConfigSpecV3 struct { + // SessionRecording controls where (or if) the session is recorded. + SessionRecording RecordingType `json:"session_recording"` +} + +// GetName returns the name of the resource. +func (c *ClusterConfigV3) GetName() string { + return c.Metadata.Name +} + +// SetName sets the name of the resource. +func (c *ClusterConfigV3) SetName(e string) { + c.Metadata.Name = e +} + +// Expiry returns object expiry setting +func (c *ClusterConfigV3) Expiry() time.Time { + return c.Metadata.Expiry() +} + +// SetExpiry sets expiry time for the object +func (c *ClusterConfigV3) SetExpiry(expires time.Time) { + c.Metadata.SetExpiry(expires) +} + +// SetTTL sets Expires header using realtime clock +func (c *ClusterConfigV3) SetTTL(clock clockwork.Clock, ttl time.Duration) { + c.Metadata.SetTTL(clock, ttl) +} + +// GetMetadata returns object metadata +func (c *ClusterConfigV3) GetMetadata() Metadata { + return c.Metadata +} + +// GetSessionRecording returns where the sessions are being recorded. +func (c *ClusterConfigV3) GetSessionRecording() RecordingType { + return c.Spec.SessionRecording +} + +// SetSessionRecording sets where the sessions are recorded. +func (c *ClusterConfigV3) SetSessionRecording(s RecordingType) { + c.Spec.SessionRecording = s +} + +// CheckAndSetDefaults checks validity of all parameters and sets defaults.
+func (c *ClusterConfigV3) CheckAndSetDefaults() error { + // make sure we have defaults for all metadata fields + err := c.Metadata.CheckAndSetDefaults() + if err != nil { + return trace.Wrap(err) + } + + if c.Spec.SessionRecording == "" { + c.Spec.SessionRecording = RecordAtNode + } + + // check if the recording type is valid + all := []string{string(RecordAtNode), string(RecordAtProxy), string(RecordOff)} + ok := utils.SliceContainsStr(all, string(c.Spec.SessionRecording)) + if !ok { + return trace.BadParameter("session_recording must either be: %v", strings.Join(all, ",")) + } + + return nil +} + +// String represents a human readable version of the cluster configuration. +func (c *ClusterConfigV3) String() string { + return fmt.Sprintf("ClusterConfig(SessionRecording=%v)", c.Spec.SessionRecording) +} + +// ClusterConfigSpecSchemaTemplate is a template for ClusterConfig schema. +const ClusterConfigSpecSchemaTemplate = `{ + "type": "object", + "additionalProperties": false, + "properties": { + "session_recording": { + "type": "string" + }%v + } +}` + +// GetClusterConfigSchema returns the schema with optionally injected +// schema for extensions. +func GetClusterConfigSchema(extensionSchema string) string { + var clusterConfigSchema string + if extensionSchema == "" { + clusterConfigSchema = fmt.Sprintf(ClusterConfigSpecSchemaTemplate, "") + } else { + clusterConfigSchema = fmt.Sprintf(ClusterConfigSpecSchemaTemplate, ","+extensionSchema) + } + return fmt.Sprintf(V2SchemaTemplate, MetadataSchema, clusterConfigSchema, DefaultDefinitions) +} + +// ClusterConfigMarshaler implements marshal/unmarshal of ClusterConfig implementations +// mostly adds support for extended versions. +type ClusterConfigMarshaler interface { + Marshal(c ClusterConfig, opts ...MarshalOption) ([]byte, error) + Unmarshal(bytes []byte) (ClusterConfig, error) +} + +var clusterConfigMarshaler ClusterConfigMarshaler = &TeleportClusterConfigMarshaler{} + +// SetClusterConfigMarshaler sets the marshaler. +func SetClusterConfigMarshaler(m ClusterConfigMarshaler) { + marshalerMutex.Lock() + defer marshalerMutex.Unlock() + clusterConfigMarshaler = m +} + +// GetClusterConfigMarshaler gets the marshaler. +func GetClusterConfigMarshaler() ClusterConfigMarshaler { + marshalerMutex.Lock() + defer marshalerMutex.Unlock() + return clusterConfigMarshaler +} + +// TeleportClusterConfigMarshaler is used to marshal and unmarshal ClusterConfig. +type TeleportClusterConfigMarshaler struct{} + +// Unmarshal unmarshals ClusterConfig from JSON. +func (t *TeleportClusterConfigMarshaler) Unmarshal(bytes []byte) (ClusterConfig, error) { + var clusterConfig ClusterConfigV3 + + if len(bytes) == 0 { + return nil, trace.BadParameter("missing resource data") + } + + err := utils.UnmarshalWithSchema(GetClusterConfigSchema(""), &clusterConfig, bytes) + if err != nil { + return nil, trace.BadParameter(err.Error()) + } + + err = clusterConfig.CheckAndSetDefaults() + if err != nil { + return nil, trace.Wrap(err) + } + + return &clusterConfig, nil +} + +// Marshal marshals ClusterConfig to JSON.
+func (t *TeleportClusterConfigMarshaler) Marshal(c ClusterConfig, opts ...MarshalOption) ([]byte, error) { + b, err := json.Marshal(c) + if err != nil { + return nil, trace.Wrap(err) + } + + return b, nil +} diff --git a/lib/services/configuration.go b/lib/services/configuration.go index 5f34095f2dd5d..00cf7f7fb829d 100644 --- a/lib/services/configuration.go +++ b/lib/services/configuration.go @@ -34,4 +34,9 @@ type ClusterConfiguration interface { GetAuthPreference() (AuthPreference, error) // SetAuthPreference sets services.AuthPreference from the backend. SetAuthPreference(AuthPreference) error + + // GetClusterConfig gets services.ClusterConfig from the backend. + GetClusterConfig() (ClusterConfig, error) + // SetClusterConfig sets services.ClusterConfig on the backend. + SetClusterConfig(ClusterConfig) error } diff --git a/lib/services/local/configuration.go b/lib/services/local/configuration.go index 4f98185c41e3b..5e1d28fa55637 100644 --- a/lib/services/local/configuration.go +++ b/lib/services/local/configuration.go @@ -120,3 +120,31 @@ func (s *ClusterConfigurationService) SetAuthPreference(preferences services.Aut return nil } + +// GetClusterConfig gets services.ClusterConfig from the backend. +func (s *ClusterConfigurationService) GetClusterConfig() (services.ClusterConfig, error) { + data, err := s.GetVal([]string{"cluster_configuration"}, "general") + if err != nil { + if trace.IsNotFound(err) { + return nil, trace.NotFound("cluster configuration not found") + } + return nil, trace.Wrap(err) + } + + return services.GetClusterConfigMarshaler().Unmarshal(data) +} + +// SetClusterConfig sets services.ClusterConfig on the backend. +func (s *ClusterConfigurationService) SetClusterConfig(c services.ClusterConfig) error { + data, err := services.GetClusterConfigMarshaler().Marshal(c) + if err != nil { + return trace.Wrap(err) + } + + err = s.UpsertVal([]string{"cluster_configuration"}, "general", []byte(data), backend.Forever) + if err != nil { + return trace.Wrap(err) + } + + return nil +} diff --git a/lib/services/local/configuration_test.go b/lib/services/local/configuration_test.go index 3c84c78bd9d8c..541abcac58471 100644 --- a/lib/services/local/configuration_test.go +++ b/lib/services/local/configuration_test.go @@ -81,3 +81,22 @@ func (s *ClusterConfigurationSuite) TestCycle(c *check.C) { c.Assert(gotAP.GetType(), check.Equals, "local") c.Assert(gotAP.GetSecondFactor(), check.Equals, "otp") } + +func (s *ClusterConfigurationSuite) TestSessionRecording(c *check.C) { + // don't allow invalid session recording values + clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{ + SessionRecording: "foo", + }) + c.Assert(err, check.NotNil) + + // default is to record at the node + clusterConfig, err = services.NewClusterConfig(services.ClusterConfigSpecV3{}) + c.Assert(err, check.IsNil) + recordingType := clusterConfig.GetSessionRecording() + c.Assert(recordingType, check.Equals, services.RecordAtNode) + + // update sessions to be recorded at the proxy and check again + clusterConfig.SetSessionRecording(services.RecordAtProxy) + recordingType = clusterConfig.GetSessionRecording() + c.Assert(recordingType, check.Equals, services.RecordAtProxy) +} diff --git a/lib/services/resource.go b/lib/services/resource.go index 8b27258bb084a..500e22c389548 100644 --- a/lib/services/resource.go +++ b/lib/services/resource.go @@ -113,6 +113,12 @@ const ( // KindAuthPreference is the type of authentication for this cluster. 
MetaNameClusterAuthPreference = "cluster-auth-preference" + // KindClusterConfig is the resource that holds cluster level configuration. + KindClusterConfig = "cluster_config" + + // MetaNameClusterConfig is the exact name of the singleton resource. + MetaNameClusterConfig = "cluster-config" + + // KindClusterName is a type of configuration resource that contains the cluster name. KindClusterName = "cluster_name" diff --git a/lib/srv/sshserver_test.go b/lib/srv/sshserver_test.go index 0a72d6f3f840d..4dc43c2473aef 100644 --- a/lib/srv/sshserver_test.go +++ b/lib/srv/sshserver_test.go @@ -93,7 +93,7 @@ func (s *SrvSuite) SetUpTest(c *C) { var err error s.dir = c.MkDir() - s.alog, err = events.NewAuditLog(s.dir) + s.alog, err = events.NewAuditLog(s.dir, true) c.Assert(err, IsNil) u, err := user.Current() diff --git a/lib/state/cachingaccesspoint.go b/lib/state/cachingaccesspoint.go index e46747e8b0b06..f7f660d36cac5 100644 --- a/lib/state/cachingaccesspoint.go +++ b/lib/state/cachingaccesspoint.go @@ -55,7 +55,7 @@ func init() { type CachingAuthClient struct { Config *log.Entry - // ap points to the access ponit we're caching access to: + // ap points to the access point we're caching access to: ap auth.AccessPoint // lastErrorTime is a timestamp of the last error when talking to the AP @@ -65,6 +65,7 @@ type CachingAuthClient struct { access services.Access trust services.Trust presence services.Presence + config services.ClusterConfiguration } // Config is CachingAuthClient config @@ -113,6 +114,7 @@ func NewCachingAuthClient(config Config) (*CachingAuthClient, error) { trust: local.NewCAService(config.Backend), access: local.NewAccessService(config.Backend), presence: local.NewPresenceService(config.Backend), + config: local.NewClusterConfigurationService(config.Backend), Entry: log.WithFields(log.Fields{ trace.Component: teleport.ComponentCachingClient, }), @@ -133,6 +135,8 @@ func (cs *CachingAuthClient) fetchAll() error { var errors []error _, err := cs.GetDomainName() errors = append(errors, err) + _, err = cs.GetClusterConfig() + errors = append(errors, err) + _, err = cs.GetRoles() errors = append(errors, err) namespaces, err := cs.GetNamespaces() @@ -188,6 +192,23 @@ func (cs *CachingAuthClient) GetDomainName() (clusterName string, err error) { return clusterName, err } +func (cs *CachingAuthClient) GetClusterConfig() (clusterConfig services.ClusterConfig, err error) { + err = cs.try(func() error { + clusterConfig, err = cs.ap.GetClusterConfig() + return err + }) + if err != nil { + if trace.IsConnectionProblem(err) { + return cs.config.GetClusterConfig() + } + return nil, trace.Wrap(err) + } + if err = cs.config.SetClusterConfig(clusterConfig); err != nil { + return nil, trace.Wrap(err) + } + return clusterConfig, nil +} + // GetRoles is a part of auth.AccessPoint implementation func (cs *CachingAuthClient) GetRoles() (roles []services.Role, err error) { err = cs.try(func() error { diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index e9c2151ff908d..1d2ca680c618b 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -117,7 +117,7 @@ func (s *WebSuite) SetUpSuite(c *C) { sessionStreamPollPeriod = time.Millisecond s.logDir = c.MkDir() - s.auditLog, err = events.NewAuditLog(s.logDir) + s.auditLog, err = events.NewAuditLog(s.logDir, true) c.Assert(err, IsNil) c.Assert(s.auditLog, NotNil) s.mockU2F, err = mocku2f.Create()
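
To show how the pieces in this diff fit together, below is a minimal, hypothetical Go sketch (not part of the change itself) that exercises the new cluster configuration API end to end. It assumes the caller already holds a connected *auth.Client; the helper name disableSessionRecording and the package name are invented for illustration, while services.NewClusterConfig, the RecordOff constant, and the SetClusterConfig/GetClusterConfig client methods come directly from the code above. The file-configuration equivalent is the new "session_recording" key in the auth_service section, which fc.Auth.SessionRecording.Parse() turns into the same ClusterConfig resource.

package example

import (
	"fmt"

	"github.com/gravitational/teleport/lib/auth"
	"github.com/gravitational/teleport/lib/services"

	"github.com/gravitational/trace"
)

// disableSessionRecording is a hypothetical helper: it builds a ClusterConfig
// resource with session recording turned off, pushes it to the auth server,
// then reads it back to confirm the stored value.
func disableSessionRecording(clt *auth.Client) error {
	// NewClusterConfig validates the spec; allowed session_recording values
	// are "node" (the default), "proxy", and "off".
	cc, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
		SessionRecording: services.RecordOff,
	})
	if err != nil {
		return trace.Wrap(err)
	}

	// SetClusterConfig POSTs the resource to /:version/configuration on the auth API.
	if err := clt.SetClusterConfig(cc); err != nil {
		return trace.Wrap(err)
	}

	// GetClusterConfig fetches the stored resource back from the auth server.
	stored, err := clt.GetClusterConfig()
	if err != nil {
		return trace.Wrap(err)
	}
	fmt.Printf("session recording is now: %v\n", stored.GetSessionRecording())
	return nil
}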