diff --git a/CHANGELOG.md b/CHANGELOG.md index 138c9508c04..d2daf9aeb6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - During a network upgrade, log migration progress every 2 seconds so they are more helpful and informative. The `LOTUS_MIGRATE_PROGRESS_LOG_SECONDS` environment variable can be used to change this if needed. ([filecoin-project/lotus#12732](https://github.com/filecoin-project/lotus/pull/12732)) - Add F3GetCertificate & F3GetLatestCertificate to the gateway. ([filecoin-project/lotus#12778](https://github.com/filecoin-project/lotus/pull/12778)) - Add Magik's bootstrap node. ([filecoin-project/lotus#12792](https://github.com/filecoin-project/lotus/pull/12792)) +- Lotus now reports the network name as a tag in most metrics. The remaining untagged metrics will be tagged in a follow-up at a later date. ([filecoin-project/lotus#12733](https://github.com/filecoin-project/lotus/pull/12733)) # UNRELEASED v.1.32.0 diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 7f5049caf04..a3b93599f00 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -266,7 +266,11 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co ss.txnViewsCond.L = &ss.txnViewsMx ss.txnSyncCond.L = &ss.txnSyncMx ss.chainSyncCond.L = &ss.chainSyncMx - ss.ctx, ss.cancel = context.WithCancel(context.Background()) + + baseCtx := context.Background() + ctx := metrics.AddNetworkTag(baseCtx) + + ss.ctx, ss.cancel = context.WithCancel(ctx) ss.reifyCond.L = &ss.reifyMx ss.reifyPend = make(map[cid.Cid]struct{}) diff --git a/cmd/lotus-miner/run.go b/cmd/lotus-miner/run.go index e09968165b9..be670287e1a 100644 --- a/cmd/lotus-miner/run.go +++ b/cmd/lotus-miner/run.go @@ -60,6 +60,7 @@ var runCmd = &cli.Command{ tag.Insert(metrics.Commit, build.CurrentCommit), tag.Insert(metrics.NodeType, "miner"), ) + ctx = metrics.AddNetworkTag(ctx) // Register all metric views if err := view.Register( 
metrics.MinerNodeViews..., diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index b7fbd63e695..97d2294b79b 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -207,12 +207,12 @@ var DaemonCmd = &cli.Command{ default: return fmt.Errorf("unrecognized profile type: %q", profile) } - ctx, _ := tag.New(context.Background(), tag.Insert(metrics.Version, build.NodeBuildVersion), tag.Insert(metrics.Commit, build.CurrentCommit), tag.Insert(metrics.NodeType, "chain"), ) + ctx = metrics.AddNetworkTag(ctx) // Register all metric views if err = view.Register( metrics.ChainNodeViews..., diff --git a/metrics/metrics.go b/metrics/metrics.go index c47642dc4a2..75d4b29491f 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -11,6 +11,7 @@ import ( rpcmetrics "github.com/filecoin-project/go-jsonrpc/metrics" "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build/buildconstants" ) // Distribution @@ -30,6 +31,7 @@ var ( Version, _ = tag.NewKey("version") Commit, _ = tag.NewKey("commit") NodeType, _ = tag.NewKey("node_type") + Network, _ = tag.NewKey("network") PeerID, _ = tag.NewKey("peer_id") MinerID, _ = tag.NewKey("miner_id") FailureType, _ = tag.NewKey("failure_type") @@ -192,40 +194,46 @@ var ( Description: "Lotus node information", Measure: LotusInfo, Aggregation: view.LastValue(), - TagKeys: []tag.Key{Version, Commit, NodeType}, + TagKeys: []tag.Key{Version, Commit, NodeType, Network}, } ChainNodeHeightView = &view.View{ Measure: ChainNodeHeight, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } ChainNodeHeightExpectedView = &view.View{ Measure: ChainNodeHeightExpected, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } ChainNodeWorkerHeightView = &view.View{ Measure: ChainNodeWorkerHeight, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } BlockReceivedView = &view.View{ Measure: BlockReceived, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } BlockValidationFailureView = 
&view.View{ Measure: BlockValidationFailure, Aggregation: view.Count(), - TagKeys: []tag.Key{FailureType}, + TagKeys: []tag.Key{FailureType, Network}, } BlockValidationSuccessView = &view.View{ Measure: BlockValidationSuccess, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } BlockValidationDurationView = &view.View{ Measure: BlockValidationDurationMilliseconds, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } BlockDelayView = &view.View{ Measure: BlockDelay, - TagKeys: []tag.Key{MinerID}, + TagKeys: []tag.Key{MinerID, Network}, Aggregation: func() *view.Aggregation { var bounds []float64 for i := 5; i < 29; i++ { // 5-29s, step 1s @@ -244,398 +252,461 @@ var ( IndexerMessageValidationFailureView = &view.View{ Measure: IndexerMessageValidationFailure, Aggregation: view.Count(), - TagKeys: []tag.Key{FailureType, Local}, + TagKeys: []tag.Key{FailureType, Local, Network}, } IndexerMessageValidationSuccessView = &view.View{ Measure: IndexerMessageValidationSuccess, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } MessagePublishedView = &view.View{ Measure: MessagePublished, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } MessageReceivedView = &view.View{ Measure: MessageReceived, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } MessageValidationFailureView = &view.View{ Measure: MessageValidationFailure, Aggregation: view.Count(), - TagKeys: []tag.Key{FailureType, Local}, + TagKeys: []tag.Key{FailureType, Local, Network}, } MessageValidationSuccessView = &view.View{ Measure: MessageValidationSuccess, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } MessageValidationDurationView = &view.View{ Measure: MessageValidationDuration, Aggregation: defaultMillisecondsDistribution, - TagKeys: []tag.Key{MsgValid, Local}, + TagKeys: []tag.Key{MsgValid, Local, Network}, } MpoolGetNonceDurationView = &view.View{ Measure: MpoolGetNonceDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: 
[]tag.Key{Network}, } MpoolGetBalanceDurationView = &view.View{ Measure: MpoolGetBalanceDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } MpoolAddTsDurationView = &view.View{ Measure: MpoolAddTsDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } MpoolAddDurationView = &view.View{ Measure: MpoolAddDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } MpoolPushDurationView = &view.View{ Measure: MpoolPushDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } MpoolMessageCountView = &view.View{ Measure: MpoolMessageCount, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } PeerCountView = &view.View{ Measure: PeerCount, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } PubsubPublishMessageView = &view.View{ Measure: PubsubPublishMessage, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } PubsubDeliverMessageView = &view.View{ Measure: PubsubDeliverMessage, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } PubsubRejectMessageView = &view.View{ Measure: PubsubRejectMessage, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } PubsubDuplicateMessageView = &view.View{ Measure: PubsubDuplicateMessage, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } PubsubPruneMessageView = &view.View{ Measure: PubsubPruneMessage, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } PubsubRecvRPCView = &view.View{ Measure: PubsubRecvRPC, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } PubsubSendRPCView = &view.View{ Measure: PubsubSendRPC, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } PubsubDropRPCView = &view.View{ Measure: PubsubDropRPC, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } APIRequestDurationView = &view.View{ Measure: APIRequestDuration, Aggregation: defaultMillisecondsDistribution, - TagKeys: []tag.Key{APIInterface, Endpoint}, 
+ TagKeys: []tag.Key{APIInterface, Endpoint, Network}, } VMFlushCopyDurationView = &view.View{ Measure: VMFlushCopyDuration, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } VMFlushCopyCountView = &view.View{ Measure: VMFlushCopyCount, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } VMApplyBlocksTotalView = &view.View{ Measure: VMApplyBlocksTotal, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } VMApplyMessagesView = &view.View{ Measure: VMApplyMessages, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } VMApplyEarlyView = &view.View{ Measure: VMApplyEarly, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } VMApplyCronView = &view.View{ Measure: VMApplyCron, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } VMApplyFlushView = &view.View{ Measure: VMApplyFlush, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } VMSendsView = &view.View{ Measure: VMSends, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } VMAppliedView = &view.View{ Measure: VMApplied, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } VMExecutionWaitingView = &view.View{ Measure: VMExecutionWaiting, Aggregation: view.Sum(), - TagKeys: []tag.Key{ExecutionLane}, + TagKeys: []tag.Key{ExecutionLane, Network}, } VMExecutionRunningView = &view.View{ Measure: VMExecutionRunning, Aggregation: view.Sum(), - TagKeys: []tag.Key{ExecutionLane}, + TagKeys: []tag.Key{ExecutionLane, Network}, } // miner WorkerCallsStartedView = &view.View{ Measure: WorkerCallsStarted, Aggregation: view.Count(), - TagKeys: []tag.Key{TaskType, WorkerHostname}, + TagKeys: []tag.Key{TaskType, WorkerHostname, Network}, } WorkerCallsReturnedCountView = &view.View{ Measure: WorkerCallsReturnedCount, Aggregation: view.Count(), - TagKeys: []tag.Key{TaskType, WorkerHostname}, + + TagKeys: []tag.Key{TaskType, WorkerHostname, Network}, } 
WorkerUntrackedCallsReturnedView = &view.View{ - Measure: WorkerUntrackedCallsReturned, + Measure: WorkerUntrackedCallsReturned, + Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } WorkerCallsReturnedDurationView = &view.View{ Measure: WorkerCallsReturnedDuration, Aggregation: workMillisecondsDistribution, - TagKeys: []tag.Key{TaskType, WorkerHostname}, + TagKeys: []tag.Key{TaskType, WorkerHostname, Network}, } SectorStatesView = &view.View{ Measure: SectorStates, Aggregation: view.LastValue(), - TagKeys: []tag.Key{SectorState}, + TagKeys: []tag.Key{SectorState, Network}, } StorageFSAvailableView = &view.View{ Measure: StorageFSAvailable, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageAvailableView = &view.View{ Measure: StorageAvailable, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageReservedView = &view.View{ Measure: StorageReserved, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageLimitUsedView = &view.View{ Measure: StorageLimitUsed, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageCapacityBytesView = &view.View{ Measure: StorageCapacityBytes, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageFSAvailableBytesView = &view.View{ Measure: StorageFSAvailableBytes, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageAvailableBytesView = &view.View{ Measure: StorageAvailableBytes, Aggregation: 
view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageReservedBytesView = &view.View{ Measure: StorageReservedBytes, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageLimitUsedBytesView = &view.View{ Measure: StorageLimitUsedBytes, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } StorageLimitMaxBytesView = &view.View{ Measure: StorageLimitMaxBytes, Aggregation: view.LastValue(), - TagKeys: []tag.Key{StorageID, PathStorage, PathSeal}, + TagKeys: []tag.Key{StorageID, PathStorage, PathSeal, Network}, } SchedAssignerCycleDurationView = &view.View{ Measure: SchedAssignerCycleDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } SchedAssignerCandidatesDurationView = &view.View{ Measure: SchedAssignerCandidatesDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } SchedAssignerWindowSelectionDurationView = &view.View{ Measure: SchedAssignerWindowSelectionDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } SchedAssignerSubmitDurationView = &view.View{ Measure: SchedAssignerSubmitDuration, Aggregation: defaultMillisecondsDistribution, + TagKeys: []tag.Key{Network}, } SchedCycleOpenWindowsView = &view.View{ Measure: SchedCycleOpenWindows, Aggregation: queueSizeDistribution, + TagKeys: []tag.Key{Network}, } SchedCycleQueueSizeView = &view.View{ Measure: SchedCycleQueueSize, Aggregation: queueSizeDistribution, + TagKeys: []tag.Key{Network}, } DagStorePRInitCountView = &view.View{ Measure: DagStorePRInitCount, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } DagStorePRBytesRequestedView = &view.View{ Measure: DagStorePRBytesRequested, Aggregation: view.Sum(), 
- TagKeys: []tag.Key{PRReadType}, + TagKeys: []tag.Key{PRReadType, Network}, } DagStorePRBytesDiscardedView = &view.View{ Measure: DagStorePRBytesDiscarded, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } DagStorePRDiscardCountView = &view.View{ Measure: DagStorePRDiscardCount, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } DagStorePRSeekBackCountView = &view.View{ Measure: DagStorePRSeekBackCount, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } DagStorePRSeekForwardCountView = &view.View{ Measure: DagStorePRSeekForwardCount, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } DagStorePRSeekBackBytesView = &view.View{ Measure: DagStorePRSeekBackBytes, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } DagStorePRSeekForwardBytesView = &view.View{ Measure: DagStorePRSeekForwardBytes, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } DagStorePRAtHitBytesView = &view.View{ Measure: DagStorePRAtHitBytes, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } DagStorePRAtHitCountView = &view.View{ Measure: DagStorePRAtHitCount, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } DagStorePRAtCacheFillCountView = &view.View{ Measure: DagStorePRAtCacheFillCount, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } DagStorePRAtReadBytesView = &view.View{ Measure: DagStorePRAtReadBytes, Aggregation: view.Sum(), - TagKeys: []tag.Key{PRReadSize}, + TagKeys: []tag.Key{PRReadSize, Network}, } DagStorePRAtReadCountView = &view.View{ Measure: DagStorePRAtReadCount, Aggregation: view.Count(), - TagKeys: []tag.Key{PRReadSize}, + TagKeys: []tag.Key{PRReadSize, Network}, } // splitstore SplitstoreMissView = &view.View{ Measure: SplitstoreMiss, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } SplitstoreCompactionTimeSecondsView = &view.View{ Measure: SplitstoreCompactionTimeSeconds, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } SplitstoreCompactionHotView = &view.View{ Measure: 
SplitstoreCompactionHot, Aggregation: view.LastValue(), + TagKeys: []tag.Key{Network}, } SplitstoreCompactionColdView = &view.View{ Measure: SplitstoreCompactionCold, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } SplitstoreCompactionDeadView = &view.View{ Measure: SplitstoreCompactionDead, Aggregation: view.Sum(), + TagKeys: []tag.Key{Network}, } // rcmgr RcmgrAllowConnView = &view.View{ Measure: RcmgrAllowConn, Aggregation: view.Count(), - TagKeys: []tag.Key{Direction, UseFD}, + TagKeys: []tag.Key{Direction, UseFD, Network}, } RcmgrBlockConnView = &view.View{ Measure: RcmgrBlockConn, Aggregation: view.Count(), - TagKeys: []tag.Key{Direction, UseFD}, + TagKeys: []tag.Key{Direction, UseFD, Network}, } RcmgrAllowStreamView = &view.View{ Measure: RcmgrAllowStream, Aggregation: view.Count(), - TagKeys: []tag.Key{PeerID, Direction}, + TagKeys: []tag.Key{PeerID, Direction, Network}, } RcmgrBlockStreamView = &view.View{ Measure: RcmgrBlockStream, Aggregation: view.Count(), - TagKeys: []tag.Key{PeerID, Direction}, + TagKeys: []tag.Key{PeerID, Direction, Network}, } RcmgrAllowPeerView = &view.View{ Measure: RcmgrAllowPeer, Aggregation: view.Count(), - TagKeys: []tag.Key{PeerID}, + TagKeys: []tag.Key{PeerID, Network}, } RcmgrBlockPeerView = &view.View{ Measure: RcmgrBlockPeer, Aggregation: view.Count(), - TagKeys: []tag.Key{PeerID}, + + TagKeys: []tag.Key{PeerID, Network}, } RcmgrAllowProtoView = &view.View{ Measure: RcmgrAllowProto, Aggregation: view.Count(), - TagKeys: []tag.Key{ProtocolID}, + + TagKeys: []tag.Key{ProtocolID, Network}, } RcmgrBlockProtoView = &view.View{ Measure: RcmgrBlockProto, Aggregation: view.Count(), - TagKeys: []tag.Key{ProtocolID}, + + TagKeys: []tag.Key{ProtocolID, Network}, } RcmgrBlockProtoPeerView = &view.View{ Measure: RcmgrBlockProtoPeer, Aggregation: view.Count(), - TagKeys: []tag.Key{ProtocolID, PeerID}, + + TagKeys: []tag.Key{ProtocolID, PeerID, Network}, } RcmgrAllowSvcView = &view.View{ Measure: RcmgrAllowSvc, Aggregation: 
view.Count(), - TagKeys: []tag.Key{ServiceID}, + + TagKeys: []tag.Key{ServiceID, Network}, } RcmgrBlockSvcView = &view.View{ Measure: RcmgrBlockSvc, Aggregation: view.Count(), - TagKeys: []tag.Key{ServiceID}, + + TagKeys: []tag.Key{ServiceID, Network}, } RcmgrBlockSvcPeerView = &view.View{ Measure: RcmgrBlockSvcPeer, Aggregation: view.Count(), - TagKeys: []tag.Key{ServiceID, PeerID}, + + TagKeys: []tag.Key{ServiceID, PeerID, Network}, } RcmgrAllowMemView = &view.View{ Measure: RcmgrAllowMem, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } RcmgrBlockMemView = &view.View{ Measure: RcmgrBlockMem, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } RateLimitedView = &view.View{ Measure: RateLimitCount, Aggregation: view.Count(), + TagKeys: []tag.Key{Network}, } ) @@ -781,3 +852,9 @@ func Timer(ctx context.Context, m *stats.Float64Measure) func() time.Duration { return time.Since(start) } } + +// AddNetworkTag returns a copy of ctx with the Network tag set to the current network (buildconstants.NetworkBundle). +func AddNetworkTag(ctx context.Context) context.Context { + ctx, _ = tag.New(ctx, tag.Upsert(Network, buildconstants.NetworkBundle)) + return ctx +} diff --git a/node/modules/lp2p/rcmgr.go b/node/modules/lp2p/rcmgr.go index 75a09068cc7..337eb21a651 100644 --- a/node/modules/lp2p/rcmgr.go +++ b/node/modules/lp2p/rcmgr.go @@ -179,6 +179,7 @@ type rcmgrMetrics struct{} func (r rcmgrMetrics) AllowConn(dir network.Direction, usefd bool) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) if dir == network.DirInbound { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Direction, "inbound")) } else { @@ -194,6 +195,7 @@ func (r rcmgrMetrics) AllowConn(dir network.Direction, usefd bool) { func (r rcmgrMetrics) BlockConn(dir network.Direction, usefd bool) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) if dir == network.DirInbound { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Direction, "inbound")) } else { @@ -209,6 +211,7 @@ func (r rcmgrMetrics) AllowStream(p peer.ID, dir 
network.Direction) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) if dir == network.DirInbound { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Direction, "inbound")) } else { @@ -219,6 +222,7 @@ func (r rcmgrMetrics) AllowStream(p peer.ID, dir network.Direction) { func (r rcmgrMetrics) BlockStream(p peer.ID, dir network.Direction) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) if dir == network.DirInbound { ctx, _ = tag.New(ctx, tag.Upsert(metrics.Direction, "inbound")) } else { @@ -229,46 +233,54 @@ func (r rcmgrMetrics) BlockStream(p peer.ID, dir network.Direction) { func (r rcmgrMetrics) AllowPeer(p peer.ID) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) stats.Record(ctx, metrics.RcmgrAllowPeer.M(1)) } func (r rcmgrMetrics) BlockPeer(p peer.ID) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) stats.Record(ctx, metrics.RcmgrBlockPeer.M(1)) } func (r rcmgrMetrics) AllowProtocol(proto protocol.ID) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) ctx, _ = tag.New(ctx, tag.Upsert(metrics.ProtocolID, string(proto))) stats.Record(ctx, metrics.RcmgrAllowProto.M(1)) } func (r rcmgrMetrics) BlockProtocol(proto protocol.ID) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) ctx, _ = tag.New(ctx, tag.Upsert(metrics.ProtocolID, string(proto))) stats.Record(ctx, metrics.RcmgrBlockProto.M(1)) } func (r rcmgrMetrics) BlockProtocolPeer(proto protocol.ID, p peer.ID) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) ctx, _ = tag.New(ctx, tag.Upsert(metrics.ProtocolID, string(proto))) stats.Record(ctx, metrics.RcmgrBlockProtoPeer.M(1)) } func (r rcmgrMetrics) AllowService(svc string) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) ctx, _ = tag.New(ctx, tag.Upsert(metrics.ServiceID, svc)) stats.Record(ctx, metrics.RcmgrAllowSvc.M(1)) } func (r rcmgrMetrics) BlockService(svc string) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) ctx, _ = 
tag.New(ctx, tag.Upsert(metrics.ServiceID, svc)) stats.Record(ctx, metrics.RcmgrBlockSvc.M(1)) } func (r rcmgrMetrics) BlockServicePeer(svc string, p peer.ID) { ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) ctx, _ = tag.New(ctx, tag.Upsert(metrics.ServiceID, svc)) stats.Record(ctx, metrics.RcmgrBlockSvcPeer.M(1)) } diff --git a/node/modules/paych.go b/node/modules/paych.go index 4f93bbd6c55..f5cce6a5612 100644 --- a/node/modules/paych.go +++ b/node/modules/paych.go @@ -8,6 +8,7 @@ import ( "go.uber.org/fx" "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -17,7 +18,7 @@ import ( func NewManager(mctx helpers.MetricsCtx, lc fx.Lifecycle, sm stmgr.StateManagerAPI, pchstore *paychmgr.Store, api paychmgr.PaychAPI) *paychmgr.Manager { ctx := helpers.LifecycleCtx(mctx, lc) ctx, shutdown := context.WithCancel(ctx) - + ctx = metrics.AddNetworkTag(ctx) return paychmgr.NewManager(ctx, shutdown, sm, pchstore, api) } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index d965d59ebce..7e9482db67f 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -33,6 +33,7 @@ import ( "github.com/filecoin-project/lotus/chain/lf3" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/metrics" lotusminer "github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -289,7 +290,7 @@ func WindowPostScheduler(fc config.MinerFeeConfig, pc config.ProvingConfig) func ) ctx := helpers.LifecycleCtx(mctx, lc) - + ctx = metrics.AddNetworkTag(ctx) fps, err := wdpost.NewWindowedPoStScheduler(api, fc, pc, as, sealer, verif, sealer, j, []dtypes.MinerAddress{params.Maddr}) if 
err != nil { diff --git a/node/rpc.go b/node/rpc.go index ede1b924cd4..bcf78799ff9 100644 --- a/node/rpc.go +++ b/node/rpc.go @@ -48,7 +48,9 @@ func ServeRPC(h http.Handler, id string, addr multiaddr.Multiaddr) (StopFunc, er Handler: h, ReadHeaderTimeout: 30 * time.Second, BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, id)) + ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) + ctx, _ = tag.New(ctx, tag.Upsert(metrics.APIInterface, id)) return ctx }, } diff --git a/paychmgr/manager.go b/paychmgr/manager.go index 97073801272..13876e1d490 100644 --- a/paychmgr/manager.go +++ b/paychmgr/manager.go @@ -20,6 +20,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/metrics" ) var log = logging.Logger("paych") @@ -82,13 +83,16 @@ func NewManager(ctx context.Context, shutdown func(), sm stmgr.StateManagerAPI, // newManager is used by the tests to supply mocks func newManager(pchstore *Store, pchapi managerAPI) (*Manager, error) { + ctx := context.Background() + ctx = metrics.AddNetworkTag(ctx) + pm := &Manager{ store: pchstore, sa: &stateAccessor{sm: pchapi}, channels: make(map[string]*channelAccessor), pchapi: pchapi, } - pm.ctx, pm.shutdown = context.WithCancel(context.Background()) + pm.ctx, pm.shutdown = context.WithCancel(ctx) return pm, pm.Start() }