Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Send available perfmon data on error #6542

Merged
merged 3 commits into from
Mar 15, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ https://github.com/elastic/beats/compare/v6.0.0-beta2...master[Check the HEAD di
- Fix system.filesystem.used.pct value to match what df reports. {issue}5494[5494]
- Fix namespace disambiguation in Kubernetes state_* metricsets. {issue}6281[6281]
- Fix Kubernetes overview dashboard views for non default time ranges. {issue}6395[6395]
- Fix Windows perfmon metricset so that it sends metrics when an error occurs. {pull}6542[6542]

*Packetbeat*

Expand Down
50 changes: 43 additions & 7 deletions metricbeat/mb/testing/modules.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,42 @@ func ReportingFetch(metricSet mb.ReportingMetricSet) ([]common.MapStr, []error)
return r.events, r.errs
}

// NewReportingMetricSetV2 builds a ReportingMetricSetV2 instance from the
// given configuration. Pair it with ReportingFetchV2 to perform a Fetch
// operation with the MetricSet.
func NewReportingMetricSetV2(t testing.TB, config interface{}) mb.ReportingMetricSetV2 {
	ms := newMetricSet(t, config)

	v2, ok := ms.(mb.ReportingMetricSetV2)
	if !ok {
		t.Fatal("MetricSet does not implement ReportingMetricSetV2")
	}

	return v2
}

// capturingReporterV2 is a reporter that captures in memory every event and
// error passed to it, so tests can inspect what a metricset reported.
type capturingReporterV2 struct {
	events []mb.Event // events received via Event, in order
	errs   []error    // errors received via Error, in order
}

// Event appends the given event to the captured events and returns true to
// indicate the metricset may continue reporting.
func (r *capturingReporterV2) Event(event mb.Event) bool {
	r.events = append(r.events, event)
	return true
}

// Error appends the given error to the captured errors and returns true to
// indicate the metricset may continue reporting.
func (r *capturingReporterV2) Error(err error) bool {
	r.errs = append(r.errs, err)
	return true
}

// ReportingFetchV2 performs a single Fetch on the given reporting metricset
// and returns every event and error it reported during that call.
func ReportingFetchV2(metricSet mb.ReportingMetricSetV2) ([]mb.Event, []error) {
	var rep capturingReporterV2
	metricSet.Fetch(&rep)
	return rep.events, rep.errs
}

// NewPushMetricSet instantiates a new PushMetricSet using the given
// configuration. The ModuleFactory and MetricSetFactory are obtained from the
// global Registry.
Expand Down Expand Up @@ -217,16 +253,16 @@ func NewPushMetricSetV2(t testing.TB, config interface{}) mb.PushMetricSetV2 {
return pushMetricSet
}

// capturingReporterV2 stores all the events and errors from a metricset's
// capturingPushReporterV2 stores all the events and errors from a metricset's
// Run method.
type capturingReporterV2 struct {
type capturingPushReporterV2 struct {
doneC chan struct{}
eventsC chan mb.Event
}

// report writes an event to the output channel and returns true. If the output
// is closed it returns false.
func (r *capturingReporterV2) report(event mb.Event) bool {
func (r *capturingPushReporterV2) report(event mb.Event) bool {
select {
case <-r.doneC:
// Publisher is stopped.
Expand All @@ -237,25 +273,25 @@ func (r *capturingReporterV2) report(event mb.Event) bool {
}

// Event stores the passed-in event into the events array
func (r *capturingReporterV2) Event(event mb.Event) bool {
func (r *capturingPushReporterV2) Event(event mb.Event) bool {
return r.report(event)
}

// Error stores the given error into the errors array.
func (r *capturingReporterV2) Error(err error) bool {
func (r *capturingPushReporterV2) Error(err error) bool {
return r.report(mb.Event{Error: err})
}

// Done returns the Done channel for this reporter.
func (r *capturingReporterV2) Done() <-chan struct{} {
func (r *capturingPushReporterV2) Done() <-chan struct{} {
return r.doneC
}

// RunPushMetricSetV2 run the given push metricset for the specific amount of
// time and returns all of the events and errors that occur during that period.
func RunPushMetricSetV2(timeout time.Duration, waitEvents int, metricSet mb.PushMetricSetV2) []mb.Event {
var (
r = &capturingReporterV2{doneC: make(chan struct{}), eventsC: make(chan mb.Event)}
r = &capturingPushReporterV2{doneC: make(chan struct{}), eventsC: make(chan mb.Event)}
wg sync.WaitGroup
events []mb.Event
)
Expand Down
18 changes: 3 additions & 15 deletions metricbeat/module/windows/perfmon/_meta/data.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{
"@timestamp": "2016-05-23T08:05:34.853Z",
"@timestamp": "2017-10-12T08:05:34.853Z",
"beat": {
"hostname": "host.example.com",
"name": "host.example.com"
Expand All @@ -9,25 +9,13 @@
"name": "perfmon",
"rtt": 115
},
"type": "metricsets",
"windows": {
"perfmon": {
"disk": {
"bytes": {
"read": {
"total": 0
}
}
},
"processor": {
"name": "_Total",
"time": {
"idle": {
"average": {
"ns": 670661.5894039735
}
},
"total": {
"pct": 3.135058464112306
"pct": 1.4663385364361736
}
}
}
Expand Down
79 changes: 61 additions & 18 deletions metricbeat/module/windows/perfmon/_meta/docs.asciidoc
Original file line number Diff line number Diff line change
@@ -1,33 +1,76 @@
The `perfmon` metricset of the Windows module reads Windows
performance counters.
The `perfmon` metricset of the Windows module reads Windows performance
counters.

[float]
=== Configuration

You must configure queries for the Windows performance counters that you wish
to collect. The example below collects processor time and disk writes.
`ignore_non_existent_counters` ignores failures for non-existent counters without
interrupting the service. With `format` you can set the output format for a specific counter.
Possible values are `float` and `long`. If nothing is selected the default value is `float`.
With `instance_name`, you can specify the name of the instance. Use this setting when:
- You want to use an instance name that is different from the computed name. For example, `Total` instead of `_Total`.
- You specify a counter that has no instance. For example, `\TCPIP Performance Diagnostics\IPv4 NBLs/sec indicated without prevalidation`.
For wildcard queries this setting has no effect.

to collect. The example below collects processor time and disk writes every
10 seconds. If either of the counters do not exist it will ignore the error.

[source,yaml]
----
- module: windows
metricsets: ["perfmon"]
metricsets: [perfmon]
period: 10s
perfmon.ignore_non_existent_counters: true
perfmon.counters:
- instance_label: "processor.name"
instance_name: "Total"
measurement_label: "processor.time.total.pct"
- instance_label: processor.name
instance_name: total
measurement_label: processor.time.total.pct
query: '\Processor Information(_Total)\% Processor Time'
- instance_label: "diskio.name"
measurement_label: "diskio.write.bytes"

- instance_label: physical_disk.name
measurement_label: physical_disk.write.per_sec
query: '\PhysicalDisk(*)\Disk Writes/sec'
format: "long"

- instance_label: physical_disk.name
measurement_label: physical_disk.write.time.pct
query: '\PhysicalDisk(*)\% Disk Write Time'
----

*`ignore_non_existent_counters`*:: A boolean option that causes the
metricset to ignore errors caused by counters that do not exist when set to
true. Instead of an error, a message will be logged at the info level stating
that the counter does not exist.

*`counters`*:: Counters specifies a list of queries to perform. Each individual
counter requires three config options - `instance_label`, `measurement_label`,
and `query`.

[float]
==== Counter Configuration

Each item in the `counters` list specifies a perfmon query to perform. In the
events generated by the metricset these configuration options map to the field
values as shown below.

----
"%[instance_label]": "%[instance_name] or <perfmon_counter_name>",
"%[measurement_label]": <perfmon_counter_value>,
----

*`instance_label`*:: The label used to identify the counter instance. This
field is required.

*`instance_name`*:: The instance name to use in the event when the counter's
path (`query`) does not include an instance or when you want to override the
instance name. For example with `\Processor Information(_Total)` the
instance name would be `_Total` and by setting `instance_name: total` you can
override the value.
+
The setting has no effect with wildcard queries (e.g.
`\PhysicalDisk(*)\Disk Writes/sec`).

*`measurement_label`*:: The label used for the value returned by the query.
This field is required.

*`query`*:: The perfmon query. This is the counter path specified in
Performance Data Helper (PDH) syntax. This field is required. For example
`\Processor Information(_Total)\% Processor Time`. An asterisk can be used in
place of an instance name to perform a wildcard query that generates an event
for each counter instance (e.g. `\PhysicalDisk(*)\Disk Writes/sec`).

*`format`*:: Format of the measurement value. The value can be either `float` or
`long`. The default is `float`.

25 changes: 15 additions & 10 deletions metricbeat/module/windows/perfmon/pdh_integration_windows_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,11 @@ import (
"time"
"unsafe"

mbtest "github.com/elastic/beats/metricbeat/mb/testing"

"github.com/pkg/errors"
"github.com/stretchr/testify/assert"

"github.com/elastic/beats/metricbeat/mb"
mbtest "github.com/elastic/beats/metricbeat/mb/testing"
)

const processorTimeCounter = `\Processor Information(_Total)\% Processor Time`
Expand Down Expand Up @@ -38,16 +39,20 @@ func TestData(t *testing.T) {
},
}

f := mbtest.NewEventsFetcher(t, config)

f.Fetch()

ms := mbtest.NewReportingMetricSetV2(t, config)
mbtest.ReportingFetchV2(ms)
time.Sleep(60 * time.Millisecond)

err := mbtest.WriteEvents(f, t)
if err != nil {
t.Fatal("write", err)
events, errs := mbtest.ReportingFetchV2(ms)
if len(errs) > 0 {
t.Fatal(errs)
}
if len(events) == 0 {
t.Fatal("no events received")
}

beatEvent := mbtest.StandardizeEvent(ms, events[0], mb.AddMetricSetInfo)
mbtest.WriteEventToDataJSON(t, beatEvent)
}

func TestQuery(t *testing.T) {
Expand Down Expand Up @@ -306,7 +311,7 @@ func TestWildcardQuery(t *testing.T) {
t.Fatal(err)
}

pctKey, err := values[0].HasKey("processor.time.pct")
pctKey, err := values[0].MetricSetFields.HasKey("processor.time.pct")
if err != nil {
t.Fatal(err)
}
Expand Down
Loading