diff --git a/.circleci/config.yml b/.circleci/config.yml index dafbf4db41a27..5188242c05ef6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -42,6 +42,11 @@ commands: equal: [ "386", << parameters.arch >> ] steps: - run: echo 'export RACE="-race"' >> $BASH_ENV + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - run: echo 'export CGO_ENABLED=1' >> $BASH_ENV - run: | GOARCH=<< parameters.arch >> ./<< parameters.gotestsum >> -- ${RACE} -short ./... package-build: @@ -170,7 +175,8 @@ jobs: - run: git config --system core.longpaths true - run: choco feature enable -n allowGlobalConfirmation - run: 'sh ./scripts/installgo_windows.sh' - - run: choco install mingw --version=12.2.0.03042023 + - run: choco install mingw + - run: echo 'export PATH="$PATH:/c/ProgramData/mingw64/mingw64/bin"' >> $BASH_ENV - test-go: os: windows gotestsum: "gotestsum.exe" diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index b45a6f9d26cd7..315f818c10f66 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -54,7 +54,7 @@ jobs: # Run Linter against code base # ################################ - name: Lint Code Base - uses: super-linter/super-linter@v6.0.0 + uses: super-linter/super-linter@v6.1.1 env: VALIDATE_ALL_CODEBASE: false DEFAULT_BRANCH: master diff --git a/CHANGELOG.md b/CHANGELOG.md index fd444d464a460..e94c6e3f8689d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,54 @@ the `server` tag value for a URI-format address might change in case it contains spaces, backslashes or single-quotes in non-redacted parameters. +## v1.29.5 [2024-02-20] + +### Bugfixes + +- [#14669](https://github.com/influxdata/telegraf/pull/14669) `inputs.filecount` Respect symlink files with FollowSymLinks +- [#14838](https://github.com/influxdata/telegraf/pull/14838) `inputs.gnmi` Normalize path for inline origin handling +- [#14679](https://github.com/influxdata/telegraf/pull/14679) `inputs.kafka_consumer` Fix typo of msg_headers_as_tags +- [#14707](https://github.com/influxdata/telegraf/pull/14707) `inputs.postgresql_extensible` Add support for bool tags +- [#14659](https://github.com/influxdata/telegraf/pull/14659) `inputs.redfish` Resolve iLO4 fan data +- [#14665](https://github.com/influxdata/telegraf/pull/14665) `inputs.snmp_trap` Enable SHA ciphers +- [#14635](https://github.com/influxdata/telegraf/pull/14635) `inputs.vsphere` Use guest.guestId value if set for guest name +- [#14752](https://github.com/influxdata/telegraf/pull/14752) `outputs.mqtt` Retry metrics for server timeout +- [#14770](https://github.com/influxdata/telegraf/pull/14770) `processors.execd` Accept tracking metrics instead of dropping them +- [#14832](https://github.com/influxdata/telegraf/pull/14832) `processors.unpivot` Handle tracking metrics correctly +- [#14654](https://github.com/influxdata/telegraf/pull/14654) `rpm` Ensure telegraf is installed after useradd + +### Dependency Updates + +- [#14690](https://github.com/influxdata/telegraf/pull/14690) `deps` Bump cloud.google.com/go/bigquery from 1.57.1 to 1.58.0 +- [#14772](https://github.com/influxdata/telegraf/pull/14772) `deps` Bump cloud.google.com/go/pubsub from 1.33.0 to 1.36.1 +- [#14819](https://github.com/influxdata/telegraf/pull/14819) `deps` Bump cloud.google.com/go/storage from 1.36.0 to 1.38.0 +- [#14688](https://github.com/influxdata/telegraf/pull/14688) `deps` Bump 
github.com/Azure/azure-event-hubs-go/v3 from 3.6.1 to 3.6.2 +- [#14845](https://github.com/influxdata/telegraf/pull/14845) `deps` Bump github.com/DATA-DOG/go-sqlmock from 1.5.0 to 1.5.2 +- [#14820](https://github.com/influxdata/telegraf/pull/14820) `deps` Bump github.com/IBM/sarama from 1.42.1 to 1.42.2 +- [#14774](https://github.com/influxdata/telegraf/pull/14774) `deps` Bump github.com/awnumar/memguard from 0.22.4-0.20231204102859-fce56aae03b8 to 0.22.4 +- [#14687](https://github.com/influxdata/telegraf/pull/14687) `deps` Bump github.com/cloudevents/sdk-go/v2 from 2.14.0 to 2.15.0 +- [#14769](https://github.com/influxdata/telegraf/pull/14769) `deps` Bump github.com/eclipse/paho.golang from 0.11.0 to 0.20.0 +- [#14775](https://github.com/influxdata/telegraf/pull/14775) `deps` Bump github.com/google/uuid from 1.5.0 to 1.6.0 +- [#14686](https://github.com/influxdata/telegraf/pull/14686) `deps` Bump github.com/gopcua/opcua from 0.4.0 to 0.5.3 +- [#14848](https://github.com/influxdata/telegraf/pull/14848) `deps` Bump github.com/gophercloud/gophercloud from 1.7.0 to 1.9.0 +- [#14755](https://github.com/influxdata/telegraf/pull/14755) `deps` Bump github.com/gwos/tcg/sdk from v0.0.0-20220621192633-df0eac0a1a4c to v8.7.2 +- [#14816](https://github.com/influxdata/telegraf/pull/14816) `deps` Bump github.com/jhump/protoreflect from 1.15.4 to 1.15.6 +- [#14773](https://github.com/influxdata/telegraf/pull/14773) `deps` Bump github.com/klauspost/compress from 1.17.4 to 1.17.6 +- [#14817](https://github.com/influxdata/telegraf/pull/14817) `deps` Bump github.com/miekg/dns from 1.1.57 to 1.1.58 +- [#14766](https://github.com/influxdata/telegraf/pull/14766) `deps` Bump github.com/showwin/speedtest-go from 1.6.7 to 1.6.10 +- [#14765](https://github.com/influxdata/telegraf/pull/14765) `deps` Bump github.com/urfave/cli/v2 from 2.25.7 to 2.27.1 +- [#14818](https://github.com/influxdata/telegraf/pull/14818) `deps` Bump go.opentelemetry.io/collector/pdata from 1.0.1 to 1.1.0 +- [#14768](https://github.com/influxdata/telegraf/pull/14768) `deps` Bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 +- [#14849](https://github.com/influxdata/telegraf/pull/14849) `deps` Bump google.golang.org/api from 0.162.0 to 0.165.0 +- [#14847](https://github.com/influxdata/telegraf/pull/14847) `deps` Bump google.golang.org/grpc from 1.61.0 to 1.61.1 +- [#14689](https://github.com/influxdata/telegraf/pull/14689) `deps` Bump k8s.io/apimachinery from 0.29.0 to 0.29.1 +- [#14767](https://github.com/influxdata/telegraf/pull/14767) `deps` Bump k8s.io/client-go from 0.29.0 to 0.29.1 +- [#14846](https://github.com/influxdata/telegraf/pull/14846) `deps` Bump k8s.io/client-go from 0.29.1 to 0.29.2 +- [#14850](https://github.com/influxdata/telegraf/pull/14850) `deps` Bump super-linter/super-linter from 6.0.0 to 6.1.1 +- [#14771](https://github.com/influxdata/telegraf/pull/14771) `deps` Bump tj-actions/changed-files from 41 to 42 +- [#14757](https://github.com/influxdata/telegraf/pull/14757) `deps` Get rid of golang.org/x/exp and use stable versions instead +- [#14753](https://github.com/influxdata/telegraf/pull/14753) `deps` Use github.com/coreos/go-systemd/v22 instead of git version + ## v1.29.4 [2024-01-31] ### Bugfixes diff --git a/config/config.go b/config/config.go index 33ebf5d252305..2a8448a8ce2a1 100644 --- a/config/config.go +++ b/config/config.go @@ -540,7 +540,10 
@@ func (c *Config) LoadConfigData(data []byte) error { } if len(c.UnusedFields) > 0 { - return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields)) + return fmt.Errorf( + "line %d: configuration specified the fields %q, but they were not used. "+ + "This is either a typo or this config option does not exist in this version.", + tbl.Line, keys(c.UnusedFields)) } // Initialize the file-sorting slices @@ -575,7 +578,9 @@ func (c *Config) LoadConfigData(data []byte) error { pluginName) } if len(c.UnusedFields) > 0 { - return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", + return fmt.Errorf( + "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ + "This is either a typo or this config option does not exist in this version.", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } @@ -598,7 +603,9 @@ func (c *Config) LoadConfigData(data []byte) error { pluginName) } if len(c.UnusedFields) > 0 { - return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", + return fmt.Errorf( + "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ + "This is either a typo or this config option does not exist in this version.", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } @@ -617,7 +624,8 @@ func (c *Config) LoadConfigData(data []byte) error { } if len(c.UnusedFields) > 0 { return fmt.Errorf( - "plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", + "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ + "This is either a typo or this config option does not exist in this version.", name, pluginName, subTable.Line, @@ -639,7 +647,9 @@ func (c *Config) LoadConfigData(data []byte) error { pluginName) } if len(c.UnusedFields) > 0 { - return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", + return fmt.Errorf( + "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. "+ + "This is either a typo or this config option does not exist in this version.", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } @@ -656,7 +666,8 @@ func (c *Config) LoadConfigData(data []byte) error { return fmt.Errorf("unsupported config format: %s", pluginName) } if len(c.UnusedFields) > 0 { - msg := "plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used" + msg := "plugin %s.%s: line %d: configuration specified the fields %q, but they were not used. " + + "This is either a typo or this config option does not exist in this version." return fmt.Errorf(msg, name, pluginName, subTable.Line, keys(c.UnusedFields)) } } diff --git a/config/config_test.go b/config/config_test.go index 64e74c6e86b91..97644ac3f88dc 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -373,52 +373,62 @@ func TestConfig_FieldNotDefined(t *testing.T) { { name: "in input plugin without parser", filename: "./testdata/invalid_field.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. 
" + + "This is either a typo or this config option does not exist in this version.", }, { name: "in input plugin with parser", filename: "./testdata/invalid_field_with_parser.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in input plugin with parser func", filename: "./testdata/invalid_field_with_parserfunc.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in parser of input plugin", filename: "./testdata/invalid_field_in_parser_table.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in parser of input plugin with parser-func", filename: "./testdata/invalid_field_in_parserfunc_table.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in processor plugin without parser", filename: "./testdata/invalid_field_processor.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in processor plugin with parser", filename: "./testdata/invalid_field_processor_with_parser.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in processor plugin with parser func", filename: "./testdata/invalid_field_processor_with_parserfunc.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in parser of processor plugin", filename: "./testdata/invalid_field_processor_in_parser_table.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. " + + "This is either a typo or this config option does not exist in this version.", }, { name: "in parser of processor plugin with parser-func", filename: "./testdata/invalid_field_processor_in_parserfunc_table.toml", - expected: `line 1: configuration specified the fields ["not_a_field"], but they weren't used`, + expected: "line 1: configuration specified the fields [\"not_a_field\"], but they were not used. 
" + + "This is either a typo or this config option does not exist in this version.", }, } diff --git a/go.mod b/go.mod index dd9c9020cc433..6b1314fdbdf17 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 github.com/BurntSushi/toml v1.3.2 github.com/ClickHouse/clickhouse-go v1.5.4 - github.com/DATA-DOG/go-sqlmock v1.5.0 + github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 github.com/IBM/sarama v1.42.2 github.com/Masterminds/sprig v2.22.0+incompatible @@ -95,7 +95,7 @@ require ( github.com/google/licensecheck v0.3.1 github.com/google/uuid v1.6.0 github.com/gopcua/opcua v0.5.3 - github.com/gophercloud/gophercloud v1.7.0 + github.com/gophercloud/gophercloud v1.9.0 github.com/gorcon/rcon v1.3.5 github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket v1.5.1 @@ -174,7 +174,7 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/sleepinggenius2/gosmi v0.4.4 github.com/snowflakedb/gosnowflake v1.7.2 - github.com/srebhan/cborquery v0.0.0-20230626165538-38be85b82316 + github.com/srebhan/cborquery v1.0.1 github.com/srebhan/protobufquery v0.0.0-20230803132024-ae4c0d878e55 github.com/stretchr/testify v1.8.4 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 @@ -208,17 +208,17 @@ require ( golang.org/x/text v0.14.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 gonum.org/v1/gonum v0.14.0 - google.golang.org/api v0.162.0 + google.golang.org/api v0.165.0 google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 - google.golang.org/grpc v1.61.0 + google.golang.org/grpc v1.61.1 google.golang.org/protobuf v1.32.0 gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/olivere/elastic.v5 v5.0.86 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/client-go v0.29.1 + k8s.io/api v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/client-go v0.29.2 layeh.com/radius v0.0.0-20221205141417-e7fbddd11d68 modernc.org/sqlite v1.28.0 ) @@ -228,6 +228,7 @@ require ( github.com/apache/arrow/go/v14 v14.0.2 // indirect github.com/distribution/reference v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.6.0 // indirect github.com/moby/sys/user v0.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect @@ -318,7 +319,6 @@ require ( github.com/echlebek/timeproxy v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/fxamacker/cbor v1.5.1 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect github.com/go-logr/logr v1.4.1 // indirect @@ -469,10 +469,10 @@ require ( go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/consumer v0.84.0 // indirect go.opentelemetry.io/collector/semconv v0.87.0 // indirect - go.opentelemetry.io/otel v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.22.0 // indirect + go.opentelemetry.io/otel v1.23.0 // indirect + go.opentelemetry.io/otel/metric v1.23.0 // indirect go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.22.0 // indirect + go.opentelemetry.io/otel/trace v1.23.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.11.0 // 
indirect go.uber.org/multierr v1.11.0 // indirect @@ -484,7 +484,7 @@ require ( golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 9eec882d3d7ae..9e50cb6f55f93 100644 --- a/go.sum +++ b/go.sum @@ -709,8 +709,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= @@ -1142,8 +1142,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg= -github.com/fxamacker/cbor v1.5.1/go.mod h1:3aPGItF174ni7dDzd6JZ206H8cmr4GDNBGpPa971zsU= +github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= +github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -1390,8 +1390,8 @@ github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopcua/opcua v0.5.3 h1:K5QQhjK9KQxQW8doHL/Cd8oljUeXWnJJsNgP7mOGIhw= github.com/gopcua/opcua v0.5.3/go.mod h1:nrVl4/Rs3SDQRhNQ50EbAiI5JSpDrTG6Frx3s4HLnw4= -github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= -github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.9.0 h1:zKvmHOmHuaZlnx9d2DJpEgbMxrGt/+CJ/bKOKQh9Xzo= +github.com/gophercloud/gophercloud v1.9.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -1644,6 +1644,7 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -2123,8 +2124,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/srebhan/cborquery v0.0.0-20230626165538-38be85b82316 h1:HVv8JjpX24FuI59aET1uInn0ItuEiyj8CZMuR9Uw+lE= -github.com/srebhan/cborquery v0.0.0-20230626165538-38be85b82316/go.mod h1:9vX3Dhehey14KFYwWo4K/4JOJRve6jvQf6R9Y8PymLI= +github.com/srebhan/cborquery v1.0.1 h1:cFG1falVzmlfyVI8tY6hYM7RQqLxFzt9STusdxHoy0U= +github.com/srebhan/cborquery v1.0.1/go.mod h1:GgsaIoCW+qlqyU+cjSeOpaWhbiiMVkA0uU/H3+PWvjQ= github.com/srebhan/protobufquery v0.0.0-20230803132024-ae4c0d878e55 h1:ksmbrLbJAm+8yxB7fJ245usD0b1v9JHBJrWF+WqGyjs= github.com/srebhan/protobufquery v0.0.0-20230803132024-ae4c0d878e55/go.mod h1:SIB3zq5pZq2Ff7aJtCdRpGiHc/meKyMLPEj8F5Tf1j8= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= @@ -2285,22 +2286,22 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.4 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= +go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= +go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= +go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2903,8 +2904,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.162.0 h1:Vhs54HkaEpkMBdgGdOT2P6F0csGG/vxDS0hWHJzmmps= -google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0= +google.golang.org/api v0.165.0 h1:zd5d4JIIIaYYsfVy1HzoXYZ9rWCSBxxAglbczzo7Bgc= +google.golang.org/api v0.165.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3053,8 +3054,8 @@ google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafR google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -3097,8 +3098,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3186,12 +3187,12 @@ honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= diff --git a/internal/internal_test.go b/internal/internal_test.go index a61046bb6a571..fd2b9d0f4ba91 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -759,10 +759,16 @@ func TestTimestampAbbrevWarning(t *testing.T) { log.SetOutput(&buf) defer log.SetOutput(backup) + // Try multiple timestamps with abbreviated timezones in case a user + // is actually in one of the timezones. 
	ts, err := ParseTimestamp("RFC1123", "Mon, 02 Jan 2006 15:04:05 MST", nil)
	require.NoError(t, err)
	require.EqualValues(t, 1136239445, ts.Unix())
+	ts2, err := ParseTimestamp("RFC1123", "Mon, 02 Jan 2006 15:04:05 EST", nil)
+	require.NoError(t, err)
+	require.EqualValues(t, 1136232245, ts2.Unix())
+
	require.Contains(t, buf.String(), "Your config is using abbreviated timezones and parsing was changed in v1.27.0")
 }
diff --git a/internal/snmp/field.go b/internal/snmp/field.go
new file mode 100644
index 0000000000000..603897ca43d36
--- /dev/null
+++ b/internal/snmp/field.go
@@ -0,0 +1,259 @@
+package snmp
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"strconv"
+	"strings"
+
+	"github.com/gosnmp/gosnmp"
+)
+
+// Field holds the configuration for a Field to look up.
+type Field struct {
+	// Name will be the name of the field.
+	Name string
+	// Oid is the OID prefix for this field. The plugin will perform a walk through all
+	// OIDs with this as their parent. For each value found, the plugin will strip
+	// off the OID prefix, and use the remainder as the index. For multiple fields
+	// to show up in the same row, they must share the same index.
+	Oid string
+	// OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index.
+	OidIndexSuffix string
+	// OidIndexLength specifies the length of the index in OID path segments. It can be used to remove sub-identifiers that vary in content or length.
+	OidIndexLength int
+	// IsTag controls whether this OID is output as a tag or a value.
+	IsTag bool
+	// Conversion controls any type conversion that is done on the value.
+	// "float"/"float(0)" will convert the value into a float.
+	// "float(X)" will convert the value into a float and then move the decimal point before the Xth right-most digit.
+	// "int" will convert the value into an integer.
+	// "hwaddr" will convert a 6-byte string to a MAC address.
+	// "ipaddr" will convert the value to an IPv4 or IPv6 address.
+	// "enum"/"enum(1)" will convert the value according to its syntax. (Only supported with the gosmi translator.)
+	Conversion string
+	// Translate tells whether the value of the field should be snmptranslated.
+	Translate bool
+	// SecondaryIndexTable allows merging data from two tables with different indexes;
+	// this field will be used to join them. There can be only one secondary index table.
+	SecondaryIndexTable bool
+	// SecondaryIndexUse marks a field that uses the secondary index and will later be merged
+	// with the primary index using SecondaryIndexTable. SecondaryIndexTable and SecondaryIndexUse are exclusive.
+	SecondaryIndexUse bool
+	// SecondaryOuterJoin controls whether entries from the secondary table should be added
+	// when the joining index is missing. If set to true, the join is an outer join, and the
+	// index is prepended with "Secondary." for missing values to avoid overlapping
+	// indexes from both tables.
+	// Can be set per field or globally with SecondaryIndexTable; a global true overrides
+	// a per-field false.
+	SecondaryOuterJoin bool
+
+	initialized bool
+	translator  Translator
+}
+
+// Init converts OID names to numbers and sets the Name attribute if unset.
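A minimal usage sketch, not part of the patch itself, of how the Conversion values documented above are applied by the Convert method that follows. It assumes code living inside the telegraf module (the internal/snmp package is not importable from outside the module); names and values are illustrative only:

	// Illustrative sketch of Field.Convert applying the documented conversions.
	package main

	import (
		"fmt"

		"github.com/gosnmp/gosnmp"

		"github.com/influxdata/telegraf/internal/snmp"
	)

	func main() {
		// "float(2)" divides the raw value by 10^2.
		f := snmp.Field{Name: "load", Conversion: "float(2)"}
		v, err := f.Convert(gosnmp.SnmpPDU{Value: 12345})
		fmt.Println(v, err) // 123.45 <nil>

		// "hwaddr" renders a 6-byte octet string as a MAC address.
		mac := snmp.Field{Name: "ifPhysAddress", Conversion: "hwaddr"}
		m, err := mac.Convert(gosnmp.SnmpPDU{Value: []byte{0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e}})
		fmt.Println(m, err) // 00:1a:2b:3c:4d:5e <nil>
	}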
+func (f *Field) Init(tr Translator) error { + if f.initialized { + return nil + } + + f.translator = tr + + // check if oid needs translation or name is not set + if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { + _, oidNum, oidText, conversion, err := f.translator.SnmpTranslate(f.Oid) + if err != nil { + return fmt.Errorf("translating: %w", err) + } + f.Oid = oidNum + if f.Name == "" { + f.Name = oidText + } + if f.Conversion == "" { + f.Conversion = conversion + } + //TODO use textual convention conversion from the MIB + } + + if f.SecondaryIndexTable && f.SecondaryIndexUse { + return errors.New("SecondaryIndexTable and UseSecondaryIndex are exclusive") + } + + if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin { + return errors.New("SecondaryOuterJoin set to true, but field is not being used in join") + } + + f.initialized = true + return nil +} + +// fieldConvert converts from any type according to the conv specification +func (f *Field) Convert(ent gosnmp.SnmpPDU) (v interface{}, err error) { + if f.Conversion == "" { + if bs, ok := ent.Value.([]byte); ok { + return string(bs), nil + } + return ent.Value, nil + } + + var d int + if _, err := fmt.Sscanf(f.Conversion, "float(%d)", &d); err == nil || f.Conversion == "float" { + v = ent.Value + switch vt := v.(type) { + case float32: + v = float64(vt) / math.Pow10(d) + case float64: + v = vt / math.Pow10(d) + case int: + v = float64(vt) / math.Pow10(d) + case int8: + v = float64(vt) / math.Pow10(d) + case int16: + v = float64(vt) / math.Pow10(d) + case int32: + v = float64(vt) / math.Pow10(d) + case int64: + v = float64(vt) / math.Pow10(d) + case uint: + v = float64(vt) / math.Pow10(d) + case uint8: + v = float64(vt) / math.Pow10(d) + case uint16: + v = float64(vt) / math.Pow10(d) + case uint32: + v = float64(vt) / math.Pow10(d) + case uint64: + v = float64(vt) / math.Pow10(d) + case []byte: + vf, _ := strconv.ParseFloat(string(vt), 64) + v = vf / math.Pow10(d) + case string: + vf, _ := strconv.ParseFloat(vt, 64) + v = vf / math.Pow10(d) + } + return v, nil + } + + if f.Conversion == "int" { + v = ent.Value + switch vt := v.(type) { + case float32: + v = int64(vt) + case float64: + v = int64(vt) + case int: + v = int64(vt) + case int8: + v = int64(vt) + case int16: + v = int64(vt) + case int32: + v = int64(vt) + case int64: + v = vt + case uint: + v = int64(vt) + case uint8: + v = int64(vt) + case uint16: + v = int64(vt) + case uint32: + v = int64(vt) + case uint64: + v = int64(vt) + case []byte: + v, _ = strconv.ParseInt(string(vt), 10, 64) + case string: + v, _ = strconv.ParseInt(vt, 10, 64) + } + return v, nil + } + + if f.Conversion == "hwaddr" { + switch vt := ent.Value.(type) { + case string: + v = net.HardwareAddr(vt).String() + case []byte: + v = net.HardwareAddr(vt).String() + default: + return nil, fmt.Errorf("invalid type (%T) for hwaddr conversion", v) + } + return v, nil + } + + split := strings.Split(f.Conversion, ":") + if split[0] == "hextoint" && len(split) == 3 { + endian := split[1] + bit := split[2] + + bv, ok := ent.Value.([]byte) + if !ok { + return ent.Value, nil + } + + switch endian { + case "LittleEndian": + switch bit { + case "uint64": + v = binary.LittleEndian.Uint64(bv) + case "uint32": + v = binary.LittleEndian.Uint32(bv) + case "uint16": + v = binary.LittleEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + case "BigEndian": + switch bit { + case "uint64": + v = 
binary.BigEndian.Uint64(bv) + case "uint32": + v = binary.BigEndian.Uint32(bv) + case "uint16": + v = binary.BigEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + default: + return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) + } + + return v, nil + } + + if f.Conversion == "ipaddr" { + var ipbs []byte + + switch vt := ent.Value.(type) { + case string: + ipbs = []byte(vt) + case []byte: + ipbs = vt + default: + return nil, fmt.Errorf("invalid type (%T) for ipaddr conversion", v) + } + + switch len(ipbs) { + case 4, 16: + v = net.IP(ipbs).String() + default: + return nil, fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs)) + } + + return v, nil + } + + if f.Conversion == "enum" { + return f.translator.SnmpFormatEnum(ent.Name, ent.Value, false) + } + + if f.Conversion == "enum(1)" { + return f.translator.SnmpFormatEnum(ent.Name, ent.Value, true) + } + + return nil, fmt.Errorf("invalid conversion type %q", f.Conversion) +} diff --git a/internal/snmp/mib_loader.go b/internal/snmp/mib_loader.go new file mode 100644 index 0000000000000..e3bdc4ee0ce7f --- /dev/null +++ b/internal/snmp/mib_loader.go @@ -0,0 +1,140 @@ +package snmp + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/sleepinggenius2/gosmi" + + "github.com/influxdata/telegraf" +) + +// must init, append path for each directory, load module for every file +// or gosmi will fail without saying why +var m sync.Mutex +var once sync.Once +var cache = make(map[string]bool) + +type MibLoader interface { + // appendPath takes the path of a directory + appendPath(path string) + + // loadModule takes the name of a file in one of the + // directories. Basename only, no relative or absolute path + loadModule(path string) error +} + +type GosmiMibLoader struct{} + +func (*GosmiMibLoader) appendPath(path string) { + m.Lock() + defer m.Unlock() + + gosmi.AppendPath(path) +} + +func (*GosmiMibLoader) loadModule(path string) error { + m.Lock() + defer m.Unlock() + + _, err := gosmi.LoadModule(path) + return err +} + +// will give all found folders to gosmi and load in all modules found in the folders +func LoadMibsFromPath(paths []string, log telegraf.Logger, loader MibLoader) error { + folders, err := walkPaths(paths, log) + if err != nil { + return err + } + for _, path := range folders { + loader.appendPath(path) + modules, err := os.ReadDir(path) + if err != nil { + log.Warnf("Can't read directory %v", modules) + continue + } + + for _, entry := range modules { + info, err := entry.Info() + if err != nil { + log.Warnf("Couldn't get info for %v: %v", entry.Name(), err) + continue + } + if info.Mode()&os.ModeSymlink != 0 { + symlink := filepath.Join(path, info.Name()) + target, err := filepath.EvalSymlinks(symlink) + if err != nil { + log.Warnf("Couldn't evaluate symbolic links for %v: %v", symlink, err) + continue + } + //replace symlink's info with the target's info + info, err = os.Lstat(target) + if err != nil { + log.Warnf("Couldn't stat target %v: %v", target, err) + continue + } + } + if info.Mode().IsRegular() { + err := loader.loadModule(info.Name()) + if err != nil { + log.Warnf("Couldn't load module %v: %v", info.Name(), err) + continue + } + } + } + } + return nil +} + +// should walk the paths given and find all folders +func walkPaths(paths []string, log telegraf.Logger) ([]string, error) { + once.Do(gosmi.Init) + folders := []string{} + + for _, mibPath := range paths { + // Check if we loaded that path 
already and skip it if so + m.Lock() + cached := cache[mibPath] + cache[mibPath] = true + m.Unlock() + if cached { + continue + } + + err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { + if info == nil { + log.Warnf("No mibs found") + if os.IsNotExist(err) { + log.Warnf("MIB path doesn't exist: %q", mibPath) + } else if err != nil { + return err + } + return nil + } + + if info.Mode()&os.ModeSymlink != 0 { + target, err := filepath.EvalSymlinks(path) + if err != nil { + log.Warnf("Couldn't evaluate symbolic links for %v: %v", path, err) + } + info, err = os.Lstat(target) + if err != nil { + log.Warnf("Couldn't stat target %v: %v", target, err) + } + path = target + } + if info.IsDir() { + folders = append(folders, path) + } + + return nil + }) + if err != nil { + return folders, fmt.Errorf("couldn't walk path %q: %w", mibPath, err) + } + } + return folders, nil +} diff --git a/internal/snmp/mib_loader_test.go b/internal/snmp/mib_loader_test.go new file mode 100644 index 0000000000000..88ff48e9281cb --- /dev/null +++ b/internal/snmp/mib_loader_test.go @@ -0,0 +1,87 @@ +package snmp + +import ( + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +type TestingMibLoader struct { + folders []string + files []string +} + +func (t *TestingMibLoader) appendPath(path string) { + t.folders = append(t.folders, path) +} + +func (t *TestingMibLoader) loadModule(path string) error { + t.files = append(t.files, path) + return nil +} +func TestFolderLookup(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on windows") + } + + tests := []struct { + name string + mibPath [][]string + paths [][]string + files []string + }{ + { + name: "loading folders", + mibPath: [][]string{{"testdata", "loadMibsFromPath", "root"}}, + paths: [][]string{ + {"testdata", "loadMibsFromPath", "root"}, + {"testdata", "loadMibsFromPath", "root", "dirOne"}, + {"testdata", "loadMibsFromPath", "root", "dirOne", "dirTwo"}, + {"testdata", "loadMibsFromPath", "linkTarget"}, + }, + files: []string{"empty", "emptyFile"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + loader := TestingMibLoader{} + + var givenPath []string + for _, paths := range tt.mibPath { + rootPath := filepath.Join(paths...) + givenPath = append(givenPath, rootPath) + } + + err := LoadMibsFromPath(givenPath, testutil.Logger{}, &loader) + require.NoError(t, err) + + var folders []string + for _, pathSlice := range tt.paths { + path := filepath.Join(pathSlice...) + folders = append(folders, path) + } + require.Equal(t, folders, loader.folders) + + require.Equal(t, tt.files, loader.files) + }) + } +} + +func TestMissingMibPath(t *testing.T) { + log := testutil.Logger{} + path := []string{"non-existing-directory"} + require.NoError(t, LoadMibsFromPath(path, log, &GosmiMibLoader{})) +} + +func BenchmarkMibLoading(b *testing.B) { + log := testutil.Logger{} + path := []string{"testdata/gosmi"} + for i := 0; i < b.N; i++ { + require.NoError(b, LoadMibsFromPath(path, log, &GosmiMibLoader{})) + } +} diff --git a/internal/snmp/table.go b/internal/snmp/table.go new file mode 100644 index 0000000000000..6bc712e09f4fd --- /dev/null +++ b/internal/snmp/table.go @@ -0,0 +1,315 @@ +package snmp + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/gosnmp/gosnmp" +) + +// Table holds the configuration for a SNMP table. +type Table struct { + // Name will be the name of the measurement. 
+ Name string + + // Which tags to inherit from the top-level config. + InheritTags []string + + // Adds each row's table index as a tag. + IndexAsTag bool + + // Fields is the tags and values to look up. + Fields []Field `toml:"field"` + + // OID for automatic field population. + // If provided, init() will populate Fields with all the table columns of the + // given OID. + Oid string + + initialized bool + translator Translator +} + +// RTable is the resulting table built from a Table. +type RTable struct { + // Name is the name of the field, copied from Table.Name. + Name string + // Time is the time the table was built. + Time time.Time + // Rows are the rows that were found, one row for each table OID index found. + Rows []RTableRow +} + +// RTableRow is the resulting row containing all the OID values which shared +// the same index. +type RTableRow struct { + // Tags are all the Field values which had IsTag=true. + Tags map[string]string + // Fields are all the Field values which had IsTag=false. + Fields map[string]interface{} +} + +// Init() builds & initializes the nested fields. +func (t *Table) Init(tr Translator) error { + //makes sure oid or name is set in config file + //otherwise snmp will produce metrics with an empty name + if t.Oid == "" && t.Name == "" { + return errors.New("SNMP table in config file is not named. One or both of the oid and name settings must be set") + } + + if t.initialized { + return nil + } + + t.translator = tr + if err := t.initBuild(); err != nil { + return err + } + + secondaryIndexTablePresent := false + // initialize all the nested fields + for i := range t.Fields { + if err := t.Fields[i].Init(t.translator); err != nil { + return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err) + } + if t.Fields[i].SecondaryIndexTable { + if secondaryIndexTablePresent { + return errors.New("only one field can be SecondaryIndexTable") + } + secondaryIndexTablePresent = true + } + } + + t.initialized = true + return nil +} + +// initBuild initializes the table if it has an OID configured. If so, the +// net-snmp tools will be used to look up the OID and auto-populate the table's +// fields. +func (t *Table) initBuild() error { + if t.Oid == "" { + return nil + } + + _, _, oidText, fields, err := t.translator.SnmpTable(t.Oid) + if err != nil { + return err + } + + if t.Name == "" { + t.Name = oidText + } + + knownOIDs := map[string]bool{} + for _, f := range t.Fields { + knownOIDs[f.Oid] = true + } + for _, f := range fields { + if !knownOIDs[f.Oid] { + t.Fields = append(t.Fields, f) + } + } + + return nil +} + +// Build retrieves all the fields specified in the table and constructs the RTable. +func (t Table) Build(gs Connection, walk bool) (*RTable, error) { + rows := map[string]RTableRow{} + + //translation table for secondary index (when preforming join on two tables) + secIdxTab := make(map[string]string) + secGlobalOuterJoin := false + for i, f := range t.Fields { + if f.SecondaryIndexTable { + secGlobalOuterJoin = f.SecondaryOuterJoin + if i != 0 { + t.Fields[0], t.Fields[i] = t.Fields[i], t.Fields[0] + } + break + } + } + + tagCount := 0 + for _, f := range t.Fields { + if f.IsTag { + tagCount++ + } + + if len(f.Oid) == 0 { + return nil, fmt.Errorf("cannot have empty OID on field %s", f.Name) + } + var oid string + if f.Oid[0] == '.' { + oid = f.Oid + } else { + // make sure OID has "." because the BulkWalkAll results do, and the prefix needs to match + oid = "." 
+ f.Oid + } + + // ifv contains a mapping of table OID index to field value + ifv := map[string]interface{}{} + + if !walk { + // This is used when fetching non-table fields. Fields configured a the top + // scope of the plugin. + // We fetch the fields directly, and add them to ifv as if the index were an + // empty string. This results in all the non-table fields sharing the same + // index, and being added on the same row. + if pkt, err := gs.Get([]string{oid}); err != nil { + if errors.Is(err, gosnmp.ErrUnknownSecurityLevel) { + return nil, errors.New("unknown security level (sec_level)") + } else if errors.Is(err, gosnmp.ErrUnknownUsername) { + return nil, errors.New("unknown username (sec_name)") + } else if errors.Is(err, gosnmp.ErrWrongDigest) { + return nil, errors.New("wrong digest (auth_protocol, auth_password)") + } else if errors.Is(err, gosnmp.ErrDecryption) { + return nil, errors.New("decryption error (priv_protocol, priv_password)") + } + return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) + } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance { + ent := pkt.Variables[0] + fv, err := f.Convert(ent) + if err != nil { + return nil, fmt.Errorf("converting %q (OID %s) for field %s: %w", ent.Value, ent.Name, f.Name, err) + } + ifv[""] = fv + } + } else { + err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error { + if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." { + return &walkError{} // break the walk + } + + idx := ent.Name[len(oid):] + if f.OidIndexSuffix != "" { + if !strings.HasSuffix(idx, f.OidIndexSuffix) { + // this entry doesn't match our OidIndexSuffix. skip it + return nil + } + idx = idx[:len(idx)-len(f.OidIndexSuffix)] + } + if f.OidIndexLength != 0 { + i := f.OidIndexLength + 1 // leading separator + idx = strings.Map(func(r rune) rune { + if r == '.' { + i-- + } + if i < 1 { + return -1 + } + return r + }, idx) + } + + // snmptranslate table field value here + if f.Translate { + if entOid, ok := ent.Value.(string); ok { + _, _, oidText, _, err := t.translator.SnmpTranslate(entOid) + if err == nil { + // If no error translating, the original value for ent.Value should be replaced + ent.Value = oidText + } + } + } + + fv, err := f.Convert(ent) + if err != nil { + return &walkError{ + msg: fmt.Sprintf("converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name), + err: err, + } + } + ifv[idx] = fv + return nil + }) + if err != nil { + // Our callback always wraps errors in a walkError. + // If this error isn't a walkError, we know it's not + // from the callback + var walkErr *walkError + if !errors.As(err, &walkErr) { + return nil, fmt.Errorf("performing bulk walk for field %s: %w", f.Name, err) + } + } + } + + for idx, v := range ifv { + if f.SecondaryIndexUse { + if newidx, ok := secIdxTab[idx]; ok { + idx = newidx + } else { + if !secGlobalOuterJoin && !f.SecondaryOuterJoin { + continue + } + idx = ".Secondary" + idx + } + } + rtr, ok := rows[idx] + if !ok { + rtr = RTableRow{} + rtr.Tags = map[string]string{} + rtr.Fields = map[string]interface{}{} + rows[idx] = rtr + } + if t.IndexAsTag && idx != "" { + if idx[0] == '.' 
{ + idx = idx[1:] + } + rtr.Tags["index"] = idx + } + // don't add an empty string + if vs, ok := v.(string); !ok || vs != "" { + if f.IsTag { + if ok { + rtr.Tags[f.Name] = vs + } else { + rtr.Tags[f.Name] = fmt.Sprintf("%v", v) + } + } else { + rtr.Fields[f.Name] = v + } + if f.SecondaryIndexTable { + //indexes are stored here with prepending "." so we need to add them if needed + var vss string + if ok { + vss = "." + vs + } else { + vss = fmt.Sprintf(".%v", v) + } + if idx[0] == '.' { + secIdxTab[vss] = idx + } else { + secIdxTab[vss] = "." + idx + } + } + } + } + } + + rt := RTable{ + Name: t.Name, + Time: time.Now(), //TODO record time at start + Rows: make([]RTableRow, 0, len(rows)), + } + for _, r := range rows { + rt.Rows = append(rt.Rows, r) + } + return &rt, nil +} + +type walkError struct { + msg string + err error +} + +func (e *walkError) Error() string { + return e.msg +} + +func (e *walkError) Unwrap() error { + return e.err +} diff --git a/internal/snmp/table_test.go b/internal/snmp/table_test.go new file mode 100644 index 0000000000000..4731465baa931 --- /dev/null +++ b/internal/snmp/table_test.go @@ -0,0 +1,246 @@ +package snmp + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTableJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + require.Equal(t, "mytable", tb.Name) + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) +} + +func TestTableOuterJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + SecondaryOuterJoin: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + require.Equal(t, "mytable", tb.Name) + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, 
+ "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{ + "index": "Secondary.0", + "myfield4": "foo", + }, + Fields: map[string]interface{}{ + "myfield5": 1, + }, + } + require.Len(t, tb.Rows, 4) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) + require.Contains(t, tb.Rows, rtr4) +} + +func TestTableJoinNoIndexAsTag_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: false, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + require.Equal(t, "mytable", tb.Name) + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + //"index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + //"index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + //"index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) +} diff --git a/plugins/inputs/snmp/testdata/bridgeMib b/internal/snmp/testdata/gosmi/bridgeMib similarity index 100% rename from plugins/inputs/snmp/testdata/bridgeMib rename to internal/snmp/testdata/gosmi/bridgeMib diff --git a/plugins/inputs/snmp/testdata/bridgeMibImports b/internal/snmp/testdata/gosmi/bridgeMibImports similarity index 100% rename from plugins/inputs/snmp/testdata/bridgeMibImports rename to internal/snmp/testdata/gosmi/bridgeMibImports diff --git a/plugins/inputs/snmp/testdata/foo b/internal/snmp/testdata/gosmi/foo similarity index 100% rename from plugins/inputs/snmp/testdata/foo rename to internal/snmp/testdata/gosmi/foo diff --git a/plugins/inputs/snmp/testdata/fooImports b/internal/snmp/testdata/gosmi/fooImports similarity index 100% rename from plugins/inputs/snmp/testdata/fooImports rename to internal/snmp/testdata/gosmi/fooImports diff --git a/plugins/inputs/snmp/testdata/ifPhysAddress b/internal/snmp/testdata/gosmi/ifPhysAddress similarity index 100% rename from plugins/inputs/snmp/testdata/ifPhysAddress rename to internal/snmp/testdata/gosmi/ifPhysAddress diff --git a/plugins/inputs/snmp/testdata/ifPhysAddressImports b/internal/snmp/testdata/gosmi/ifPhysAddressImports similarity index 100% rename from plugins/inputs/snmp/testdata/ifPhysAddressImports rename to internal/snmp/testdata/gosmi/ifPhysAddressImports diff --git a/plugins/inputs/snmp/testdata/server b/internal/snmp/testdata/gosmi/server similarity index 100% rename from plugins/inputs/snmp/testdata/server rename to internal/snmp/testdata/gosmi/server diff --git a/plugins/inputs/snmp/testdata/serverImports 
b/internal/snmp/testdata/gosmi/serverImports similarity index 100% rename from plugins/inputs/snmp/testdata/serverImports rename to internal/snmp/testdata/gosmi/serverImports diff --git a/plugins/inputs/snmp/testdata/tableBuild b/internal/snmp/testdata/gosmi/tableBuild similarity index 100% rename from plugins/inputs/snmp/testdata/tableBuild rename to internal/snmp/testdata/gosmi/tableBuild diff --git a/plugins/inputs/snmp/testdata/tableMib b/internal/snmp/testdata/gosmi/tableMib similarity index 100% rename from plugins/inputs/snmp/testdata/tableMib rename to internal/snmp/testdata/gosmi/tableMib diff --git a/plugins/inputs/snmp/testdata/tableMibImports b/internal/snmp/testdata/gosmi/tableMibImports similarity index 100% rename from plugins/inputs/snmp/testdata/tableMibImports rename to internal/snmp/testdata/gosmi/tableMibImports diff --git a/plugins/inputs/snmp/testdata/tcpMib b/internal/snmp/testdata/gosmi/tcpMib similarity index 100% rename from plugins/inputs/snmp/testdata/tcpMib rename to internal/snmp/testdata/gosmi/tcpMib diff --git a/plugins/inputs/snmp/testdata/tcpMibImports b/internal/snmp/testdata/gosmi/tcpMibImports similarity index 100% rename from plugins/inputs/snmp/testdata/tcpMibImports rename to internal/snmp/testdata/gosmi/tcpMibImports diff --git a/internal/snmp/translate.go b/internal/snmp/translate.go deleted file mode 100644 index dc3798254adb9..0000000000000 --- a/internal/snmp/translate.go +++ /dev/null @@ -1,281 +0,0 @@ -package snmp - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/sleepinggenius2/gosmi" - "github.com/sleepinggenius2/gosmi/types" - - "github.com/influxdata/telegraf" -) - -// must init, append path for each directory, load module for every file -// or gosmi will fail without saying why -var m sync.Mutex -var once sync.Once -var cache = make(map[string]bool) - -type MibLoader interface { - // appendPath takes the path of a directory - appendPath(path string) - - // loadModule takes the name of a file in one of the - // directories. 
Basename only, no relative or absolute path - loadModule(path string) error -} - -type GosmiMibLoader struct{} - -func (*GosmiMibLoader) appendPath(path string) { - m.Lock() - defer m.Unlock() - - gosmi.AppendPath(path) -} - -func (*GosmiMibLoader) loadModule(path string) error { - m.Lock() - defer m.Unlock() - - _, err := gosmi.LoadModule(path) - return err -} - -// will give all found folders to gosmi and load in all modules found in the folders -func LoadMibsFromPath(paths []string, log telegraf.Logger, loader MibLoader) error { - folders, err := walkPaths(paths, log) - if err != nil { - return err - } - for _, path := range folders { - loader.appendPath(path) - modules, err := os.ReadDir(path) - if err != nil { - log.Warnf("Can't read directory %v", modules) - continue - } - - for _, entry := range modules { - info, err := entry.Info() - if err != nil { - log.Warnf("Couldn't get info for %v: %v", entry.Name(), err) - continue - } - if info.Mode()&os.ModeSymlink != 0 { - symlink := filepath.Join(path, info.Name()) - target, err := filepath.EvalSymlinks(symlink) - if err != nil { - log.Warnf("Couldn't evaluate symbolic links for %v: %v", symlink, err) - continue - } - //replace symlink's info with the target's info - info, err = os.Lstat(target) - if err != nil { - log.Warnf("Couldn't stat target %v: %v", target, err) - continue - } - } - if info.Mode().IsRegular() { - err := loader.loadModule(info.Name()) - if err != nil { - log.Warnf("Couldn't load module %v: %v", info.Name(), err) - continue - } - } - } - } - return nil -} - -// should walk the paths given and find all folders -func walkPaths(paths []string, log telegraf.Logger) ([]string, error) { - once.Do(gosmi.Init) - folders := []string{} - - for _, mibPath := range paths { - // Check if we loaded that path already and skip it if so - m.Lock() - cached := cache[mibPath] - cache[mibPath] = true - m.Unlock() - if cached { - continue - } - - err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { - if info == nil { - log.Warnf("No mibs found") - if os.IsNotExist(err) { - log.Warnf("MIB path doesn't exist: %q", mibPath) - } else if err != nil { - return err - } - return nil - } - - if info.Mode()&os.ModeSymlink != 0 { - target, err := filepath.EvalSymlinks(path) - if err != nil { - log.Warnf("Couldn't evaluate symbolic links for %v: %v", path, err) - } - info, err = os.Lstat(target) - if err != nil { - log.Warnf("Couldn't stat target %v: %v", target, err) - } - path = target - } - if info.IsDir() { - folders = append(folders, path) - } - - return nil - }) - if err != nil { - return folders, fmt.Errorf("couldn't walk path %q: %w", mibPath, err) - } - } - return folders, nil -} - -// The following is for snmp_trap -type MibEntry struct { - MibName string - OidText string -} - -func TrapLookup(oid string) (e MibEntry, err error) { - var givenOid types.Oid - if givenOid, err = types.OidFromString(oid); err != nil { - return e, fmt.Errorf("could not convert OID %s: %w", oid, err) - } - - // Get node name - var node gosmi.SmiNode - if node, err = gosmi.GetNodeByOID(givenOid); err != nil { - return e, err - } - e.OidText = node.Name - - // Add not found OID part - if !givenOid.Equals(node.Oid) { - e.OidText += "." 
+ givenOid[len(node.Oid):].String() - } - - // Get module name - module := node.GetModule() - if module.Name != "" { - e.MibName = module.Name - } - - return e, nil -} - -// The following is for snmp - -func GetIndex(mibPrefix string, node gosmi.SmiNode) (col []string, tagOids map[string]struct{}) { - // first attempt to get the table's tags - tagOids = map[string]struct{}{} - - // mimcks grabbing INDEX {} that is returned from snmptranslate -Td MibName - for _, index := range node.GetIndex() { - tagOids[mibPrefix+index.Name] = struct{}{} - } - - // grabs all columns from the table - // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName - _, col = node.GetColumns() - - return col, tagOids -} - -//nolint:revive //Too many return variable but necessary -func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, node gosmi.SmiNode, err error) { - var out gosmi.SmiNode - var end string - if strings.ContainsAny(oid, "::") { - // split given oid - // for example RFC1213-MIB::sysUpTime.0 - s := strings.SplitN(oid, "::", 2) - // moduleName becomes RFC1213 - moduleName := s[0] - module, err := gosmi.GetModule(moduleName) - if err != nil { - return oid, oid, oid, oid, gosmi.SmiNode{}, err - } - if s[1] == "" { - return "", oid, oid, oid, gosmi.SmiNode{}, fmt.Errorf("cannot parse %v", oid) - } - // node becomes sysUpTime.0 - node := s[1] - if strings.ContainsAny(node, ".") { - s = strings.SplitN(node, ".", 2) - // node becomes sysUpTime - node = s[0] - end = "." + s[1] - } - - out, err = module.GetNode(node) - if err != nil { - return oid, oid, oid, oid, out, err - } - - if oidNum = out.RenderNumeric(); oidNum == "" { - return oid, oid, oid, oid, out, fmt.Errorf("cannot translate %v into a numeric OID, please ensure all imported MIBs are in the path", oid) - } - - oidNum = "." + oidNum + end - } else if strings.ContainsAny(oid, "abcdefghijklnmopqrstuvwxyz") { - //handle mixed oid ex. 
.iso.2.3 - s := strings.Split(oid, ".") - for i := range s { - if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { - out, err = gosmi.GetNode(s[i]) - if err != nil { - return oid, oid, oid, oid, out, err - } - s[i] = out.RenderNumeric() - } - } - oidNum = strings.Join(s, ".") - out, _ = gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) - } else { - out, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) - oidNum = oid - // ensure modules are loaded or node will be empty (might not error) - //nolint:nilerr // do not return the err as the oid is numeric and telegraf can continue - if err != nil || out.Name == "iso" { - return oid, oid, oid, oid, out, nil - } - } - - tc := out.GetSubtree() - - for i := range tc { - // case where the mib doesn't have a conversion so Type struct will be nil - // prevents seg fault - if tc[i].Type == nil { - break - } - switch tc[i].Type.Name { - case "MacAddress", "PhysAddress": - conversion = "hwaddr" - case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": - conversion = "ipaddr" - } - } - - oidText = out.RenderQualified() - i := strings.Index(oidText, "::") - if i == -1 { - return "", oid, oid, oid, out, errors.New("not found") - } - mibName = oidText[:i] - oidText = oidText[i+2:] + end - - return mibName, oidNum, oidText, conversion, out, nil -} diff --git a/internal/snmp/translate_test.go b/internal/snmp/translate_test.go deleted file mode 100644 index 9f22947e8ad29..0000000000000 --- a/internal/snmp/translate_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package snmp - -import ( - "path/filepath" - "runtime" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/influxdata/telegraf/testutil" -) - -func TestTrapLookup(t *testing.T) { - tests := []struct { - name string - oid string - expected MibEntry - }{ - { - name: "Known trap OID", - oid: ".1.3.6.1.6.3.1.1.5.1", - expected: MibEntry{ - MibName: "TGTEST-MIB", - OidText: "coldStart", - }, - }, - { - name: "Known trap value OID", - oid: ".1.3.6.1.2.1.1.3.0", - expected: MibEntry{ - MibName: "TGTEST-MIB", - OidText: "sysUpTimeInstance", - }, - }, - { - name: "Unknown enterprise sub-OID", - oid: ".1.3.6.1.4.1.0.1.2.3", - expected: MibEntry{ - MibName: "TGTEST-MIB", - OidText: "enterprises.0.1.2.3", - }, - }, - { - name: "Unknown MIB", - oid: ".1.2.3", - expected: MibEntry{OidText: "iso.2.3"}, - }, - } - - // Load the MIBs - require.NoError(t, LoadMibsFromPath([]string{"testdata/mibs"}, testutil.Logger{}, &GosmiMibLoader{})) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Run the actual test - actual, err := TrapLookup(tt.oid) - require.NoError(t, err) - require.Equal(t, tt.expected, actual) - }) - } -} - -func TestTrapLookupFail(t *testing.T) { - tests := []struct { - name string - oid string - expected string - }{ - { - name: "New top level OID", - oid: ".3.6.1.3.0", - expected: "Could not find node for OID 3.6.1.3.0", - }, - { - name: "Malformed OID", - oid: ".1.3.dod.1.3.0", - expected: "could not convert OID .1.3.dod.1.3.0: strconv.ParseUint: parsing \"dod\": invalid syntax", - }, - } - - // Load the MIBs - require.NoError(t, LoadMibsFromPath([]string{"testdata/mibs"}, testutil.Logger{}, &GosmiMibLoader{})) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Run the actual test - _, err := TrapLookup(tt.oid) - require.EqualError(t, err, tt.expected) - }) - } -} - -type TestingMibLoader struct { - folders []string - files []string -} - -func (t *TestingMibLoader) appendPath(path string) { - t.folders = 
append(t.folders, path) -} - -func (t *TestingMibLoader) loadModule(path string) error { - t.files = append(t.files, path) - return nil -} -func TestFolderLookup(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Skipping on windows") - } - - tests := []struct { - name string - mibPath [][]string - paths [][]string - files []string - }{ - { - name: "loading folders", - mibPath: [][]string{{"testdata", "loadMibsFromPath", "root"}}, - paths: [][]string{ - {"testdata", "loadMibsFromPath", "root"}, - {"testdata", "loadMibsFromPath", "root", "dirOne"}, - {"testdata", "loadMibsFromPath", "root", "dirOne", "dirTwo"}, - {"testdata", "loadMibsFromPath", "linkTarget"}, - }, - files: []string{"empty", "emptyFile"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - loader := TestingMibLoader{} - - var givenPath []string - for _, paths := range tt.mibPath { - rootPath := filepath.Join(paths...) - givenPath = append(givenPath, rootPath) - } - - err := LoadMibsFromPath(givenPath, testutil.Logger{}, &loader) - require.NoError(t, err) - - var folders []string - for _, pathSlice := range tt.paths { - path := filepath.Join(pathSlice...) - folders = append(folders, path) - } - require.Equal(t, folders, loader.folders) - - require.Equal(t, tt.files, loader.files) - }) - } -} diff --git a/internal/snmp/translator.go b/internal/snmp/translator.go index 6a0993a6d1a04..720ee4f15dcde 100644 --- a/internal/snmp/translator.go +++ b/internal/snmp/translator.go @@ -3,3 +3,22 @@ package snmp type TranslatorPlugin interface { SetTranslator(name string) // Agent calls this on inputs before Init } + +type Translator interface { + SnmpTranslate(oid string) ( + mibName string, oidNum string, oidText string, + conversion string, + err error, + ) + + SnmpTable(oid string) ( + mibName string, oidNum string, oidText string, + fields []Field, + err error, + ) + + SnmpFormatEnum(oid string, value interface{}, full bool) ( + formatted string, + err error, + ) +} diff --git a/internal/snmp/translator_gosmi.go b/internal/snmp/translator_gosmi.go new file mode 100644 index 0000000000000..10509c62ea714 --- /dev/null +++ b/internal/snmp/translator_gosmi.go @@ -0,0 +1,208 @@ +package snmp + +import ( + "errors" + "fmt" + "strings" + + "github.com/sleepinggenius2/gosmi" + "github.com/sleepinggenius2/gosmi/models" + "github.com/sleepinggenius2/gosmi/types" + + "github.com/influxdata/telegraf" +) + +type gosmiTranslator struct { +} + +func NewGosmiTranslator(paths []string, log telegraf.Logger) (*gosmiTranslator, error) { + err := LoadMibsFromPath(paths, log, &GosmiMibLoader{}) + if err == nil { + return &gosmiTranslator{}, nil + } + return nil, err +} + +//nolint:revive //function-result-limit conditionally 5 return results allowed +func (g *gosmiTranslator) SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { + mibName, oidNum, oidText, conversion, _, err = snmpTranslateCall(oid) + return mibName, oidNum, oidText, conversion, err +} + +// snmpTable resolves the given OID as a table, providing information about the +// table and fields within. 
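+// Columns listed in the MIB table's INDEX clause are flagged as tags on the returned fields.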
+// +//nolint:revive //Too many return variable but necessary +func (g *gosmiTranslator) SnmpTable(oid string) ( + mibName string, oidNum string, oidText string, + fields []Field, + err error) { + mibName, oidNum, oidText, _, node, err := snmpTranslateCall(oid) + if err != nil { + return "", "", "", nil, fmt.Errorf("translating: %w", err) + } + + mibPrefix := mibName + "::" + + col, tagOids := getIndex(mibPrefix, node) + for _, c := range col { + _, isTag := tagOids[mibPrefix+c] + fields = append(fields, Field{Name: c, Oid: mibPrefix + c, IsTag: isTag}) + } + + return mibName, oidNum, oidText, fields, nil +} + +func (g *gosmiTranslator) SnmpFormatEnum(oid string, value interface{}, full bool) (string, error) { + //nolint:dogsled // only need to get the node + _, _, _, _, node, err := snmpTranslateCall(oid) + + if err != nil { + return "", err + } + + var v models.Value + if full { + v = node.FormatValue(value, models.FormatEnumName, models.FormatEnumValue) + } else { + v = node.FormatValue(value, models.FormatEnumName) + } + + return v.Formatted, nil +} + +func getIndex(mibPrefix string, node gosmi.SmiNode) (col []string, tagOids map[string]struct{}) { + // first attempt to get the table's tags + tagOids = map[string]struct{}{} + + // mimcks grabbing INDEX {} that is returned from snmptranslate -Td MibName + for _, index := range node.GetIndex() { + tagOids[mibPrefix+index.Name] = struct{}{} + } + + // grabs all columns from the table + // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName + _, col = node.GetColumns() + + return col, tagOids +} + +//nolint:revive //Too many return variable but necessary +func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, node gosmi.SmiNode, err error) { + var out gosmi.SmiNode + var end string + if strings.ContainsAny(oid, "::") { + // split given oid + // for example RFC1213-MIB::sysUpTime.0 + s := strings.SplitN(oid, "::", 2) + // moduleName becomes RFC1213 + moduleName := s[0] + module, err := gosmi.GetModule(moduleName) + if err != nil { + return oid, oid, oid, "", gosmi.SmiNode{}, err + } + if s[1] == "" { + return "", oid, oid, "", gosmi.SmiNode{}, fmt.Errorf("cannot parse %v", oid) + } + // node becomes sysUpTime.0 + node := s[1] + if strings.ContainsAny(node, ".") { + s = strings.SplitN(node, ".", 2) + // node becomes sysUpTime + node = s[0] + end = "." + s[1] + } + + out, err = module.GetNode(node) + if err != nil { + return oid, oid, oid, "", out, err + } + + if oidNum = out.RenderNumeric(); oidNum == "" { + return oid, oid, oid, "", out, fmt.Errorf("cannot translate %v into a numeric OID, please ensure all imported MIBs are in the path", oid) + } + + oidNum = "." + oidNum + end + } else if strings.ContainsAny(oid, "abcdefghijklnmopqrstuvwxyz") { + //handle mixed oid ex. 
.iso.2.3 + s := strings.Split(oid, ".") + for i := range s { + if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { + out, err = gosmi.GetNode(s[i]) + if err != nil { + return oid, oid, oid, "", out, err + } + s[i] = out.RenderNumeric() + } + } + oidNum = strings.Join(s, ".") + out, _ = gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) + } else { + out, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + oidNum = oid + // ensure modules are loaded or node will be empty (might not error) + //nolint:nilerr // do not return the err as the oid is numeric and telegraf can continue + if err != nil || out.Name == "iso" { + return oid, oid, oid, "", out, nil + } + } + + tc := out.GetSubtree() + + for i := range tc { + // case where the mib doesn't have a conversion so Type struct will be nil + // prevents seg fault + if tc[i].Type == nil { + break + } + switch tc[i].Type.Name { + case "MacAddress", "PhysAddress": + conversion = "hwaddr" + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": + conversion = "ipaddr" + } + } + + oidText = out.RenderQualified() + i := strings.Index(oidText, "::") + if i == -1 { + return "", oid, oid, "", out, errors.New("not found") + } + mibName = oidText[:i] + oidText = oidText[i+2:] + end + + return mibName, oidNum, oidText, conversion, out, nil +} + +// The following is for snmp_trap +type MibEntry struct { + MibName string + OidText string +} + +func TrapLookup(oid string) (e MibEntry, err error) { + var givenOid types.Oid + if givenOid, err = types.OidFromString(oid); err != nil { + return e, fmt.Errorf("could not convert OID %s: %w", oid, err) + } + + // Get node name + var node gosmi.SmiNode + if node, err = gosmi.GetNodeByOID(givenOid); err != nil { + return e, err + } + e.OidText = node.Name + + // Add not found OID part + if !givenOid.Equals(node.Oid) { + e.OidText += "." 
+ givenOid[len(node.Oid):].String() + } + + // Get module name + module := node.GetModule() + if module.Name != "" { + e.MibName = module.Name + } + + return e, nil +} diff --git a/plugins/inputs/snmp/gosmi_test.go b/internal/snmp/translator_gosmi_test.go similarity index 53% rename from plugins/inputs/snmp/gosmi_test.go rename to internal/snmp/translator_gosmi_test.go index 3e247739a5233..a777ce9312b7a 100644 --- a/plugins/inputs/snmp/gosmi_test.go +++ b/internal/snmp/translator_gosmi_test.go @@ -1,20 +1,17 @@ package snmp import ( - "errors" "path/filepath" "testing" - "time" "github.com/gosnmp/gosnmp" "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/testutil" ) func getGosmiTr(t *testing.T) Translator { - testDataPath, err := filepath.Abs("./testdata") + testDataPath, err := filepath.Abs("./testdata/gosmi") require.NoError(t, err) tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) @@ -31,56 +28,8 @@ func TestGosmiTranslator(t *testing.T) { require.NotNil(t, tr) } -// gosmi uses the same connection struct as netsnmp but has a few -// different test cases, so it has its own copy -var gosmiTsc = &testSNMPConnection{ - host: "tsc", - values: map[string]interface{}{ - ".1.3.6.1.2.1.3.1.1.1.0": "foo", - ".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"), - ".1.3.6.1.2.1.3.1.1.1.2": []byte(""), - ".1.3.6.1.2.1.3.1.1.102": "bad", - ".1.3.6.1.2.1.3.1.1.2.0": 1, - ".1.3.6.1.2.1.3.1.1.2.1": 2, - ".1.3.6.1.2.1.3.1.1.2.2": 0, - ".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3", - ".1.3.6.1.2.1.3.1.1.5.0": 123456, - ".1.0.0.0.1.1.0": "foo", - ".1.0.0.0.1.1.1": []byte("bar"), - ".1.0.0.0.1.1.2": []byte(""), - ".1.0.0.0.1.102": "bad", - ".1.0.0.0.1.2.0": 1, - ".1.0.0.0.1.2.1": 2, - ".1.0.0.0.1.2.2": 0, - ".1.0.0.0.1.3.0": "0.123", - ".1.0.0.0.1.3.1": "0.456", - ".1.0.0.0.1.3.2": "0.000", - ".1.0.0.0.1.3.3": "9.999", - ".1.0.0.0.1.5.0": 123456, - ".1.0.0.1.1": "baz", - ".1.0.0.1.2": 234, - ".1.0.0.1.3": []byte("byte slice"), - ".1.0.0.2.1.5.0.9.9": 11, - ".1.0.0.2.1.5.1.9.9": 22, - ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", - ".1.0.0.3.1.1.10": "instance", - ".1.0.0.3.1.1.11": "instance2", - ".1.0.0.3.1.1.12": "instance3", - ".1.0.0.3.1.2.10": 10, - ".1.0.0.3.1.2.11": 20, - ".1.0.0.3.1.2.12": 20, - ".1.0.0.3.1.3.10": 1, - ".1.0.0.3.1.3.11": 2, - ".1.0.0.3.1.3.12": 3, - }, -} - func TestFieldInitGosmi(t *testing.T) { - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) - require.NoError(t, err) + tr := getGosmiTr(t) translations := []struct { inputOid string @@ -102,124 +51,50 @@ func TestFieldInitGosmi(t *testing.T) { for _, txl := range translations { f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} - err := f.init(tr) - require.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName) + require.NoError(t, f.Init(tr), "inputOid=%q inputName=%q", txl.inputOid, txl.inputName) require.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) require.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) + require.Equal(t, txl.expectedConversion, f.Conversion, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) } } func TestTableInitGosmi(t *testing.T) { - testDataPath, err := 
filepath.Abs("./testdata") - require.NoError(t, err) - - s := &Snmp{ - ClientConfig: snmp.ClientConfig{ - Path: []string{testDataPath}, - Translator: "gosmi", - }, - Tables: []Table{ - {Oid: ".1.3.6.1.2.1.3.1", - Fields: []Field{ - {Oid: ".999", Name: "foo"}, - {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true}, - {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress"}, - }}, - }, - } - err = s.Init() - require.NoError(t, err) - - require.Equal(t, "atTable", s.Tables[0].Name) - - require.Len(t, s.Tables[0].Fields, 5) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".999", Name: "foo", initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true}) -} - -func TestSnmpInitGosmi(t *testing.T) { - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - s := &Snmp{ - Tables: []Table{ - {Oid: "RFC1213-MIB::atTable"}, - }, - Fields: []Field{ - {Oid: "RFC1213-MIB::atPhysAddress"}, - }, - ClientConfig: snmp.ClientConfig{ - Path: []string{testDataPath}, - Translator: "gosmi", - }, - } - - err = s.Init() - require.NoError(t, err) - - require.Len(t, s.Tables[0].Fields, 3) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true, initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", IsTag: true, initialized: true}) - - require.Equal(t, Field{ - Oid: ".1.3.6.1.2.1.3.1.1.2", - Name: "atPhysAddress", - Conversion: "hwaddr", - initialized: true, - }, s.Fields[0]) -} - -func TestSnmpInit_noTranslateGosmi(t *testing.T) { - s := &Snmp{ + tbl := Table{ + Oid: ".1.3.6.1.2.1.3.1", Fields: []Field{ - {Oid: ".9.1.1.1.1", Name: "one", IsTag: true}, - {Oid: ".9.1.1.1.2", Name: "two"}, - {Oid: ".9.1.1.1.3"}, - }, - Tables: []Table{ - {Name: "testing", - Fields: []Field{ - {Oid: ".9.1.1.1.4", Name: "four", IsTag: true}, - {Oid: ".9.1.1.1.5", Name: "five"}, - {Oid: ".9.1.1.1.6"}, - }}, - }, - ClientConfig: snmp.ClientConfig{ - Path: []string{}, - Translator: "gosmi", + {Oid: ".999", Name: "foo"}, + {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true}, + {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress"}, }, } - err := s.Init() - require.NoError(t, err) + tr := getGosmiTr(t) + require.NoError(t, tbl.Init(tr)) - require.Equal(t, ".9.1.1.1.1", s.Fields[0].Oid) - require.Equal(t, "one", s.Fields[0].Name) - require.True(t, s.Fields[0].IsTag) + require.Equal(t, "atTable", tbl.Name) - require.Equal(t, ".9.1.1.1.2", s.Fields[1].Oid) - require.Equal(t, "two", s.Fields[1].Name) - require.False(t, s.Fields[1].IsTag) + require.Len(t, tbl.Fields, 5) - require.Equal(t, ".9.1.1.1.3", s.Fields[2].Oid) - require.Equal(t, ".9.1.1.1.3", s.Fields[2].Name) - require.False(t, s.Fields[2].IsTag) + require.Equal(t, ".999", tbl.Fields[0].Oid) + require.Equal(t, "foo", tbl.Fields[0].Name) + require.False(t, tbl.Fields[0].IsTag) + require.Empty(t, tbl.Fields[0].Conversion) - require.Equal(t, ".9.1.1.1.4", s.Tables[0].Fields[0].Oid) - require.Equal(t, "four", 
s.Tables[0].Fields[0].Name) - require.True(t, s.Tables[0].Fields[0].IsTag) + require.Equal(t, ".1.3.6.1.2.1.3.1.1.1", tbl.Fields[1].Oid) + require.Equal(t, "atIfIndex", tbl.Fields[1].Name) + require.True(t, tbl.Fields[1].IsTag) + require.Empty(t, tbl.Fields[1].Conversion) - require.Equal(t, ".9.1.1.1.5", s.Tables[0].Fields[1].Oid) - require.Equal(t, "five", s.Tables[0].Fields[1].Name) - require.False(t, s.Tables[0].Fields[1].IsTag) + require.Equal(t, ".1.3.6.1.2.1.3.1.1.2", tbl.Fields[2].Oid) + require.Equal(t, "atPhysAddress", tbl.Fields[2].Name) + require.False(t, tbl.Fields[2].IsTag) + require.Equal(t, "hwaddr", tbl.Fields[2].Conversion) - require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Oid) - require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Name) - require.False(t, s.Tables[0].Fields[2].IsTag) + require.Equal(t, ".1.3.6.1.2.1.3.1.1.3", tbl.Fields[4].Oid) + require.Equal(t, "atNetAddress", tbl.Fields[4].Name) + require.True(t, tbl.Fields[4].IsTag) + require.Empty(t, tbl.Fields[4].Conversion) } // TestTableBuild_walk in snmp_test.go is split into two tests here, @@ -259,13 +134,7 @@ func TestTableBuild_walk_noTranslate(t *testing.T) { }, } - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) - require.NoError(t, err) - - tb, err := tbl.Build(gosmiTsc, true, tr) + tb, err := tbl.Build(tsc, true) require.NoError(t, err) require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ @@ -317,12 +186,6 @@ func TestTableBuild_walk_noTranslate(t *testing.T) { } func TestTableBuild_walk_Translate(t *testing.T) { - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) - require.NoError(t, err) - tbl := Table{ Name: "atTable", IndexAsTag: true, @@ -345,9 +208,8 @@ func TestTableBuild_walk_Translate(t *testing.T) { }, } - err = tbl.Init(tr) - require.NoError(t, err) - tb, err := tbl.Build(gosmiTsc, true, tr) + require.NoError(t, tbl.Init(getGosmiTr(t))) + tb, err := tbl.Build(tsc, true) require.NoError(t, err) require.Equal(t, "atTable", tb.Name) @@ -387,12 +249,6 @@ func TestTableBuild_walk_Translate(t *testing.T) { } func TestTableBuild_noWalkGosmi(t *testing.T) { - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) - require.NoError(t, err) - tbl := Table{ Name: "mytable", Fields: []Field{ @@ -421,7 +277,7 @@ func TestTableBuild_noWalkGosmi(t *testing.T) { }, } - tb, err := tbl.Build(gosmiTsc, false, tr) + tb, err := tbl.Build(tsc, false) require.NoError(t, err) rtr := RTableRow{ @@ -432,103 +288,6 @@ func TestTableBuild_noWalkGosmi(t *testing.T) { require.Contains(t, tb.Rows, rtr) } -func TestGatherGosmi(t *testing.T) { - s := &Snmp{ - Agents: []string{"TestGather"}, - Name: "mytable", - Fields: []Field{ - { - Name: "myfield1", - Oid: ".1.0.0.1.1", - IsTag: true, - }, - { - Name: "myfield2", - Oid: ".1.0.0.1.2", - }, - { - Name: "myfield3", - Oid: "1.0.0.1.1", - }, - }, - Tables: []Table{ - { - Name: "myOtherTable", - InheritTags: []string{"myfield1"}, - Fields: []Field{ - { - Name: "myOtherField", - Oid: ".1.0.0.0.1.5", - }, - }, - }, - }, - - connectionCache: []snmpConnection{ - gosmiTsc, - }, - - ClientConfig: snmp.ClientConfig{ - Path: []string{"testdata"}, - Translator: "gosmi", - }, - } - acc := &testutil.Accumulator{} - - tstart := time.Now() - require.NoError(t, s.Gather(acc)) - tstop := 
time.Now() - - require.Len(t, acc.Metrics, 2) - - m := acc.Metrics[0] - require.Equal(t, "mytable", m.Measurement) - require.Equal(t, "tsc", m.Tags[s.AgentHostTag]) - require.Equal(t, "baz", m.Tags["myfield1"]) - require.Len(t, m.Fields, 2) - require.Equal(t, 234, m.Fields["myfield2"]) - require.Equal(t, "baz", m.Fields["myfield3"]) - require.False(t, tstart.After(m.Time)) - require.False(t, tstop.Before(m.Time)) - - m2 := acc.Metrics[1] - require.Equal(t, "myOtherTable", m2.Measurement) - require.Equal(t, "tsc", m2.Tags[s.AgentHostTag]) - require.Equal(t, "baz", m2.Tags["myfield1"]) - require.Len(t, m2.Fields, 1) - require.Equal(t, 123456, m2.Fields["myOtherField"]) -} - -func TestGather_hostGosmi(t *testing.T) { - s := &Snmp{ - Agents: []string{"TestGather"}, - Name: "mytable", - Fields: []Field{ - { - Name: "host", - Oid: ".1.0.0.1.1", - IsTag: true, - }, - { - Name: "myfield2", - Oid: ".1.0.0.1.2", - }, - }, - - connectionCache: []snmpConnection{ - gosmiTsc, - }, - } - - acc := &testutil.Accumulator{} - - require.NoError(t, s.Gather(acc)) - - require.Len(t, acc.Metrics, 1) - m := acc.Metrics[0] - require.Equal(t, "baz", m.Tags["host"]) -} - func TestFieldConvertGosmi(t *testing.T) { testTable := []struct { input interface{} @@ -585,77 +344,18 @@ func TestFieldConvertGosmi(t *testing.T) { } for _, tc := range testTable { - act, err := fieldConvert(getGosmiTr(t), tc.conv, gosnmp.SnmpPDU{Name: ".1.3.6.1.2.1.2.2.1.8", Value: tc.input}) + f := Field{ + Name: "test", + Conversion: tc.conv, + } + require.NoError(t, f.Init(getGosmiTr(t))) + + act, err := f.Convert(gosnmp.SnmpPDU{Name: ".1.3.6.1.2.1.2.2.1.8", Value: tc.input}) require.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) require.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) } } -func TestSnmpTranslateCache_missGosmi(t *testing.T) { - gosmiSnmpTranslateCaches = nil - oid := "IF-MIB::ifPhysAddress.1" - mibName, oidNum, oidText, conversion, err := getGosmiTr(t).SnmpTranslate(oid) - require.Len(t, gosmiSnmpTranslateCaches, 1) - stc := gosmiSnmpTranslateCaches[oid] - require.NotNil(t, stc) - require.Equal(t, mibName, stc.mibName) - require.Equal(t, oidNum, stc.oidNum) - require.Equal(t, oidText, stc.oidText) - require.Equal(t, conversion, stc.conversion) - require.Equal(t, err, stc.err) -} - -func TestSnmpTranslateCache_hitGosmi(t *testing.T) { - gosmiSnmpTranslateCaches = map[string]gosmiSnmpTranslateCache{ - "foo": { - mibName: "a", - oidNum: "b", - oidText: "c", - conversion: "d", - err: errors.New("e"), - }, - } - mibName, oidNum, oidText, conversion, err := getGosmiTr(t).SnmpTranslate("foo") - require.Equal(t, "a", mibName) - require.Equal(t, "b", oidNum) - require.Equal(t, "c", oidText) - require.Equal(t, "d", conversion) - require.Equal(t, errors.New("e"), err) - gosmiSnmpTranslateCaches = nil -} - -func TestSnmpTableCache_missGosmi(t *testing.T) { - gosmiSnmpTableCaches = nil - oid := ".1.0.0.0" - mibName, oidNum, oidText, fields, err := getGosmiTr(t).SnmpTable(oid) - require.Len(t, gosmiSnmpTableCaches, 1) - stc := gosmiSnmpTableCaches[oid] - require.NotNil(t, stc) - require.Equal(t, mibName, stc.mibName) - require.Equal(t, oidNum, stc.oidNum) - require.Equal(t, oidText, stc.oidText) - require.Equal(t, fields, stc.fields) - require.Equal(t, err, stc.err) -} - -func TestSnmpTableCache_hitGosmi(t *testing.T) { - gosmiSnmpTableCaches = map[string]gosmiSnmpTableCache{ - "foo": { - mibName: 
"a", - oidNum: "b", - oidText: "c", - fields: []Field{{Name: "d"}}, - err: errors.New("e"), - }, - } - mibName, oidNum, oidText, fields, err := getGosmiTr(t).SnmpTable("foo") - require.Equal(t, "a", mibName) - require.Equal(t, "b", oidNum) - require.Equal(t, "c", oidText) - require.Equal(t, []Field{{Name: "d"}}, fields) - require.Equal(t, errors.New("e"), err) -} - func TestTableJoin_walkGosmi(t *testing.T) { tbl := Table{ Name: "mytable", @@ -689,13 +389,8 @@ func TestTableJoin_walkGosmi(t *testing.T) { }, } - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) - require.NoError(t, err) - - tb, err := tbl.Build(gosmiTsc, true, tr) + require.NoError(t, tbl.Init(getGosmiTr(t))) + tb, err := tbl.Build(tsc, true) require.NoError(t, err) require.Equal(t, "mytable", tb.Name) @@ -772,13 +467,7 @@ func TestTableOuterJoin_walkGosmi(t *testing.T) { }, } - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) - require.NoError(t, err) - - tb, err := tbl.Build(gosmiTsc, true, tr) + tb, err := tbl.Build(tsc, true) require.NoError(t, err) require.Equal(t, "mytable", tb.Name) @@ -864,13 +553,7 @@ func TestTableJoinNoIndexAsTag_walkGosmi(t *testing.T) { }, } - testDataPath, err := filepath.Abs("./testdata") - require.NoError(t, err) - - tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) - require.NoError(t, err) - - tb, err := tbl.Build(gosmiTsc, true, tr) + tb, err := tbl.Build(tsc, true) require.NoError(t, err) require.Equal(t, "mytable", tb.Name) @@ -913,33 +596,91 @@ func TestTableJoinNoIndexAsTag_walkGosmi(t *testing.T) { require.Contains(t, tb.Rows, rtr3) } -func BenchmarkMibLoading(b *testing.B) { - log := testutil.Logger{} - path := []string{"testdata"} - for i := 0; i < b.N; i++ { - err := snmp.LoadMibsFromPath(path, log, &snmp.GosmiMibLoader{}) - require.NoError(b, err) +func TestCanNotParse(t *testing.T) { + tr := getGosmiTr(t) + f := Field{ + Oid: "RFC1213-MIB::", } + + require.Error(t, f.Init(tr)) } -func TestCanNotParse(t *testing.T) { - s := &Snmp{ - Fields: []Field{ - {Oid: "RFC1213-MIB::"}, +func TestTrapLookup(t *testing.T) { + tests := []struct { + name string + oid string + expected MibEntry + }{ + { + name: "Known trap OID", + oid: ".1.3.6.1.6.3.1.1.5.1", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "coldStart", + }, }, - ClientConfig: snmp.ClientConfig{ - Path: []string{"testdata"}, - Translator: "gosmi", + { + name: "Known trap value OID", + oid: ".1.3.6.1.2.1.1.3.0", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "sysUpTimeInstance", + }, + }, + { + name: "Unknown enterprise sub-OID", + oid: ".1.3.6.1.4.1.0.1.2.3", + expected: MibEntry{ + MibName: "SNMPv2-SMI", + OidText: "enterprises.0.1.2.3", + }, + }, + { + name: "Unknown MIB", + oid: ".1.999", + expected: MibEntry{OidText: "iso.999"}, }, } - err := s.Init() - require.Error(t, err) + // Load the MIBs + getGosmiTr(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Run the actual test + actual, err := TrapLookup(tt.oid) + require.NoError(t, err) + require.Equal(t, tt.expected, actual) + }) + } } -func TestMissingMibPath(t *testing.T) { - log := testutil.Logger{} - path := []string{"non-existing-directory"} - err := snmp.LoadMibsFromPath(path, log, &snmp.GosmiMibLoader{}) - require.NoError(t, err) +func TestTrapLookupFail(t *testing.T) { + tests := []struct { + name 
string + oid string + expected string + }{ + { + name: "New top level OID", + oid: ".3.6.1.3.0", + expected: "Could not find node for OID 3.6.1.3.0", + }, + { + name: "Malformed OID", + oid: ".1.3.dod.1.3.0", + expected: "could not convert OID .1.3.dod.1.3.0: strconv.ParseUint: parsing \"dod\": invalid syntax", + }, + } + + // Load the MIBs + getGosmiTr(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Run the actual test + _, err := TrapLookup(tt.oid) + require.EqualError(t, err, tt.expected) + }) + } } diff --git a/plugins/inputs/snmp/netsnmp.go b/internal/snmp/translator_netsnmp.go similarity index 88% rename from plugins/inputs/snmp/netsnmp.go rename to internal/snmp/translator_netsnmp.go index 6943fc511ac38..66f2e463daf49 100644 --- a/plugins/inputs/snmp/netsnmp.go +++ b/internal/snmp/translator_netsnmp.go @@ -5,21 +5,21 @@ import ( "bytes" "errors" "fmt" - "log" //nolint:depguard // Allow exceptional but valid use of log here. "os/exec" "strings" "sync" - "github.com/influxdata/wlog" + "github.com/influxdata/telegraf" ) // struct that implements the translator interface. This calls existing // code to exec netsnmp's snmptranslate program type netsnmpTranslator struct { + log telegraf.Logger } -func NewNetsnmpTranslator() *netsnmpTranslator { - return &netsnmpTranslator{} +func NewNetsnmpTranslator(log telegraf.Logger) *netsnmpTranslator { + return &netsnmpTranslator{log: log} } type snmpTableCache struct { @@ -35,14 +35,12 @@ var execCommand = exec.Command // execCmd executes the specified command, returning the STDOUT content. // If command exits with error status, the output is captured into the returned error. -func execCmd(arg0 string, args ...string) ([]byte, error) { - if wlog.LogLevel() == wlog.DEBUG { - quoted := make([]string, 0, len(args)) - for _, arg := range args { - quoted = append(quoted, fmt.Sprintf("%q", arg)) - } - log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) +func (n *netsnmpTranslator) execCmd(arg0 string, args ...string) ([]byte, error) { + quoted := make([]string, 0, len(args)) + for _, arg := range args { + quoted = append(quoted, fmt.Sprintf("%q", arg)) } + n.log.Debugf("executing %q %s", arg0, strings.Join(quoted, " ")) out, err := execCommand(arg0, args...).Output() if err != nil { @@ -98,7 +96,7 @@ func (n *netsnmpTranslator) snmpTableCall(oid string) ( // first attempt to get the table's tags tagOids := map[string]struct{}{} // We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info. - if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { + if out, err := n.execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { scanner := bufio.NewScanner(bytes.NewBuffer(out)) for scanner.Scan() { line := scanner.Text() @@ -124,7 +122,7 @@ func (n *netsnmpTranslator) snmpTableCall(oid string) ( } // this won't actually try to run a query. The `-Ch` will just cause it to dump headers. - out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) + out, err := n.execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) if err != nil { return "", "", "", nil, fmt.Errorf("getting table columns: %w", err) } @@ -179,7 +177,7 @@ func (n *netsnmpTranslator) SnmpTranslate(oid string) ( // is worth it. Especially when it would slam the system pretty hard if lots // of lookups are being performed. 
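+ // The translated result is memoized in snmpTranslateCaches, keyed by the requested OID.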
- stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) + stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = n.snmpTranslateCall(oid) snmpTranslateCaches[oid] = stc } @@ -189,12 +187,12 @@ func (n *netsnmpTranslator) SnmpTranslate(oid string) ( } //nolint:revive //function-result-limit conditionally 5 return results allowed -func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { +func (n *netsnmpTranslator) snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { var out []byte if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { - out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) + out, err = n.execCmd("snmptranslate", "-Td", "-Ob", oid) } else { - out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) + out, err = n.execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) var execErr *exec.Error if errors.As(err, &execErr) && errors.Is(execErr, exec.ErrNotFound) { // Silently discard error if snmptranslate not found and we have a numeric OID. diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/internal/snmp/translator_netsnmp_mocks_generate.go similarity index 97% rename from plugins/inputs/snmp/snmp_mocks_generate.go rename to internal/snmp/translator_netsnmp_mocks_generate.go index f77e569019164..7ca46ff29fd34 100644 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ b/internal/snmp/translator_netsnmp_mocks_generate.go @@ -98,5 +98,5 @@ func generate() error { f.Write([]byte("}\n")) f.Close() - return exec.Command("gofmt", "-w", "snmp_mocks_test.go").Run() + return exec.Command("gofmt", "-w", "translator_netsnmp_mocks_test.go").Run() } diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/internal/snmp/translator_netsnmp_mocks_test.go similarity index 100% rename from plugins/inputs/snmp/snmp_mocks_test.go rename to internal/snmp/translator_netsnmp_mocks_test.go diff --git a/internal/snmp/translator_netsnmp_test.go b/internal/snmp/translator_netsnmp_test.go new file mode 100644 index 0000000000000..a5c88132b596d --- /dev/null +++ b/internal/snmp/translator_netsnmp_test.go @@ -0,0 +1,352 @@ +//go:generate go run -tags generate translator_netsnmp_mocks_generate.go +package snmp + +import ( + "testing" + + "github.com/gosnmp/gosnmp" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestFieldInit(t *testing.T) { + translations := []struct { + inputOid string + inputName string + inputConversion string + expectedOid string + expectedName string + expectedConversion string + }{ + {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, + {".1.0.0.0.1.1.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, + {".999", "", "", ".999", ".999", ""}, + {"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""}, + {"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, + {"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""}, + {"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "hwaddr"}, + {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, + {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, + {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, + } + + tr := 
NewNetsnmpTranslator(testutil.Logger{}) + for _, txl := range translations { + f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} + err := f.Init(tr) + require.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName) + require.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) + require.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) + } +} + +func TestTableInit(t *testing.T) { + tbl := Table{ + Oid: ".1.0.0.0", + Fields: []Field{ + {Oid: ".999", Name: "foo"}, + {Oid: "TEST::description", Name: "description", IsTag: true}, + }, + } + err := tbl.Init(NewNetsnmpTranslator(testutil.Logger{})) + require.NoError(t, err) + + require.Equal(t, "testTable", tbl.Name) + + require.Len(t, tbl.Fields, 5) + + require.Equal(t, ".999", tbl.Fields[0].Oid) + require.Equal(t, "foo", tbl.Fields[0].Name) + require.False(t, tbl.Fields[0].IsTag) + require.Empty(t, tbl.Fields[0].Conversion) + + require.Equal(t, ".1.0.0.0.1.1", tbl.Fields[2].Oid) + require.Equal(t, "server", tbl.Fields[2].Name) + require.True(t, tbl.Fields[1].IsTag) + require.Empty(t, tbl.Fields[1].Conversion) + + require.Equal(t, ".1.0.0.0.1.2", tbl.Fields[3].Oid) + require.Equal(t, "connections", tbl.Fields[3].Name) + require.False(t, tbl.Fields[3].IsTag) + require.Empty(t, tbl.Fields[3].Conversion) + + require.Equal(t, ".1.0.0.0.1.3", tbl.Fields[4].Oid) + require.Equal(t, "latency", tbl.Fields[4].Name) + require.False(t, tbl.Fields[4].IsTag) + require.Empty(t, tbl.Fields[4].Conversion) + + require.Equal(t, ".1.0.0.0.1.4", tbl.Fields[1].Oid) + require.Equal(t, "description", tbl.Fields[1].Name) + require.True(t, tbl.Fields[1].IsTag) + require.Empty(t, tbl.Fields[1].Conversion) +} + +func TestTableBuild_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.0.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.0.1.3", + Conversion: "float", + }, + { + Name: "myfield4", + Oid: ".1.0.0.2.1.5", + OidIndexSuffix: ".9.9", + }, + { + Name: "myfield5", + Oid: ".1.0.0.2.1.5", + OidIndexLength: 1, + }, + { + Name: "myfield6", + Oid: ".1.0.0.0.1.6", + Translate: true, + }, + { + Name: "myfield7", + Oid: ".1.0.0.0.1.6", + Translate: false, + }, + }, + } + + require.NoError(t, tbl.Init(NewNetsnmpTranslator(testutil.Logger{}))) + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + require.Equal(t, "mytable", tb.Name) + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "foo", + "index": "0", + }, + Fields: map[string]interface{}{ + "myfield2": 1, + "myfield3": float64(0.123), + "myfield4": 11, + "myfield5": 11, + "myfield6": "testTableEntry.7", + "myfield7": ".1.0.0.0.1.7", + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "bar", + "index": "1", + }, + Fields: map[string]interface{}{ + "myfield2": 2, + "myfield3": float64(0.456), + "myfield4": 22, + "myfield5": 22, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "index": "2", + }, + Fields: map[string]interface{}{ + "myfield2": 0, + "myfield3": float64(0.0), + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{ + "index": "3", + }, + Fields: map[string]interface{}{ + "myfield3": float64(9.999), + }, + } + require.Len(t, tb.Rows, 4) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) 
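+ // rtr3 and rtr4 cover rows where the walk only returned values for a subset of the columns.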
+ require.Contains(t, tb.Rows, rtr3) + require.Contains(t, tb.Rows, rtr4) +} + +func TestTableBuild_noWalk(t *testing.T) { + tbl := Table{ + Name: "mytable", + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.1.2", + IsTag: true, + }, + { + Name: "empty", + Oid: ".1.0.0.0.1.1.2", + }, + { + Name: "noexist", + Oid: ".1.2.3.4.5", + }, + }, + } + + tb, err := tbl.Build(tsc, false) + require.NoError(t, err) + + rtr := RTableRow{ + Tags: map[string]string{"myfield1": "baz", "myfield3": "234"}, + Fields: map[string]interface{}{"myfield2": 234}, + } + require.Len(t, tb.Rows, 1) + require.Contains(t, tb.Rows, rtr) +} + +func TestFieldConvert(t *testing.T) { + testTable := []struct { + input interface{} + conv string + expected interface{} + }{ + {[]byte("foo"), "", "foo"}, + {"0.123", "float", float64(0.123)}, + {[]byte("0.123"), "float", float64(0.123)}, + {float32(0.123), "float", float64(float32(0.123))}, + {float64(0.123), "float", float64(0.123)}, + {float64(0.123123123123), "float", float64(0.123123123123)}, + {123, "float", float64(123)}, + {123, "float(0)", float64(123)}, + {123, "float(4)", float64(0.0123)}, + {int8(123), "float(3)", float64(0.123)}, + {int16(123), "float(3)", float64(0.123)}, + {int32(123), "float(3)", float64(0.123)}, + {int64(123), "float(3)", float64(0.123)}, + {uint(123), "float(3)", float64(0.123)}, + {uint8(123), "float(3)", float64(0.123)}, + {uint16(123), "float(3)", float64(0.123)}, + {uint32(123), "float(3)", float64(0.123)}, + {uint64(123), "float(3)", float64(0.123)}, + {"123", "int", int64(123)}, + {[]byte("123"), "int", int64(123)}, + {"123123123123", "int", int64(123123123123)}, + {[]byte("123123123123"), "int", int64(123123123123)}, + {float32(12.3), "int", int64(12)}, + {float64(12.3), "int", int64(12)}, + {int(123), "int", int64(123)}, + {int8(123), "int", int64(123)}, + {int16(123), "int", int64(123)}, + {int32(123), "int", int64(123)}, + {int64(123), "int", int64(123)}, + {uint(123), "int", int64(123)}, + {uint8(123), "int", int64(123)}, + {uint16(123), "int", int64(123)}, + {uint32(123), "int", int64(123)}, + {uint64(123), "int", int64(123)}, + {[]byte("abcdef"), "hwaddr", "61:62:63:64:65:66"}, + {"abcdef", "hwaddr", "61:62:63:64:65:66"}, + {[]byte("abcd"), "ipaddr", "97.98.99.100"}, + {"abcd", "ipaddr", "97.98.99.100"}, + {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:BigEndian:uint64", uint64(2602423610063712)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:BigEndian:uint32", uint32(605923)}, + {[]byte{0x00, 0x09}, "hextoint:BigEndian:uint16", uint16(9)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:LittleEndian:uint64", uint64(6934371307618175232)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:LittleEndian:uint32", uint32(3812493568)}, + {[]byte{0x00, 0x09}, "hextoint:LittleEndian:uint16", uint16(2304)}, + } + + for _, tc := range testTable { + f := Field{ + Name: "test", + Conversion: tc.conv, + } + require.NoError(t, f.Init(NewNetsnmpTranslator(testutil.Logger{}))) + + act, err := f.Convert(gosnmp.SnmpPDU{Value: tc.input}) + require.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + require.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + } +} + +func 
TestSnmpTranslateCache_miss(t *testing.T) { + snmpTranslateCaches = nil + oid := "IF-MIB::ifPhysAddress.1" + mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTranslate(oid) + require.Len(t, snmpTranslateCaches, 1) + stc := snmpTranslateCaches[oid] + require.NotNil(t, stc) + require.Equal(t, mibName, stc.mibName) + require.Equal(t, oidNum, stc.oidNum) + require.Equal(t, oidText, stc.oidText) + require.Equal(t, conversion, stc.conversion) + require.Equal(t, err, stc.err) +} + +func TestSnmpTranslateCache_hit(t *testing.T) { + snmpTranslateCaches = map[string]snmpTranslateCache{ + "foo": { + mibName: "a", + oidNum: "b", + oidText: "c", + conversion: "d", + }, + } + mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTranslate("foo") + require.Equal(t, "a", mibName) + require.Equal(t, "b", oidNum) + require.Equal(t, "c", oidText) + require.Equal(t, "d", conversion) + require.NoError(t, err) + snmpTranslateCaches = nil +} + +func TestSnmpTableCache_miss(t *testing.T) { + snmpTableCaches = nil + oid := ".1.0.0.0" + mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTable(oid) + require.Len(t, snmpTableCaches, 1) + stc := snmpTableCaches[oid] + require.NotNil(t, stc) + require.Equal(t, mibName, stc.mibName) + require.Equal(t, oidNum, stc.oidNum) + require.Equal(t, oidText, stc.oidText) + require.Equal(t, fields, stc.fields) + require.Equal(t, err, stc.err) +} + +func TestSnmpTableCache_hit(t *testing.T) { + snmpTableCaches = map[string]snmpTableCache{ + "foo": { + mibName: "a", + oidNum: "b", + oidText: "c", + fields: []Field{{Name: "d"}}, + }, + } + mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator(testutil.Logger{}).SnmpTable("foo") + require.Equal(t, "a", mibName) + require.Equal(t, "b", oidNum) + require.Equal(t, "c", oidText) + require.Equal(t, []Field{{Name: "d"}}, fields) + require.NoError(t, err) +} diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index a6082e463b075..8cf30611050d5 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -11,6 +11,16 @@ import ( "github.com/gosnmp/gosnmp" ) +// Connection is an interface which wraps a *gosnmp.GoSNMP object. +// We interact through an interface so we can mock it out in tests. +type Connection interface { + Host() string + //BulkWalkAll(string) ([]gosnmp.SnmpPDU, error) + Walk(string, gosnmp.WalkFunc) error + Get(oids []string) (*gosnmp.SnmpPacket, error) + Reconnect() error +} + // GosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection. 
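+// It is the concrete Connection implementation used at runtime; tests substitute the testSNMPConnection mock from wrapper_test.go.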
type GosnmpWrapper struct { *gosnmp.GoSNMP diff --git a/internal/snmp/wrapper_test.go b/internal/snmp/wrapper_test.go new file mode 100644 index 0000000000000..10a4896a1df87 --- /dev/null +++ b/internal/snmp/wrapper_test.go @@ -0,0 +1,89 @@ +package snmp + +import "github.com/gosnmp/gosnmp" + +type testSNMPConnection struct { + host string + values map[string]interface{} +} + +func (tsc *testSNMPConnection) Host() string { + return tsc.host +} + +func (tsc *testSNMPConnection) Get(oids []string) (*gosnmp.SnmpPacket, error) { + sp := &gosnmp.SnmpPacket{} + for _, oid := range oids { + v, ok := tsc.values[oid] + if !ok { + sp.Variables = append(sp.Variables, gosnmp.SnmpPDU{ + Name: oid, + Type: gosnmp.NoSuchObject, + }) + continue + } + sp.Variables = append(sp.Variables, gosnmp.SnmpPDU{ + Name: oid, + Value: v, + }) + } + return sp, nil +} +func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { + for void, v := range tsc.values { + if void == oid || (len(void) > len(oid) && void[:len(oid)+1] == oid+".") { + if err := wf(gosnmp.SnmpPDU{ + Name: void, + Value: v, + }); err != nil { + return err + } + } + } + return nil +} +func (tsc *testSNMPConnection) Reconnect() error { + return nil +} + +var tsc = &testSNMPConnection{ + host: "tsc", + values: map[string]interface{}{ + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + ".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.5.0": 123456, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, + ".1.3.6.1.2.1.3.1.1.1.0": "foo", + ".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"), + ".1.3.6.1.2.1.3.1.1.1.2": []byte(""), + ".1.3.6.1.2.1.3.1.1.102": "bad", + ".1.3.6.1.2.1.3.1.1.2.0": 1, + ".1.3.6.1.2.1.3.1.1.2.1": 2, + ".1.3.6.1.2.1.3.1.1.2.2": 0, + ".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3", + ".1.3.6.1.2.1.3.1.1.5.0": 123456, + }, +} diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md index c8439bbc377b9..ae0cbd56e8238 100644 --- a/plugins/inputs/amd_rocm_smi/README.md +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -22,6 +22,12 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath # bin_path = "/opt/rocm/bin/rocm-smi" + ## Optional: specifies plugin behavior regarding missing rocm-smi binary + ## Available choices: + ## - error: telegraf will return an error on startup + ## - ignore: telegraf will ignore this plugin + # startup_error_behavior = "error" + ## Optional: timeout for GPU polling # timeout = "5s" ``` diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go index fa9dddb40c79a..eb890259e12e1 100644 --- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go @@ -23,8 +23,12 @@ var sampleConfig string const measurement = "amd_rocm_smi" type ROCmSMI struct { - BinPath string - Timeout config.Duration + BinPath string `toml:"bin_path"` + Timeout config.Duration `toml:"timeout"` + StartupErrorBehavior string `toml:"startup_error_behavior"` + Log telegraf.Logger `toml:"-"` + + ignorePlugin bool } func (*ROCmSMI) SampleConfig() string { @@ -33,8 +37,8 @@ func (*ROCmSMI) SampleConfig() string { // Gather implements the telegraf interface func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { - if _, err := os.Stat(rsmi.BinPath); os.IsNotExist(err) { - return fmt.Errorf("rocm-smi binary not found in path %s, cannot query GPUs statistics", rsmi.BinPath) + if rsmi.ignorePlugin { + return nil } data := rsmi.pollROCmSMI() @@ -46,6 +50,27 @@ func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { return nil } +func (rsmi *ROCmSMI) Init() error { + if _, err := os.Stat(rsmi.BinPath); os.IsNotExist(err) { + binPath, err := exec.LookPath("rocm-smi") + if err != nil { + switch rsmi.StartupErrorBehavior { + case "ignore": + rsmi.ignorePlugin = true + rsmi.Log.Warnf("rocm-smi not found on the system, ignoring: %s", err) + return nil + case "", "error": + return fmt.Errorf("rocm-smi binary not found in path %s, cannot query GPUs statistics", rsmi.BinPath) + default: + return fmt.Errorf("unknown startup behavior setting: %s", rsmi.StartupErrorBehavior) + } + } + rsmi.BinPath = binPath + } + + return nil +} + func init() { inputs.Add("amd_rocm_smi", func() telegraf.Input { return &ROCmSMI{ diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go index e38e0ff89eae0..9d508cf25c6cb 100644 --- a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -11,6 +11,51 @@ import ( "github.com/stretchr/testify/require" ) +func TestErrorBehaviorError(t *testing.T) { + // make sure we can't find rocm-smi in $PATH somewhere + os.Unsetenv("PATH") + plugin := &ROCmSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + StartupErrorBehavior: "error", + } + require.Error(t, plugin.Init()) +} + +func TestErrorBehaviorDefault(t *testing.T) { + // make sure we can't find rocm-smi in $PATH somewhere + os.Unsetenv("PATH") + plugin := &ROCmSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + } + require.Error(t, plugin.Init()) +} + +func TestErrorBehaviorIgnore(t *testing.T) { + // make sure we can't find rocm-smi in $PATH somewhere + os.Unsetenv("PATH") + plugin := &ROCmSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + StartupErrorBehavior: "ignore", + } + require.NoError(t, plugin.Init()) + acc := testutil.Accumulator{} + require.NoError(t, plugin.Gather(&acc)) +} + +func TestErrorBehaviorInvalidOption(t *testing.T) { + // make sure we can't find rocm-smi in 
$PATH somewhere + os.Unsetenv("PATH") + plugin := &ROCmSMI{ + BinPath: "/random/non-existent/path", + Log: &testutil.Logger{}, + StartupErrorBehavior: "giveup", + } + require.Error(t, plugin.Init()) +} + func TestGatherValidJSON(t *testing.T) { tests := []struct { name string diff --git a/plugins/inputs/amd_rocm_smi/sample.conf b/plugins/inputs/amd_rocm_smi/sample.conf index aed15aae966c3..b4fb9ebb12c62 100644 --- a/plugins/inputs/amd_rocm_smi/sample.conf +++ b/plugins/inputs/amd_rocm_smi/sample.conf @@ -3,5 +3,11 @@ ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath # bin_path = "/opt/rocm/bin/rocm-smi" + ## Optional: specifies plugin behavior regarding missing rocm-smi binary + ## Available choices: + ## - error: telegraf will return an error on startup + ## - ignore: telegraf will ignore this plugin + # startup_error_behavior = "error" + ## Optional: timeout for GPU polling # timeout = "5s" diff --git a/plugins/inputs/gnmi/path.go b/plugins/inputs/gnmi/path.go index f48ff5a3d986a..cc63814cd5085 100644 --- a/plugins/inputs/gnmi/path.go +++ b/plugins/inputs/gnmi/path.go @@ -141,6 +141,7 @@ func (pi *pathInfo) append(paths ...*gnmiLib.Path) *pathInfo { path.keyValues = append(path.keyValues, keyInfo) } } + path.normalize() return path } @@ -172,6 +173,7 @@ func (pi *pathInfo) appendSegments(segments ...string) *pathInfo { } path.segments = append(path.segments, s) } + path.normalize() return path } diff --git a/plugins/inputs/gnmi/testcases/issue_14833/expected.out b/plugins/inputs/gnmi/testcases/issue_14833/expected.out new file mode 100644 index 0000000000000..d04628a2cca07 --- /dev/null +++ b/plugins/inputs/gnmi/testcases/issue_14833/expected.out @@ -0,0 +1,2 @@ +interfaces-counter,name=AppGigabitEthernet1/0/1,source=127.0.0.1 discontinuity_time="2023-11-15T05:50:40+00:00" 1708069483623763000 +interfaces-counter,name=FortyGigabitEthernet1/1/1,source=127.0.0.1 discontinuity_time="2023-11-15T05:50:40+00:00" 1708069483623763000 diff --git a/plugins/inputs/gnmi/testcases/issue_14833/responses.json b/plugins/inputs/gnmi/testcases/issue_14833/responses.json new file mode 100644 index 0000000000000..fd84cf21ad1f2 --- /dev/null +++ b/plugins/inputs/gnmi/testcases/issue_14833/responses.json @@ -0,0 +1,51 @@ +[ + { + "update": { + "timestamp": "1708069483623763000", + "update": [ + { + "path": { + "elem": [ + { + "name": "Cisco-IOS-XE-interfaces-oper:interfaces" + }, + { + "name": "interface", + "key": { + "name": "AppGigabitEthernet1/0/1" + } + }, + { + "name": "statistics" + } + ] + }, + "val": { + "jsonIetfVal": "eyJkaXNjb250aW51aXR5LXRpbWUiOiIyMDIzLTExLTE1VDA1OjUwOjQwKzAwOjAwIn0K" + } + }, + { + "path": { + "elem": [ + { + "name": "Cisco-IOS-XE-interfaces-oper:interfaces" + }, + { + "name": "interface", + "key": { + "name": "FortyGigabitEthernet1/1/1" + } + }, + { + "name": "statistics" + } + ] + }, + "val": { + "jsonIetfVal": "eyJkaXNjb250aW51aXR5LXRpbWUiOiIyMDIzLTExLTE1VDA1OjUwOjQwKzAwOjAwIn0K" + } + } + ] + } + } +] \ No newline at end of file diff --git a/plugins/inputs/gnmi/testcases/issue_14833/telegraf.conf b/plugins/inputs/gnmi/testcases/issue_14833/telegraf.conf new file mode 100644 index 0000000000000..2d881f72957fa --- /dev/null +++ b/plugins/inputs/gnmi/testcases/issue_14833/telegraf.conf @@ -0,0 +1,9 @@ +[[inputs.gnmi]] + addresses = ["dummy"] + + [[inputs.gnmi.subscription]] + name = "interfaces-counter" + origin = "rfc7951" + path = "/Cisco-IOS-XE-interfaces-oper:interfaces/interface/statistics" + subscription_mode = "sample" + sample_interval = "5s" diff 
--git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 166bd8567bab6..fdab824178cce 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -5,7 +5,7 @@ import ( "strconv" "strings" - "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon" + "github.com/gorcon/rcon" ) var ( @@ -40,22 +40,12 @@ type connector struct { } func (c *connector) Connect() (Connection, error) { - p, err := strconv.Atoi(c.port) + client, err := rcon.Dial(c.hostname+":"+c.port, c.password) if err != nil { return nil, err } - client, err := rcon.NewClient(c.hostname, p) - if err != nil { - return nil, err - } - - _, err = client.Authorize(c.password) - if err != nil { - return nil, err - } - - return &connection{client: client}, nil + return client, nil } func newClient(connector Connector) *client { @@ -110,18 +100,6 @@ func (c *client) Scores(player string) ([]Score, error) { return parseScores(resp), nil } -type connection struct { - client *rcon.Client -} - -func (c *connection) Execute(command string) (string, error) { - packet, err := c.client.Execute(command) - if err != nil { - return "", err - } - return packet.Body, nil -} - func parsePlayers(input string) []string { parts := strings.SplitAfterN(input, ":", 2) if len(parts) != 2 { diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go deleted file mode 100644 index e75b0b27670cf..0000000000000 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ /dev/null @@ -1,210 +0,0 @@ -// Package rcon implements the communication protocol for communicating -// with RCON servers. Tested and working with Valve game servers. -package rcon - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "net" - "strings" -) - -const ( - PacketPaddingSize uint8 = 2 // Size of Packet's padding. - PacketHeaderSize uint8 = 8 // Size of Packet's header. -) - -const ( - TerminationSequence = "\x00" // Null empty ASCII string suffix. -) - -// Packet type constants. -// https://developer.valvesoftware.com/wiki/Source_RCON_Protocol#Packet_Type -const ( - Exec int32 = 2 - Auth int32 = 3 - AuthResponse int32 = 2 - ResponseValue int32 = 0 -) - -// Rcon package errors. -var ( - ErrInvalidWrite = errors.New("failed to write the payload correctly to remote connection") - ErrInvalidRead = errors.New("failed to read the response correctly from remote connection") - ErrInvalidChallenge = errors.New("server failed to mirror request challenge") - ErrUnauthorizedRequest = errors.New("client not authorized to remote server") - ErrFailedAuthorization = errors.New("failed to authorize to the remote server") -) - -type Client struct { - Host string // The IP address of the remote server. - Port int // The Port the remote server's listening on. - Authorized bool // Has the client been authorized by the server? - Connection net.Conn // The TCP connection to the server. -} - -type Header struct { - Size int32 // The size of the payload. - Challenge int32 // The challenge ths server should mirror. - Type int32 // The type of request being sent. -} - -type Packet struct { - Header Header // Packet header. - Body string // Body of packet. -} - -// Compile converts a packets header and body into its appropriate -// byte array payload, returning an error if the binary packages -// Write method fails to write the header bytes in their little -// endian byte order. 
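The minecraft connector above now delegates connect and authenticate to github.com/gorcon/rcon in a single Dial call, and the returned connection already satisfies the plugin's Execute(string) (string, error) contract, which is why the local connection wrapper and the vendored rcon implementation being removed here are no longer needed. A minimal standalone sketch of that flow, with a hypothetical address and password:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gorcon/rcon"
)

func main() {
	// Dial opens the TCP connection and authenticates with the RCON
	// password in one step, replacing the old NewClient + Authorize flow.
	conn, err := rcon.Dial("127.0.0.1:25575", "minecraft-rcon-password")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Execute sends a command and returns the response body as a string,
	// matching the Connection interface the plugin expects.
	resp, err := conn.Execute("list")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}
```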
-func (p Packet) Compile() (payload []byte, err error) { - var size = p.Header.Size - var buffer bytes.Buffer - var padding [PacketPaddingSize]byte - - if err = binary.Write(&buffer, binary.LittleEndian, &size); nil != err { - return nil, err - } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Challenge); nil != err { - return nil, err - } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Type); nil != err { - return nil, err - } - - buffer.WriteString(p.Body) - buffer.Write(padding[:]) - - return buffer.Bytes(), nil -} - -// NewPacket returns a pointer to a new Packet type. -func NewPacket(challenge, typ int32, body string) (packet *Packet) { - size := int32(len([]byte(body)) + int(PacketHeaderSize+PacketPaddingSize)) - return &Packet{Header{size, challenge, typ}, body} -} - -// Authorize calls Send with the appropriate command type and the provided -// password. The response packet is returned if authorization is successful -// or a potential error. -func (c *Client) Authorize(password string) (response *Packet, err error) { - if response, err = c.Send(Auth, password); nil == err { - if response.Header.Type != AuthResponse { - return nil, ErrFailedAuthorization - } - c.Authorized = true - } - - return response, err -} - -// Execute calls Send with the appropriate command type and the provided -// command. The response packet is returned if the command executed successfully -// or a potential error. -func (c *Client) Execute(command string) (response *Packet, err error) { - return c.Send(Exec, command) -} - -// Send accepts the commands type and its string to execute to the clients server, -// creating a packet with a random challenge id for the server to mirror, -// and compiling its payload bytes in the appropriate order. The response is -// decompiled from its bytes into a Packet type for return. An error is returned -// if send fails. -func (c *Client) Send(typ int32, command string) (*Packet, error) { - if typ != Auth && !c.Authorized { - return nil, ErrUnauthorizedRequest - } - - // Create a random challenge for the server to mirror in its response. - var challenge int32 - if err := binary.Read(rand.Reader, binary.LittleEndian, &challenge); nil != err { - return nil, err - } - - // Create the packet from the challenge, typ and command - // and compile it to its byte payload - packet := NewPacket(challenge, typ, command) - payload, err := packet.Compile() - if nil != err { - return nil, err - } - - n, err := c.Connection.Write(payload) - if nil != err { - return nil, err - } - if n != len(payload) { - return nil, ErrInvalidWrite - } - - var header Header - if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return nil, err - } - if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return nil, err - } - if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return nil, err - } - - if packet.Header.Type == Auth && header.Type == ResponseValue { - // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization. - if _, err := c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))); nil != err { - return nil, err - } - - // Reread the packet header. 
- if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return nil, err - } - if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return nil, err - } - if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return nil, err - } - } - - if header.Challenge != packet.Header.Challenge { - return nil, ErrInvalidChallenge - } - - body := make([]byte, header.Size-int32(PacketHeaderSize)) - n, err = c.Connection.Read(body) - for n < len(body) { - var nBytes int - nBytes, err = c.Connection.Read(body[n:]) - if err != nil { - return nil, err - } - n += nBytes - } - - // Shouldn't this be moved up to the first read? - if nil != err { - return nil, err - } - if n != len(body) { - return nil, ErrInvalidRead - } - - response := new(Packet) - response.Header = header - response.Body = strings.TrimRight(string(body), TerminationSequence) - - return response, nil -} - -// NewClient creates a new Client type, creating the connection -// to the server specified by the host and port arguments. If -// the connection fails, an error is returned. -func NewClient(host string, port int) (client *Client, err error) { - client = new(Client) - client.Host = host - client.Port = port - client.Connection, err = net.Dial("tcp", fmt.Sprintf("%v:%v", client.Host, client.Port)) - return client, err -} diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 78e7320900abc..f96b6184c68bd 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -73,17 +73,17 @@ type JSONMetrics struct { } `json:"processes"` } -type metric map[string]int64 -type poolStat map[string]metric +type metricStat map[string]int64 +type poolStat map[string]metricStat type phpfpm struct { Format string `toml:"format"` Timeout config.Duration `toml:"timeout"` Urls []string `toml:"urls"` - + Log telegraf.Logger `toml:"-"` tls.ClientConfig + client *http.Client - Log telegraf.Logger } func (*phpfpm) SampleConfig() string { @@ -91,6 +91,10 @@ func (*phpfpm) SampleConfig() string { } func (p *phpfpm) Init() error { + if len(p.Urls) == 0 { + p.Urls = []string{"http://127.0.0.1/status"} + } + tlsCfg, err := p.ClientConfig.TLSConfig() if err != nil { return err @@ -117,18 +121,8 @@ func (p *phpfpm) Init() error { // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
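The phpfpm changes below move the localhost default into Init and make URL expansion non-fatal: a glob or socket that cannot be resolved is reported through the accumulator while the remaining addresses are still polled. A self-contained sketch of that accumulate-and-continue pattern, using a stand-in accumulator and an os.Stat check in place of the real globUnixSocket helper:

```go
package main

import (
	"fmt"
	"os"
)

// accumulator stands in for telegraf.Accumulator's error collection.
type accumulator struct{ errs []error }

func (a *accumulator) AddError(err error) { a.errs = append(a.errs, err) }

// expandSockets mirrors the new control flow: record the error for a missing
// socket and keep expanding the rest instead of aborting the whole gather.
func expandSockets(acc *accumulator, addrs []string) []string {
	out := make([]string, 0, len(addrs))
	for _, addr := range addrs {
		if _, err := os.Stat(addr); err != nil {
			acc.AddError(fmt.Errorf("socket doesn't exist %q", addr))
			continue
		}
		out = append(out, addr)
	}
	return out
}

func main() {
	var acc accumulator
	usable := expandSockets(&acc, []string{os.TempDir(), "/no/such/socket"})
	fmt.Println(usable, acc.errs) // one usable path, one recorded error
}
```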
func (p *phpfpm) Gather(acc telegraf.Accumulator) error { - if len(p.Urls) == 0 { - return p.gatherServer("http://127.0.0.1/status", acc) - } - var wg sync.WaitGroup - - urls, err := expandUrls(p.Urls) - if err != nil { - return err - } - - for _, serv := range urls { + for _, serv := range expandUrls(acc, p.Urls) { wg.Add(1) go func(serv string) { defer wg.Done() @@ -259,7 +253,7 @@ func parseLines(r io.Reader, acc telegraf.Accumulator, addr string) { // We start to gather data for a new pool here if fieldName == PfPool { currentPool = strings.Trim(keyvalue[1], " ") - stats[currentPool] = make(metric) + stats[currentPool] = make(metricStat) continue } @@ -347,7 +341,7 @@ func (p *phpfpm) parseJSON(r io.Reader, acc telegraf.Accumulator, addr string) { } } -func expandUrls(urls []string) ([]string, error) { +func expandUrls(acc telegraf.Accumulator, urls []string) []string { addrs := make([]string, 0, len(urls)) for _, address := range urls { if isNetworkURL(address) { @@ -356,11 +350,12 @@ func expandUrls(urls []string) ([]string, error) { } paths, err := globUnixSocket(address) if err != nil { - return nil, err + acc.AddError(err) + continue } addrs = append(addrs, paths...) } - return addrs, nil + return addrs } func globUnixSocket(address string) ([]string, error) { diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 2f760e3d658a6..38edc92289814 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -19,9 +19,12 @@ import ( "os" "strconv" "testing" + "time" "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/common/shim" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/testutil" @@ -49,6 +52,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { url := ts.URL + "?test=ok" r := &phpfpm{ Urls: []string{url}, + Log: &testutil.Logger{}, } require.NoError(t, r.Init()) @@ -96,7 +100,7 @@ func TestPhpFpmGeneratesJSONMetrics_From_Http(t *testing.T) { input := &phpfpm{ Urls: []string{server.URL + "?full&json"}, Format: "json", - Log: testutil.Logger{}, + Log: &testutil.Logger{}, } require.NoError(t, input.Init()) @@ -117,8 +121,8 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { //Now we tested again above server r := &phpfpm{ Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, + Log: &testutil.Logger{}, } - require.NoError(t, r.Init()) var acc testutil.Accumulator @@ -161,12 +165,11 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { r := &phpfpm{ Urls: []string{tcp.Addr().String()}, + Log: &testutil.Logger{}, } - require.NoError(t, r.Init()) var acc testutil.Accumulator - require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ @@ -214,14 +217,12 @@ func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { r := &phpfpm{ Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, + Log: &testutil.Logger{}, } - require.NoError(t, r.Init()) var acc1, acc2 testutil.Accumulator - require.NoError(t, acc1.GatherError(r.Gather)) - require.NoError(t, acc2.GatherError(r.Gather)) tags1 := map[string]string{ @@ -267,12 +268,11 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { r := &phpfpm{ Urls: []string{tcp.Addr().String() + ":custom-status-path"}, + Log: &testutil.Logger{}, } - require.NoError(t, r.Init()) var acc testutil.Accumulator - require.NoError(t, acc.GatherError(r.Gather)) tags 
:= map[string]string{ @@ -300,15 +300,14 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // When not passing server config, we default to localhost // We just want to make sure we did request stat from localhost func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{Urls: []string{"http://bad.localhost:62001/status"}} - + r := &phpfpm{ + Urls: []string{"http://bad.localhost:62001/status"}, + Log: &testutil.Logger{}, + } require.NoError(t, r.Init()) var acc testutil.Accumulator - - err := acc.GatherError(r.Gather) - require.Error(t, err) - require.Contains(t, err.Error(), "/status") + require.ErrorContains(t, acc.GatherError(r.Gather), "/status") } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { @@ -318,30 +317,25 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t r := &phpfpm{ Urls: []string{"http://aninvalidone"}, + Log: &testutil.Logger{}, } - require.NoError(t, r.Init()) var acc testutil.Accumulator - err := acc.GatherError(r.Gather) - require.Error(t, err) - require.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) - require.Contains(t, err.Error(), `lookup aninvalidone`) + require.ErrorContains(t, err, `unable to connect to phpfpm status page 'http://aninvalidone'`) + require.ErrorContains(t, err, `lookup aninvalidone`) } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { r := &phpfpm{ Urls: []string{"/tmp/invalid.sock"}, + Log: &testutil.Logger{}, } - require.NoError(t, r.Init()) var acc testutil.Accumulator - - err := acc.GatherError(r.Gather) - require.Error(t, err) - require.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) + require.ErrorContains(t, acc.GatherError(r.Gather), `socket doesn't exist "/tmp/invalid.sock"`) } const outputSample = ` @@ -389,3 +383,48 @@ func TestPhpFpmParseJSON_Log_Error_Without_Panic_When_When_JSON_Is_Invalid(t *te require.NotPanics(t, func() { p.parseJSON(bytes.NewReader(invalidJSON), &testutil.NopAccumulator{}, "") }) require.Contains(t, logOutput.String(), "E! 
Unable to decode JSON response: invalid character 'X' looking for beginning of value") } + +func TestGatherDespiteUnavailable(t *testing.T) { + // Let OS find an available port + tcp, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err, "Cannot initialize test server") + defer tcp.Close() + + s := statServer{} + go fcgi.Serve(tcp, s) //nolint:errcheck // ignore the returned error as we cannot do anything about it anyway + + //Now we tested again above server + r := &phpfpm{ + Urls: []string{"fcgi://" + tcp.Addr().String() + "/status", "/lala"}, + Log: &testutil.Logger{}, + } + require.NoError(t, r.Init()) + + expected := []telegraf.Metric{ + metric.New( + "phpfpm", + map[string]string{ + "pool": "www", + "url": r.Urls[0], + }, + map[string]interface{}{ + "start_since": int64(1991), + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + }, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.ErrorContains(t, acc.GatherError(r.Gather), "socket doesn't exist") + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 50d7bfbb4f793..922306669463c 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -589,7 +589,7 @@ func TestProcstatLookupMetric(t *testing.T) { var acc testutil.Accumulator require.NoError(t, p.Gather(&acc)) - require.Len(t, acc.GetTelegrafMetrics(), 1) + require.NotEmpty(t, acc.GetTelegrafMetrics()) } func TestGather_SameTimestamps(t *testing.T) { diff --git a/plugins/inputs/snmp/gosmi.go b/plugins/inputs/snmp/gosmi.go deleted file mode 100644 index 50d2a84b96ef6..0000000000000 --- a/plugins/inputs/snmp/gosmi.go +++ /dev/null @@ -1,143 +0,0 @@ -package snmp - -import ( - "fmt" - "sync" - - "github.com/sleepinggenius2/gosmi" - "github.com/sleepinggenius2/gosmi/models" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/snmp" -) - -type gosmiTranslator struct { -} - -func NewGosmiTranslator(paths []string, log telegraf.Logger) (*gosmiTranslator, error) { - err := snmp.LoadMibsFromPath(paths, log, &snmp.GosmiMibLoader{}) - if err == nil { - return &gosmiTranslator{}, nil - } - return nil, err -} - -type gosmiSnmpTranslateCache struct { - mibName string - oidNum string - oidText string - conversion string - node gosmi.SmiNode - err error -} - -var gosmiSnmpTranslateCachesLock sync.Mutex -var gosmiSnmpTranslateCaches map[string]gosmiSnmpTranslateCache - -//nolint:revive //function-result-limit conditionally 5 return results allowed -func (g *gosmiTranslator) SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { - mibName, oidNum, oidText, conversion, _, err = g.SnmpTranslateFull(oid) - return mibName, oidNum, oidText, conversion, err -} - -//nolint:revive //function-result-limit conditionally 6 return results allowed -func (g *gosmiTranslator) SnmpTranslateFull(oid string) ( - mibName string, oidNum string, oidText string, - conversion string, - node gosmi.SmiNode, - err error) { - gosmiSnmpTranslateCachesLock.Lock() - if gosmiSnmpTranslateCaches == nil { - gosmiSnmpTranslateCaches = map[string]gosmiSnmpTranslateCache{} - } - - 
var stc gosmiSnmpTranslateCache - var ok bool - if stc, ok = gosmiSnmpTranslateCaches[oid]; !ok { - // This will result in only one call to snmptranslate running at a time. - // We could speed it up by putting a lock in snmpTranslateCache and then - // returning it immediately, and multiple callers would then release the - // snmpTranslateCachesLock and instead wait on the individual - // snmpTranslation.Lock to release. But I don't know that the extra complexity - // is worth it. Especially when it would slam the system pretty hard if lots - // of lookups are being performed. - - stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.node, stc.err = snmp.SnmpTranslateCall(oid) - gosmiSnmpTranslateCaches[oid] = stc - } - - gosmiSnmpTranslateCachesLock.Unlock() - - return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.node, stc.err -} - -type gosmiSnmpTableCache struct { - mibName string - oidNum string - oidText string - fields []Field - err error -} - -var gosmiSnmpTableCaches map[string]gosmiSnmpTableCache -var gosmiSnmpTableCachesLock sync.Mutex - -// snmpTable resolves the given OID as a table, providing information about the -// table and fields within. -// -//nolint:revive //Too many return variable but necessary -func (g *gosmiTranslator) SnmpTable(oid string) ( - mibName string, oidNum string, oidText string, - fields []Field, - err error) { - gosmiSnmpTableCachesLock.Lock() - if gosmiSnmpTableCaches == nil { - gosmiSnmpTableCaches = map[string]gosmiSnmpTableCache{} - } - - var stc gosmiSnmpTableCache - var ok bool - if stc, ok = gosmiSnmpTableCaches[oid]; !ok { - stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = g.SnmpTableCall(oid) - gosmiSnmpTableCaches[oid] = stc - } - - gosmiSnmpTableCachesLock.Unlock() - return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err -} - -//nolint:revive //Too many return variable but necessary -func (g *gosmiTranslator) SnmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - mibName, oidNum, oidText, _, node, err := g.SnmpTranslateFull(oid) - if err != nil { - return "", "", "", nil, fmt.Errorf("translating: %w", err) - } - - mibPrefix := mibName + "::" - - col, tagOids := snmp.GetIndex(mibPrefix, node) - for _, c := range col { - _, isTag := tagOids[mibPrefix+c] - fields = append(fields, Field{Name: c, Oid: mibPrefix + c, IsTag: isTag}) - } - - return mibName, oidNum, oidText, fields, nil -} - -func (g *gosmiTranslator) SnmpFormatEnum(oid string, value interface{}, full bool) (string, error) { - //nolint:dogsled // only need to get the node - _, _, _, _, node, err := g.SnmpTranslateFull(oid) - - if err != nil { - return "", err - } - - var v models.Value - if full { - v = node.FormatValue(value, models.FormatEnumName, models.FormatEnumValue) - } else { - v = node.FormatValue(value, models.FormatEnumName) - } - - return v.Formatted, nil -} diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 4a571bc9af3ee..8f9df26266905 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -3,18 +3,11 @@ package snmp import ( _ "embed" - "encoding/binary" "errors" "fmt" - "math" - "net" - "strconv" - "strings" "sync" "time" - "github.com/gosnmp/gosnmp" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" @@ -25,25 +18,6 @@ import ( //go:embed sample.conf var sampleConfig string -type Translator interface { - SnmpTranslate(oid string) ( - mibName string, oidNum string, 
oidText string, - conversion string, - err error, - ) - - SnmpTable(oid string) ( - mibName string, oidNum string, oidText string, - fields []Field, - err error, - ) - - SnmpFormatEnum(oid string, value interface{}, full bool) ( - formatted string, - err error, - ) -} - // Snmp holds the configuration for the plugin. type Snmp struct { // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g. @@ -55,19 +29,19 @@ type Snmp struct { snmp.ClientConfig - Tables []Table `toml:"table"` + Tables []snmp.Table `toml:"table"` // Name & Fields are the elements of a Table. // Telegraf chokes if we try to embed a Table. So instead we have to embed the // fields of a Table, and construct a Table during runtime. - Name string `toml:"name"` - Fields []Field `toml:"field"` + Name string `toml:"name"` + Fields []snmp.Field `toml:"field"` - connectionCache []snmpConnection + connectionCache []snmp.Connection Log telegraf.Logger `toml:"-"` - translator Translator + translator snmp.Translator } func (s *Snmp) SetTranslator(name string) { @@ -82,17 +56,17 @@ func (s *Snmp) Init() error { var err error switch s.Translator { case "gosmi": - s.translator, err = NewGosmiTranslator(s.Path, s.Log) + s.translator, err = snmp.NewGosmiTranslator(s.Path, s.Log) if err != nil { return err } case "netsnmp": - s.translator = NewNetsnmpTranslator() + s.translator = snmp.NewNetsnmpTranslator(s.Log) default: return errors.New("invalid translator value") } - s.connectionCache = make([]snmpConnection, len(s.Agents)) + s.connectionCache = make([]snmp.Connection, len(s.Agents)) for i := range s.Tables { if err := s.Tables[i].Init(s.translator); err != nil { @@ -101,7 +75,7 @@ func (s *Snmp) Init() error { } for i := range s.Fields { - if err := s.Fields[i].init(s.translator); err != nil { + if err := s.Fields[i].Init(s.translator); err != nil { return fmt.Errorf("initializing field %s: %w", s.Fields[i].Name, err) } } @@ -119,200 +93,6 @@ func (s *Snmp) Init() error { return nil } -// Table holds the configuration for a SNMP table. -type Table struct { - // Name will be the name of the measurement. - Name string - - // Which tags to inherit from the top-level config. - InheritTags []string - - // Adds each row's table index as a tag. - IndexAsTag bool - - // Fields is the tags and values to look up. - Fields []Field `toml:"field"` - - // OID for automatic field population. - // If provided, init() will populate Fields with all the table columns of the - // given OID. - Oid string - - initialized bool -} - -// Init() builds & initializes the nested fields. -func (t *Table) Init(tr Translator) error { - //makes sure oid or name is set in config file - //otherwise snmp will produce metrics with an empty name - if t.Oid == "" && t.Name == "" { - return errors.New("SNMP table in config file is not named. One or both of the oid and name settings must be set") - } - - if t.initialized { - return nil - } - - if err := t.initBuild(tr); err != nil { - return err - } - - secondaryIndexTablePresent := false - // initialize all the nested fields - for i := range t.Fields { - if err := t.Fields[i].init(tr); err != nil { - return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err) - } - if t.Fields[i].SecondaryIndexTable { - if secondaryIndexTablePresent { - return errors.New("only one field can be SecondaryIndexTable") - } - secondaryIndexTablePresent = true - } - } - - t.initialized = true - return nil -} - -// initBuild initializes the table if it has an OID configured. 
If so, the -// net-snmp tools will be used to look up the OID and auto-populate the table's -// fields. -func (t *Table) initBuild(tr Translator) error { - if t.Oid == "" { - return nil - } - - _, _, oidText, fields, err := tr.SnmpTable(t.Oid) - if err != nil { - return err - } - - if t.Name == "" { - t.Name = oidText - } - - knownOIDs := map[string]bool{} - for _, f := range t.Fields { - knownOIDs[f.Oid] = true - } - for _, f := range fields { - if !knownOIDs[f.Oid] { - t.Fields = append(t.Fields, f) - } - } - - return nil -} - -// Field holds the configuration for a Field to look up. -type Field struct { - // Name will be the name of the field. - Name string - // OID is prefix for this field. The plugin will perform a walk through all - // OIDs with this as their parent. For each value found, the plugin will strip - // off the OID prefix, and use the remainder as the index. For multiple fields - // to show up in the same row, they must share the same index. - Oid string - // OidIndexSuffix is the trailing sub-identifier on a table record OID that will be stripped off to get the record's index. - OidIndexSuffix string - // OidIndexLength specifies the length of the index in OID path segments. It can be used to remove sub-identifiers that vary in content or length. - OidIndexLength int - // IsTag controls whether this OID is output as a tag or a value. - IsTag bool - // Conversion controls any type conversion that is done on the value. - // "float"/"float(0)" will convert the value into a float. - // "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit. - // "int" will convert the value into an integer. - // "hwaddr" will convert a 6-byte string to a MAC address. - // "ipaddr" will convert the value to an IPv4 or IPv6 address. - // "enum"/"enum(1)" will convert the value according to its syntax. (Only supported with gosmi translator) - Conversion string - // Translate tells if the value of the field should be snmptranslated - Translate bool - // Secondary index table allows to merge data from two tables with different index - // that this filed will be used to join them. There can be only one secondary index table. - SecondaryIndexTable bool - // This field is using secondary index, and will be later merged with primary index - // using SecondaryIndexTable. SecondaryIndexTable and SecondaryIndexUse are exclusive. - SecondaryIndexUse bool - // Controls if entries from secondary table should be added or not if joining - // index is present or not. I set to true, means that join is outer, and - // index is prepended with "Secondary." for missing values to avoid overlapping - // indexes from both tables. - // Can be set per field or globally with SecondaryIndexTable, global true overrides - // per field false. - SecondaryOuterJoin bool - - initialized bool -} - -// init() converts OID names to numbers, and sets the .Name attribute if unset. 
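The Conversion values documented on the Field struct above map to simple numeric transformations; the float(X) divide-by-power-of-ten and the hextoint byte decoding are visible in the fieldConvert code further down and in the conversion test table. A short runnable illustration of two of them:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// "float(3)": divide by 10^3, i.e. move the decimal point before the
	// third right-most digit, so 123 becomes 0.123.
	fmt.Println(float64(123) / math.Pow10(3)) // 0.123

	// "hextoint:BigEndian:uint32": decode a byte slice as a big-endian
	// unsigned 32-bit integer, so 0x00 0x09 0x3E 0xE3 becomes 605923.
	fmt.Println(binary.BigEndian.Uint32([]byte{0x00, 0x09, 0x3E, 0xE3})) // 605923
}
```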
-func (f *Field) init(tr Translator) error { - if f.initialized { - return nil - } - - // check if oid needs translation or name is not set - if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { - _, oidNum, oidText, conversion, err := tr.SnmpTranslate(f.Oid) - if err != nil { - return fmt.Errorf("translating: %w", err) - } - f.Oid = oidNum - if f.Name == "" { - f.Name = oidText - } - if f.Conversion == "" { - f.Conversion = conversion - } - //TODO use textual convention conversion from the MIB - } - - if f.SecondaryIndexTable && f.SecondaryIndexUse { - return errors.New("SecondaryIndexTable and UseSecondaryIndex are exclusive") - } - - if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin { - return errors.New("SecondaryOuterJoin set to true, but field is not being used in join") - } - - f.initialized = true - return nil -} - -// RTable is the resulting table built from a Table. -type RTable struct { - // Name is the name of the field, copied from Table.Name. - Name string - // Time is the time the table was built. - Time time.Time - // Rows are the rows that were found, one row for each table OID index found. - Rows []RTableRow -} - -// RTableRow is the resulting row containing all the OID values which shared -// the same index. -type RTableRow struct { - // Tags are all the Field values which had IsTag=true. - Tags map[string]string - // Fields are all the Field values which had IsTag=false. - Fields map[string]interface{} -} - -type walkError struct { - msg string - err error -} - -func (e *walkError) Error() string { - return e.msg -} - -func (e *walkError) Unwrap() error { - return e.err -} - // Gather retrieves all the configured fields and tables. // Any error encountered does not halt the process. The errors are accumulated // and returned at the end. @@ -329,7 +109,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } // First is the top-level fields. We treat the fields as table prefixes with an empty index. - t := Table{ + t := snmp.Table{ Name: s.Name, Fields: s.Fields, } @@ -351,8 +131,8 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { return nil } -func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, topTags map[string]string, walk bool) error { - rt, err := t.Build(gs, walk, s.translator) +func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmp.Connection, t snmp.Table, topTags map[string]string, walk bool) error { + rt, err := t.Build(gs, walk) if err != nil { return err } @@ -380,206 +160,11 @@ func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, return nil } -// Build retrieves all the fields specified in the table and constructs the RTable. -func (t Table) Build(gs snmpConnection, walk bool, tr Translator) (*RTable, error) { - rows := map[string]RTableRow{} - - //translation table for secondary index (when preforming join on two tables) - secIdxTab := make(map[string]string) - secGlobalOuterJoin := false - for i, f := range t.Fields { - if f.SecondaryIndexTable { - secGlobalOuterJoin = f.SecondaryOuterJoin - if i != 0 { - t.Fields[0], t.Fields[i] = t.Fields[i], t.Fields[0] - } - break - } - } - - tagCount := 0 - for _, f := range t.Fields { - if f.IsTag { - tagCount++ - } - - if len(f.Oid) == 0 { - return nil, fmt.Errorf("cannot have empty OID on field %s", f.Name) - } - var oid string - if f.Oid[0] == '.' { - oid = f.Oid - } else { - // make sure OID has "." 
because the BulkWalkAll results do, and the prefix needs to match - oid = "." + f.Oid - } - - // ifv contains a mapping of table OID index to field value - ifv := map[string]interface{}{} - - if !walk { - // This is used when fetching non-table fields. Fields configured a the top - // scope of the plugin. - // We fetch the fields directly, and add them to ifv as if the index were an - // empty string. This results in all the non-table fields sharing the same - // index, and being added on the same row. - if pkt, err := gs.Get([]string{oid}); err != nil { - if errors.Is(err, gosnmp.ErrUnknownSecurityLevel) { - return nil, errors.New("unknown security level (sec_level)") - } else if errors.Is(err, gosnmp.ErrUnknownUsername) { - return nil, errors.New("unknown username (sec_name)") - } else if errors.Is(err, gosnmp.ErrWrongDigest) { - return nil, errors.New("wrong digest (auth_protocol, auth_password)") - } else if errors.Is(err, gosnmp.ErrDecryption) { - return nil, errors.New("decryption error (priv_protocol, priv_password)") - } - return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) - } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance { - ent := pkt.Variables[0] - fv, err := fieldConvert(tr, f.Conversion, ent) - if err != nil { - return nil, fmt.Errorf("converting %q (OID %s) for field %s: %w", ent.Value, ent.Name, f.Name, err) - } - ifv[""] = fv - } - } else { - err := gs.Walk(oid, func(ent gosnmp.SnmpPDU) error { - if len(ent.Name) <= len(oid) || ent.Name[:len(oid)+1] != oid+"." { - return &walkError{} // break the walk - } - - idx := ent.Name[len(oid):] - if f.OidIndexSuffix != "" { - if !strings.HasSuffix(idx, f.OidIndexSuffix) { - // this entry doesn't match our OidIndexSuffix. skip it - return nil - } - idx = idx[:len(idx)-len(f.OidIndexSuffix)] - } - if f.OidIndexLength != 0 { - i := f.OidIndexLength + 1 // leading separator - idx = strings.Map(func(r rune) rune { - if r == '.' { - i-- - } - if i < 1 { - return -1 - } - return r - }, idx) - } - - // snmptranslate table field value here - if f.Translate { - if entOid, ok := ent.Value.(string); ok { - _, _, oidText, _, err := tr.SnmpTranslate(entOid) - if err == nil { - // If no error translating, the original value for ent.Value should be replaced - ent.Value = oidText - } - } - } - - fv, err := fieldConvert(tr, f.Conversion, ent) - if err != nil { - return &walkError{ - msg: fmt.Sprintf("converting %q (OID %s) for field %s", ent.Value, ent.Name, f.Name), - err: err, - } - } - ifv[idx] = fv - return nil - }) - if err != nil { - // Our callback always wraps errors in a walkError. - // If this error isn't a walkError, we know it's not - // from the callback - var walkErr *walkError - if !errors.As(err, &walkErr) { - return nil, fmt.Errorf("performing bulk walk for field %s: %w", f.Name, err) - } - } - } - - for idx, v := range ifv { - if f.SecondaryIndexUse { - if newidx, ok := secIdxTab[idx]; ok { - idx = newidx - } else { - if !secGlobalOuterJoin && !f.SecondaryOuterJoin { - continue - } - idx = ".Secondary" + idx - } - } - rtr, ok := rows[idx] - if !ok { - rtr = RTableRow{} - rtr.Tags = map[string]string{} - rtr.Fields = map[string]interface{}{} - rows[idx] = rtr - } - if t.IndexAsTag && idx != "" { - if idx[0] == '.' 
{ - idx = idx[1:] - } - rtr.Tags["index"] = idx - } - // don't add an empty string - if vs, ok := v.(string); !ok || vs != "" { - if f.IsTag { - if ok { - rtr.Tags[f.Name] = vs - } else { - rtr.Tags[f.Name] = fmt.Sprintf("%v", v) - } - } else { - rtr.Fields[f.Name] = v - } - if f.SecondaryIndexTable { - //indexes are stored here with prepending "." so we need to add them if needed - var vss string - if ok { - vss = "." + vs - } else { - vss = fmt.Sprintf(".%v", v) - } - if idx[0] == '.' { - secIdxTab[vss] = idx - } else { - secIdxTab[vss] = "." + idx - } - } - } - } - } - - rt := RTable{ - Name: t.Name, - Time: time.Now(), //TODO record time at start - Rows: make([]RTableRow, 0, len(rows)), - } - for _, r := range rows { - rt.Rows = append(rt.Rows, r) - } - return &rt, nil -} - -// snmpConnection is an interface which wraps a *gosnmp.GoSNMP object. -// We interact through an interface so we can mock it out in tests. -type snmpConnection interface { - Host() string - //BulkWalkAll(string) ([]gosnmp.SnmpPDU, error) - Walk(string, gosnmp.WalkFunc) error - Get(oids []string) (*gosnmp.SnmpPacket, error) - Reconnect() error -} - // getConnection creates a snmpConnection (*gosnmp.GoSNMP) object and caches the // result using `agentIndex` as the cache key. This is done to allow multiple // connections to a single address. It is an error to use a connection in // more than one goroutine. -func (s *Snmp) getConnection(idx int) (snmpConnection, error) { +func (s *Snmp) getConnection(idx int) (snmp.Connection, error) { if gs := s.connectionCache[idx]; gs != nil { if err := gs.Reconnect(); err != nil { return gs, fmt.Errorf("reconnecting: %w", err) @@ -590,9 +175,7 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { agent := s.Agents[idx] - var err error - var gs snmp.GosnmpWrapper - gs, err = snmp.NewWrapper(s.ClientConfig) + gs, err := snmp.NewWrapper(s.ClientConfig) if err != nil { return nil, err } @@ -611,173 +194,6 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { return gs, nil } -// fieldConvert converts from any type according to the conv specification -func fieldConvert(tr Translator, conv string, ent gosnmp.SnmpPDU) (v interface{}, err error) { - if conv == "" { - if bs, ok := ent.Value.([]byte); ok { - return string(bs), nil - } - return ent.Value, nil - } - - var d int - if _, err := fmt.Sscanf(conv, "float(%d)", &d); err == nil || conv == "float" { - v = ent.Value - switch vt := v.(type) { - case float32: - v = float64(vt) / math.Pow10(d) - case float64: - v = vt / math.Pow10(d) - case int: - v = float64(vt) / math.Pow10(d) - case int8: - v = float64(vt) / math.Pow10(d) - case int16: - v = float64(vt) / math.Pow10(d) - case int32: - v = float64(vt) / math.Pow10(d) - case int64: - v = float64(vt) / math.Pow10(d) - case uint: - v = float64(vt) / math.Pow10(d) - case uint8: - v = float64(vt) / math.Pow10(d) - case uint16: - v = float64(vt) / math.Pow10(d) - case uint32: - v = float64(vt) / math.Pow10(d) - case uint64: - v = float64(vt) / math.Pow10(d) - case []byte: - vf, _ := strconv.ParseFloat(string(vt), 64) - v = vf / math.Pow10(d) - case string: - vf, _ := strconv.ParseFloat(vt, 64) - v = vf / math.Pow10(d) - } - return v, nil - } - - if conv == "int" { - v = ent.Value - switch vt := v.(type) { - case float32: - v = int64(vt) - case float64: - v = int64(vt) - case int: - v = int64(vt) - case int8: - v = int64(vt) - case int16: - v = int64(vt) - case int32: - v = int64(vt) - case int64: - v = vt - case uint: - v = int64(vt) - case uint8: - v = int64(vt) - 
case uint16: - v = int64(vt) - case uint32: - v = int64(vt) - case uint64: - v = int64(vt) - case []byte: - v, _ = strconv.ParseInt(string(vt), 10, 64) - case string: - v, _ = strconv.ParseInt(vt, 10, 64) - } - return v, nil - } - - if conv == "hwaddr" { - switch vt := ent.Value.(type) { - case string: - v = net.HardwareAddr(vt).String() - case []byte: - v = net.HardwareAddr(vt).String() - default: - return nil, fmt.Errorf("invalid type (%T) for hwaddr conversion", v) - } - return v, nil - } - - split := strings.Split(conv, ":") - if split[0] == "hextoint" && len(split) == 3 { - endian := split[1] - bit := split[2] - - bv, ok := ent.Value.([]byte) - if !ok { - return ent.Value, nil - } - - switch endian { - case "LittleEndian": - switch bit { - case "uint64": - v = binary.LittleEndian.Uint64(bv) - case "uint32": - v = binary.LittleEndian.Uint32(bv) - case "uint16": - v = binary.LittleEndian.Uint16(bv) - default: - return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) - } - case "BigEndian": - switch bit { - case "uint64": - v = binary.BigEndian.Uint64(bv) - case "uint32": - v = binary.BigEndian.Uint32(bv) - case "uint16": - v = binary.BigEndian.Uint16(bv) - default: - return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) - } - default: - return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) - } - - return v, nil - } - - if conv == "ipaddr" { - var ipbs []byte - - switch vt := ent.Value.(type) { - case string: - ipbs = []byte(vt) - case []byte: - ipbs = vt - default: - return nil, fmt.Errorf("invalid type (%T) for ipaddr conversion", v) - } - - switch len(ipbs) { - case 4, 16: - v = net.IP(ipbs).String() - default: - return nil, fmt.Errorf("invalid length (%d) for ipaddr conversion", len(ipbs)) - } - - return v, nil - } - - if conv == "enum" { - return tr.SnmpFormatEnum(ent.Name, ent.Value, false) - } - - if conv == "enum(1)" { - return tr.SnmpFormatEnum(ent.Name, ent.Value, true) - } - - return nil, fmt.Errorf("invalid conversion type %q", conv) -} - func init() { inputs.Add("snmp", func() telegraf.Input { return &Snmp{ diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index e94aae8684b64..48abaf3652739 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -1,11 +1,9 @@ -//go:generate go run -tags generate snmp_mocks_generate.go package snmp import ( - "errors" "fmt" "net" - "os/exec" + "path/filepath" "sync" "testing" "time" @@ -65,135 +63,65 @@ func (tsc *testSNMPConnection) Reconnect() error { var tsc = &testSNMPConnection{ host: "tsc", values: map[string]interface{}{ - ".1.0.0.0.1.1.0": "foo", - ".1.0.0.0.1.1.1": []byte("bar"), - ".1.0.0.0.1.1.2": []byte(""), - ".1.0.0.0.1.102": "bad", - ".1.0.0.0.1.2.0": 1, - ".1.0.0.0.1.2.1": 2, - ".1.0.0.0.1.2.2": 0, - ".1.0.0.0.1.3.0": "0.123", - ".1.0.0.0.1.3.1": "0.456", - ".1.0.0.0.1.3.2": "0.000", - ".1.0.0.0.1.3.3": "9.999", - ".1.0.0.0.1.5.0": 123456, - ".1.0.0.1.1": "baz", - ".1.0.0.1.2": 234, - ".1.0.0.1.3": []byte("byte slice"), - ".1.0.0.2.1.5.0.9.9": 11, - ".1.0.0.2.1.5.1.9.9": 22, - ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", - ".1.0.0.3.1.1.10": "instance", - ".1.0.0.3.1.1.11": "instance2", - ".1.0.0.3.1.1.12": "instance3", - ".1.0.0.3.1.2.10": 10, - ".1.0.0.3.1.2.11": 20, - ".1.0.0.3.1.2.12": 20, - ".1.0.0.3.1.3.10": 1, - ".1.0.0.3.1.3.11": 2, - ".1.0.0.3.1.3.12": 3, + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + 
".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.5.0": 123456, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, + ".1.3.6.1.2.1.3.1.1.1.0": "foo", + ".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"), + ".1.3.6.1.2.1.3.1.1.1.2": []byte(""), + ".1.3.6.1.2.1.3.1.1.102": "bad", + ".1.3.6.1.2.1.3.1.1.2.0": 1, + ".1.3.6.1.2.1.3.1.1.2.1": 2, + ".1.3.6.1.2.1.3.1.1.2.2": 0, + ".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3", + ".1.3.6.1.2.1.3.1.1.5.0": 123456, }, } -func TestFieldInit(t *testing.T) { - translations := []struct { - inputOid string - inputName string - inputConversion string - expectedOid string - expectedName string - expectedConversion string - }{ - {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, - {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, - {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, - {".1.0.0.0.1.1.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, - {".999", "", "", ".999", ".999", ""}, - {"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""}, - {"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, - {"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""}, - {"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "hwaddr"}, - {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, - {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, - {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, - } - - tr := NewNetsnmpTranslator() - for _, txl := range translations { - f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} - err := f.init(tr) - require.NoError(t, err, "inputOid=%q inputName=%q", txl.inputOid, txl.inputName) - require.Equal(t, txl.expectedOid, f.Oid, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) - require.Equal(t, txl.expectedName, f.Name, "inputOid=%q inputName=%q inputConversion=%q", txl.inputOid, txl.inputName, txl.inputConversion) - } -} - -func TestTableInit(t *testing.T) { - tbl := Table{ - Oid: ".1.0.0.0", - Fields: []Field{ - {Oid: ".999", Name: "foo"}, - {Oid: "TEST::description", Name: "description", IsTag: true}, - }, - } - err := tbl.Init(NewNetsnmpTranslator()) - require.NoError(t, err) - - require.Equal(t, "testTable", tbl.Name) - - require.Len(t, tbl.Fields, 5) - require.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", IsTag: true, initialized: true}) -} - func TestSnmpInit(t *testing.T) { s := &Snmp{ - Tables: []Table{ - {Oid: "TEST::testTable"}, - }, - Fields: []Field{ 
- {Oid: "TEST::hostname"}, - }, ClientConfig: snmp.ClientConfig{ Translator: "netsnmp", }, } - err := s.Init() - require.NoError(t, err) - - require.Len(t, s.Tables[0].Fields, 4) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", initialized: true}) - - require.Equal(t, Field{ - Oid: ".1.0.0.1.1", - Name: "hostname", - initialized: true, - }, s.Fields[0]) + require.NoError(t, s.Init()) } func TestSnmpInit_noTranslate(t *testing.T) { - // override execCommand so it returns exec.ErrNotFound - defer func(ec func(string, ...string) *exec.Cmd) { execCommand = ec }(execCommand) - execCommand = func(_ string, _ ...string) *exec.Cmd { - return exec.Command("snmptranslateExecErrNotFound") - } - s := &Snmp{ - Fields: []Field{ + Fields: []snmp.Field{ {Oid: ".1.1.1.1", Name: "one", IsTag: true}, {Oid: ".1.1.1.2", Name: "two"}, {Oid: ".1.1.1.3"}, }, - Tables: []Table{ + Tables: []snmp.Table{ {Name: "testing", - Fields: []Field{ + Fields: []snmp.Field{ {Oid: ".1.1.1.4", Name: "four", IsTag: true}, {Oid: ".1.1.1.5", Name: "five"}, {Oid: ".1.1.1.6"}, @@ -202,6 +130,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { ClientConfig: snmp.ClientConfig{ Translator: "netsnmp", }, + Log: testutil.Logger{Name: "inputs.snmp"}, } err := s.Init() @@ -234,8 +163,8 @@ func TestSnmpInit_noTranslate(t *testing.T) { func TestSnmpInit_noName_noOid(t *testing.T) { s := &Snmp{ - Tables: []Table{ - {Fields: []Field{ + Tables: []snmp.Table{ + {Fields: []snmp.Field{ {Oid: ".1.1.1.4", Name: "four", IsTag: true}, {Oid: ".1.1.1.5", Name: "five"}, {Oid: ".1.1.1.6"}, @@ -243,8 +172,7 @@ func TestSnmpInit_noName_noOid(t *testing.T) { }, } - err := s.Init() - require.Error(t, err) + require.Error(t, s.Init()) } func TestGetSNMPConnection_v2(t *testing.T) { @@ -258,8 +186,7 @@ func TestGetSNMPConnection_v2(t *testing.T) { Translator: "netsnmp", }, } - err := s.Init() - require.NoError(t, err) + require.NoError(t, s.Init()) gsc, err := s.getConnection(0) require.NoError(t, err) @@ -600,147 +527,11 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { require.Equal(t, (gs.Retries+1)*2, reqCount) } -func TestTableBuild_walk(t *testing.T) { - tbl := Table{ - Name: "mytable", - IndexAsTag: true, - Fields: []Field{ - { - Name: "myfield1", - Oid: ".1.0.0.0.1.1", - IsTag: true, - }, - { - Name: "myfield2", - Oid: ".1.0.0.0.1.2", - }, - { - Name: "myfield3", - Oid: ".1.0.0.0.1.3", - Conversion: "float", - }, - { - Name: "myfield4", - Oid: ".1.0.0.2.1.5", - OidIndexSuffix: ".9.9", - }, - { - Name: "myfield5", - Oid: ".1.0.0.2.1.5", - OidIndexLength: 1, - }, - { - Name: "myfield6", - Oid: ".1.0.0.0.1.6", - Translate: true, - }, - { - Name: "myfield7", - Oid: ".1.0.0.0.1.6", - Translate: false, - }, - }, - } - - tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) - require.NoError(t, err) - - require.Equal(t, "mytable", tb.Name) - rtr1 := RTableRow{ - Tags: map[string]string{ - "myfield1": "foo", - "index": "0", - }, - Fields: map[string]interface{}{ - "myfield2": 1, - "myfield3": float64(0.123), - "myfield4": 11, - "myfield5": 11, - "myfield6": "testTableEntry.7", - "myfield7": ".1.0.0.0.1.7", - }, - } - rtr2 := RTableRow{ - Tags: map[string]string{ - "myfield1": "bar", - 
"index": "1", - }, - Fields: map[string]interface{}{ - "myfield2": 2, - "myfield3": float64(0.456), - "myfield4": 22, - "myfield5": 22, - }, - } - rtr3 := RTableRow{ - Tags: map[string]string{ - "index": "2", - }, - Fields: map[string]interface{}{ - "myfield2": 0, - "myfield3": float64(0.0), - }, - } - rtr4 := RTableRow{ - Tags: map[string]string{ - "index": "3", - }, - Fields: map[string]interface{}{ - "myfield3": float64(9.999), - }, - } - require.Len(t, tb.Rows, 4) - require.Contains(t, tb.Rows, rtr1) - require.Contains(t, tb.Rows, rtr2) - require.Contains(t, tb.Rows, rtr3) - require.Contains(t, tb.Rows, rtr4) -} - -func TestTableBuild_noWalk(t *testing.T) { - tbl := Table{ - Name: "mytable", - Fields: []Field{ - { - Name: "myfield1", - Oid: ".1.0.0.1.1", - IsTag: true, - }, - { - Name: "myfield2", - Oid: ".1.0.0.1.2", - }, - { - Name: "myfield3", - Oid: ".1.0.0.1.2", - IsTag: true, - }, - { - Name: "empty", - Oid: ".1.0.0.0.1.1.2", - }, - { - Name: "noexist", - Oid: ".1.2.3.4.5", - }, - }, - } - - tb, err := tbl.Build(tsc, false, NewNetsnmpTranslator()) - require.NoError(t, err) - - rtr := RTableRow{ - Tags: map[string]string{"myfield1": "baz", "myfield3": "234"}, - Fields: map[string]interface{}{"myfield2": 234}, - } - require.Len(t, tb.Rows, 1) - require.Contains(t, tb.Rows, rtr) -} - func TestGather(t *testing.T) { s := &Snmp{ Agents: []string{"TestGather"}, Name: "mytable", - Fields: []Field{ + Fields: []snmp.Field{ { Name: "myfield1", Oid: ".1.0.0.1.1", @@ -755,11 +546,11 @@ func TestGather(t *testing.T) { Oid: "1.0.0.1.1", }, }, - Tables: []Table{ + Tables: []snmp.Table{ { Name: "myOtherTable", InheritTags: []string{"myfield1"}, - Fields: []Field{ + Fields: []snmp.Field{ { Name: "myOtherField", Oid: ".1.0.0.0.1.5", @@ -768,7 +559,7 @@ func TestGather(t *testing.T) { }, }, - connectionCache: []snmpConnection{ + connectionCache: []snmp.Connection{ tsc, }, } @@ -787,8 +578,7 @@ func TestGather(t *testing.T) { require.Len(t, m.Fields, 2) require.Equal(t, 234, m.Fields["myfield2"]) require.Equal(t, "baz", m.Fields["myfield3"]) - require.False(t, tstart.After(m.Time)) - require.False(t, tstop.Before(m.Time)) + require.WithinRange(t, m.Time, tstart, tstop) m2 := acc.Metrics[1] require.Equal(t, "myOtherTable", m2.Measurement) @@ -802,7 +592,7 @@ func TestGather_host(t *testing.T) { s := &Snmp{ Agents: []string{"TestGather"}, Name: "mytable", - Fields: []Field{ + Fields: []snmp.Field{ { Name: "host", Oid: ".1.0.0.1.1", @@ -814,7 +604,7 @@ func TestGather_host(t *testing.T) { }, }, - connectionCache: []snmpConnection{ + connectionCache: []snmp.Connection{ tsc, }, } @@ -828,366 +618,183 @@ func TestGather_host(t *testing.T) { require.Equal(t, "baz", m.Tags["host"]) } -func TestFieldConvert(t *testing.T) { - testTable := []struct { - input interface{} - conv string - expected interface{} - }{ - {[]byte("foo"), "", "foo"}, - {"0.123", "float", float64(0.123)}, - {[]byte("0.123"), "float", float64(0.123)}, - {float32(0.123), "float", float64(float32(0.123))}, - {float64(0.123), "float", float64(0.123)}, - {float64(0.123123123123), "float", float64(0.123123123123)}, - {123, "float", float64(123)}, - {123, "float(0)", float64(123)}, - {123, "float(4)", float64(0.0123)}, - {int8(123), "float(3)", float64(0.123)}, - {int16(123), "float(3)", float64(0.123)}, - {int32(123), "float(3)", float64(0.123)}, - {int64(123), "float(3)", float64(0.123)}, - {uint(123), "float(3)", float64(0.123)}, - {uint8(123), "float(3)", float64(0.123)}, - {uint16(123), "float(3)", float64(0.123)}, - {uint32(123), 
"float(3)", float64(0.123)}, - {uint64(123), "float(3)", float64(0.123)}, - {"123", "int", int64(123)}, - {[]byte("123"), "int", int64(123)}, - {"123123123123", "int", int64(123123123123)}, - {[]byte("123123123123"), "int", int64(123123123123)}, - {float32(12.3), "int", int64(12)}, - {float64(12.3), "int", int64(12)}, - {int(123), "int", int64(123)}, - {int8(123), "int", int64(123)}, - {int16(123), "int", int64(123)}, - {int32(123), "int", int64(123)}, - {int64(123), "int", int64(123)}, - {uint(123), "int", int64(123)}, - {uint8(123), "int", int64(123)}, - {uint16(123), "int", int64(123)}, - {uint32(123), "int", int64(123)}, - {uint64(123), "int", int64(123)}, - {[]byte("abcdef"), "hwaddr", "61:62:63:64:65:66"}, - {"abcdef", "hwaddr", "61:62:63:64:65:66"}, - {[]byte("abcd"), "ipaddr", "97.98.99.100"}, - {"abcd", "ipaddr", "97.98.99.100"}, - {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, - {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:BigEndian:uint64", uint64(2602423610063712)}, - {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:BigEndian:uint32", uint32(605923)}, - {[]byte{0x00, 0x09}, "hextoint:BigEndian:uint16", uint16(9)}, - {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:LittleEndian:uint64", uint64(6934371307618175232)}, - {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:LittleEndian:uint32", uint32(3812493568)}, - {[]byte{0x00, 0x09}, "hextoint:LittleEndian:uint16", uint16(2304)}, - } +func TestSnmpInitGosmi(t *testing.T) { + testDataPath, err := filepath.Abs("../../../internal/snmp/testdata/gosmi") + require.NoError(t, err) - for _, tc := range testTable { - act, err := fieldConvert(NewNetsnmpTranslator(), tc.conv, gosnmp.SnmpPDU{Value: tc.input}) - require.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) - require.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + s := &Snmp{ + Tables: []snmp.Table{ + {Oid: "RFC1213-MIB::atTable"}, + }, + Fields: []snmp.Field{ + {Oid: "RFC1213-MIB::atPhysAddress"}, + }, + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + Translator: "gosmi", + }, } -} -func TestSnmpTranslateCache_miss(t *testing.T) { - snmpTranslateCaches = nil - oid := "IF-MIB::ifPhysAddress.1" - mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator().SnmpTranslate(oid) - require.Len(t, snmpTranslateCaches, 1) - stc := snmpTranslateCaches[oid] - require.NotNil(t, stc) - require.Equal(t, mibName, stc.mibName) - require.Equal(t, oidNum, stc.oidNum) - require.Equal(t, oidText, stc.oidText) - require.Equal(t, conversion, stc.conversion) - require.Equal(t, err, stc.err) -} + require.NoError(t, s.Init()) -func TestSnmpTranslateCache_hit(t *testing.T) { - snmpTranslateCaches = map[string]snmpTranslateCache{ - "foo": { - mibName: "a", - oidNum: "b", - oidText: "c", - conversion: "d", - err: errors.New("e"), - }, - } - mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator().SnmpTranslate("foo") - require.Equal(t, "a", mibName) - require.Equal(t, "b", oidNum) - require.Equal(t, "c", oidText) - require.Equal(t, "d", conversion) - require.Equal(t, errors.New("e"), err) - snmpTranslateCaches = nil -} + require.Len(t, s.Tables[0].Fields, 3) -func TestSnmpTableCache_miss(t *testing.T) { - snmpTableCaches = nil - oid := ".1.0.0.0" - mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator().SnmpTable(oid) - require.Len(t, snmpTableCaches, 1) - stc 
:= snmpTableCaches[oid] - require.NotNil(t, stc) - require.Equal(t, mibName, stc.mibName) - require.Equal(t, oidNum, stc.oidNum) - require.Equal(t, oidText, stc.oidText) - require.Equal(t, fields, stc.fields) - require.Equal(t, err, stc.err) -} + require.Equal(t, ".1.3.6.1.2.1.3.1.1.1", s.Tables[0].Fields[0].Oid) + require.Equal(t, "atIfIndex", s.Tables[0].Fields[0].Name) + require.True(t, s.Tables[0].Fields[0].IsTag) + require.Empty(t, s.Tables[0].Fields[0].Conversion) -func TestSnmpTableCache_hit(t *testing.T) { - snmpTableCaches = map[string]snmpTableCache{ - "foo": { - mibName: "a", - oidNum: "b", - oidText: "c", - fields: []Field{{Name: "d"}}, - err: errors.New("e"), - }, - } - mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator().SnmpTable("foo") - require.Equal(t, "a", mibName) - require.Equal(t, "b", oidNum) - require.Equal(t, "c", oidText) - require.Equal(t, []Field{{Name: "d"}}, fields) - require.Equal(t, errors.New("e"), err) -} + require.Equal(t, ".1.3.6.1.2.1.3.1.1.2", s.Tables[0].Fields[1].Oid) + require.Equal(t, "atPhysAddress", s.Tables[0].Fields[1].Name) + require.False(t, s.Tables[0].Fields[1].IsTag) + require.Equal(t, "hwaddr", s.Tables[0].Fields[1].Conversion) -func TestTableJoin_walk(t *testing.T) { - tbl := Table{ - Name: "mytable", - IndexAsTag: true, - Fields: []Field{ - { - Name: "myfield1", - Oid: ".1.0.0.3.1.1", - IsTag: true, - }, - { - Name: "myfield2", - Oid: ".1.0.0.3.1.2", - }, - { - Name: "myfield3", - Oid: ".1.0.0.3.1.3", - SecondaryIndexTable: true, - }, - { - Name: "myfield4", - Oid: ".1.0.0.0.1.1", - SecondaryIndexUse: true, - IsTag: true, - }, - { - Name: "myfield5", - Oid: ".1.0.0.0.1.2", - SecondaryIndexUse: true, - }, - }, - } + require.Equal(t, ".1.3.6.1.2.1.3.1.1.3", s.Tables[0].Fields[2].Oid) + require.Equal(t, "atNetAddress", s.Tables[0].Fields[2].Name) + require.True(t, s.Tables[0].Fields[2].IsTag) + require.Empty(t, s.Tables[0].Fields[2].Conversion) - tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) - require.NoError(t, err) + require.Equal(t, ".1.3.6.1.2.1.3.1.1.2", s.Fields[0].Oid) + require.Equal(t, "atPhysAddress", s.Fields[0].Name) + require.False(t, s.Fields[0].IsTag) + require.Equal(t, "hwaddr", s.Fields[0].Conversion) +} - require.Equal(t, "mytable", tb.Name) - rtr1 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance", - "myfield4": "bar", - "index": "10", - }, - Fields: map[string]interface{}{ - "myfield2": 10, - "myfield3": 1, - "myfield5": 2, - }, - } - rtr2 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance2", - "index": "11", - }, - Fields: map[string]interface{}{ - "myfield2": 20, - "myfield3": 2, - "myfield5": 0, +func TestSnmpInit_noTranslateGosmi(t *testing.T) { + s := &Snmp{ + Fields: []snmp.Field{ + {Oid: ".9.1.1.1.1", Name: "one", IsTag: true}, + {Oid: ".9.1.1.1.2", Name: "two"}, + {Oid: ".9.1.1.1.3"}, }, - } - rtr3 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance3", - "index": "12", + Tables: []snmp.Table{ + {Name: "testing", + Fields: []snmp.Field{ + {Oid: ".9.1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".9.1.1.1.5", Name: "five"}, + {Oid: ".9.1.1.1.6"}, + }}, }, - Fields: map[string]interface{}{ - "myfield2": 20, - "myfield3": 3, + ClientConfig: snmp.ClientConfig{ + Path: []string{}, + Translator: "gosmi", }, } - require.Len(t, tb.Rows, 3) - require.Contains(t, tb.Rows, rtr1) - require.Contains(t, tb.Rows, rtr2) - require.Contains(t, tb.Rows, rtr3) + + require.NoError(t, s.Init()) + + require.Equal(t, ".9.1.1.1.1", s.Fields[0].Oid) + require.Equal(t, "one", 
s.Fields[0].Name) + require.True(t, s.Fields[0].IsTag) + + require.Equal(t, ".9.1.1.1.2", s.Fields[1].Oid) + require.Equal(t, "two", s.Fields[1].Name) + require.False(t, s.Fields[1].IsTag) + + require.Equal(t, ".9.1.1.1.3", s.Fields[2].Oid) + require.Equal(t, ".9.1.1.1.3", s.Fields[2].Name) + require.False(t, s.Fields[2].IsTag) + + require.Equal(t, ".9.1.1.1.4", s.Tables[0].Fields[0].Oid) + require.Equal(t, "four", s.Tables[0].Fields[0].Name) + require.True(t, s.Tables[0].Fields[0].IsTag) + + require.Equal(t, ".9.1.1.1.5", s.Tables[0].Fields[1].Oid) + require.Equal(t, "five", s.Tables[0].Fields[1].Name) + require.False(t, s.Tables[0].Fields[1].IsTag) + + require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Oid) + require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Name) + require.False(t, s.Tables[0].Fields[2].IsTag) } -func TestTableOuterJoin_walk(t *testing.T) { - tbl := Table{ - Name: "mytable", - IndexAsTag: true, - Fields: []Field{ +func TestGatherGosmi(t *testing.T) { + s := &Snmp{ + Agents: []string{"TestGather"}, + Name: "mytable", + Fields: []snmp.Field{ { Name: "myfield1", - Oid: ".1.0.0.3.1.1", + Oid: ".1.0.0.1.1", IsTag: true, }, { Name: "myfield2", - Oid: ".1.0.0.3.1.2", - }, - { - Name: "myfield3", - Oid: ".1.0.0.3.1.3", - SecondaryIndexTable: true, - SecondaryOuterJoin: true, + Oid: ".1.0.0.1.2", }, { - Name: "myfield4", - Oid: ".1.0.0.0.1.1", - SecondaryIndexUse: true, - IsTag: true, + Name: "myfield3", + Oid: "1.0.0.1.1", }, + }, + Tables: []snmp.Table{ { - Name: "myfield5", - Oid: ".1.0.0.0.1.2", - SecondaryIndexUse: true, + Name: "myOtherTable", + InheritTags: []string{"myfield1"}, + Fields: []snmp.Field{ + { + Name: "myOtherField", + Oid: ".1.0.0.0.1.5", + }, + }, }, }, - } - tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) - require.NoError(t, err) + connectionCache: []snmp.Connection{tsc}, - require.Equal(t, "mytable", tb.Name) - rtr1 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance", - "myfield4": "bar", - "index": "10", - }, - Fields: map[string]interface{}{ - "myfield2": 10, - "myfield3": 1, - "myfield5": 2, - }, - } - rtr2 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance2", - "index": "11", - }, - Fields: map[string]interface{}{ - "myfield2": 20, - "myfield3": 2, - "myfield5": 0, - }, - } - rtr3 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance3", - "index": "12", - }, - Fields: map[string]interface{}{ - "myfield2": 20, - "myfield3": 3, - }, - } - rtr4 := RTableRow{ - Tags: map[string]string{ - "index": "Secondary.0", - "myfield4": "foo", - }, - Fields: map[string]interface{}{ - "myfield5": 1, + ClientConfig: snmp.ClientConfig{ + Translator: "gosmi", }, } - require.Len(t, tb.Rows, 4) - require.Contains(t, tb.Rows, rtr1) - require.Contains(t, tb.Rows, rtr2) - require.Contains(t, tb.Rows, rtr3) - require.Contains(t, tb.Rows, rtr4) + acc := &testutil.Accumulator{} + + tstart := time.Now() + require.NoError(t, s.Gather(acc)) + tstop := time.Now() + + require.Len(t, acc.Metrics, 2) + + m := acc.Metrics[0] + require.Equal(t, "mytable", m.Measurement) + require.Equal(t, "tsc", m.Tags[s.AgentHostTag]) + require.Equal(t, "baz", m.Tags["myfield1"]) + require.Len(t, m.Fields, 2) + require.Equal(t, 234, m.Fields["myfield2"]) + require.Equal(t, "baz", m.Fields["myfield3"]) + require.WithinRange(t, m.Time, tstart, tstop) + + m2 := acc.Metrics[1] + require.Equal(t, "myOtherTable", m2.Measurement) + require.Equal(t, "tsc", m2.Tags[s.AgentHostTag]) + require.Equal(t, "baz", m2.Tags["myfield1"]) + require.Len(t, m2.Fields, 1) + 
require.Equal(t, 123456, m2.Fields["myOtherField"]) } -func TestTableJoinNoIndexAsTag_walk(t *testing.T) { - tbl := Table{ - Name: "mytable", - IndexAsTag: false, - Fields: []Field{ +func TestGather_hostGosmi(t *testing.T) { + s := &Snmp{ + Agents: []string{"TestGather"}, + Name: "mytable", + Fields: []snmp.Field{ { - Name: "myfield1", - Oid: ".1.0.0.3.1.1", + Name: "host", + Oid: ".1.0.0.1.1", IsTag: true, }, { Name: "myfield2", - Oid: ".1.0.0.3.1.2", - }, - { - Name: "myfield3", - Oid: ".1.0.0.3.1.3", - SecondaryIndexTable: true, - }, - { - Name: "myfield4", - Oid: ".1.0.0.0.1.1", - SecondaryIndexUse: true, - IsTag: true, - }, - { - Name: "myfield5", - Oid: ".1.0.0.0.1.2", - SecondaryIndexUse: true, + Oid: ".1.0.0.1.2", }, }, + + connectionCache: []snmp.Connection{tsc}, } - tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) - require.NoError(t, err) + acc := &testutil.Accumulator{} - require.Equal(t, "mytable", tb.Name) - rtr1 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance", - "myfield4": "bar", - //"index": "10", - }, - Fields: map[string]interface{}{ - "myfield2": 10, - "myfield3": 1, - "myfield5": 2, - }, - } - rtr2 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance2", - //"index": "11", - }, - Fields: map[string]interface{}{ - "myfield2": 20, - "myfield3": 2, - "myfield5": 0, - }, - } - rtr3 := RTableRow{ - Tags: map[string]string{ - "myfield1": "instance3", - //"index": "12", - }, - Fields: map[string]interface{}{ - "myfield2": 20, - "myfield3": 3, - }, - } - require.Len(t, tb.Rows, 3) - require.Contains(t, tb.Rows, rtr1) - require.Contains(t, tb.Rows, rtr2) - require.Contains(t, tb.Rows, rtr3) + require.NoError(t, s.Gather(acc)) + + require.Len(t, acc.Metrics, 1) + m := acc.Metrics[0] + require.Equal(t, "baz", m.Tags["host"]) } diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index cc407c85501d0..a7cc0e2ed62c3 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -404,6 +404,9 @@ SELECT DISTINCT ,'Backup/Restore Throughput/sec' ,'Total Server Memory (KB)' ,'Target Server Memory (KB)' + ,'Stolen Server Memory (KB)' + ,'Database Cache Memory (KB)' + ,'Free Memory (KB)' ,'Log Flushes/sec' ,'Log Flush Wait Time' ,'Memory broker clerk size' diff --git a/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring.go b/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring.go index 4e5723ffb0ce3..89cf4f55c75f4 100644 --- a/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring.go +++ b/plugins/outputs/nebius_cloud_monitoring/nebius_cloud_monitoring.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "net/http" + "os" "time" "github.com/influxdata/telegraf" @@ -86,6 +87,9 @@ func (a *NebiusCloudMonitoring) Init() error { if a.service == "" { a.service = "custom" } + if service := os.Getenv("NEBIUS_SERVICE"); service != "" { + a.service = service + } if a.metadataTokenURL == "" { a.metadataTokenURL = defaultMetadataTokenURL } diff --git a/plugins/outputs/nebius_cloud_monitoring/sample.conf b/plugins/outputs/nebius_cloud_monitoring/sample.conf index adde94c7d7c54..1429938e6a22b 100644 --- a/plugins/outputs/nebius_cloud_monitoring/sample.conf +++ b/plugins/outputs/nebius_cloud_monitoring/sample.conf @@ -4,4 +4,4 @@ # timeout = "20s" ## Nebius.Cloud monitoring API endpoint. 
Normally should not be changed - # endpoint = "https://monitoring.api.il.nebius.cloud/monitoring/v2/data/write" + # endpoint = "https://monitoring.api.il.nebius.cloud/monitoring/v2/data/write" \ No newline at end of file diff --git a/plugins/outputs/websocket/README.md b/plugins/outputs/websocket/README.md index 56cf63d7e1470..12d38c363a0f4 100644 --- a/plugins/outputs/websocket/README.md +++ b/plugins/outputs/websocket/README.md @@ -15,6 +15,14 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. [CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins +## Secret-store support + +This plugin supports secrets from secret-stores for the `headers` option. +See the [secret-store documentation][SECRETSTORE] for more details on how +to use them. + +[SECRETSTORE]: ../../../docs/CONFIGURATION.md#secret-store-secrets + ## Configuration ```toml @sample.conf diff --git a/plugins/outputs/websocket/websocket.go b/plugins/outputs/websocket/websocket.go index 86a919b826eb3..f772c159c0e05 100644 --- a/plugins/outputs/websocket/websocket.go +++ b/plugins/outputs/websocket/websocket.go @@ -30,13 +30,13 @@ const ( // WebSocket can output to WebSocket endpoint. type WebSocket struct { - URL string `toml:"url"` - ConnectTimeout config.Duration `toml:"connect_timeout"` - WriteTimeout config.Duration `toml:"write_timeout"` - ReadTimeout config.Duration `toml:"read_timeout"` - Headers map[string]string `toml:"headers"` - UseTextFrames bool `toml:"use_text_frames"` - Log telegraf.Logger `toml:"-"` + URL string `toml:"url"` + ConnectTimeout config.Duration `toml:"connect_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + ReadTimeout config.Duration `toml:"read_timeout"` + Headers map[string]*config.Secret `toml:"headers"` + UseTextFrames bool `toml:"use_text_frames"` + Log telegraf.Logger `toml:"-"` proxy.HTTPProxy proxy.Socks5ProxyConfig tls.ClientConfig @@ -92,7 +92,13 @@ func (w *WebSocket) Connect() error { headers := http.Header{} for k, v := range w.Headers { - headers.Set(k, v) + secret, err := v.Get() + if err != nil { + return fmt.Errorf("getting header secret %q failed: %w", k, err) + } + + headers.Set(k, secret.String()) + secret.Destroy() } conn, resp, err := dialer.Dial(w.URL, headers) diff --git a/plugins/outputs/websocket/websocket_test.go b/plugins/outputs/websocket/websocket_test.go index 8d33152fe0225..982890557bae6 100644 --- a/plugins/outputs/websocket/websocket_test.go +++ b/plugins/outputs/websocket/websocket_test.go @@ -101,7 +101,8 @@ func initWebSocket(s *testServer) *WebSocket { w := newWebSocket() w.Log = testutil.Logger{} w.URL = s.URL - w.Headers = map[string]string{testHeaderName: testHeaderValue} + headerSecret := config.NewSecret([]byte(testHeaderValue)) + w.Headers = map[string]*config.Secret{testHeaderName: &headerSecret} w.SetSerializer(newTestSerializer()) return w } diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go index 1c8a88679615f..7ab9ea79cae1e 100644 --- a/plugins/processors/converter/converter_test.go +++ b/plugins/processors/converter/converter_test.go @@ -836,6 +836,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go index e483a0e6d47b9..f4f7e067a3851 100644 --- 
a/plugins/processors/date/date_test.go +++ b/plugins/processors/date/date_test.go @@ -289,6 +289,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/defaults/defaults_test.go b/plugins/processors/defaults/defaults_test.go index 4dc75ed820b4b..05bf470f18aea 100644 --- a/plugins/processors/defaults/defaults_test.go +++ b/plugins/processors/defaults/defaults_test.go @@ -195,6 +195,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go index 00541621d923c..67d023ce061d4 100644 --- a/plugins/processors/execd/execd_test.go +++ b/plugins/processors/execd/execd_test.go @@ -391,6 +391,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go index 579120433bdf3..9ea301402dc29 100644 --- a/plugins/processors/filepath/filepath_test.go +++ b/plugins/processors/filepath/filepath_test.go @@ -129,6 +129,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 52eb31dda3d0c..f4b961bfdb0c8 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -13,7 +13,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/common/parallel" - si "github.com/influxdata/telegraf/plugins/inputs/snmp" "github.com/influxdata/telegraf/plugins/processors" ) @@ -25,7 +24,6 @@ type keyType = string type valType = nameMap type mapFunc func(agent string) (nameMap, error) -type makeTableFunc func(string) (*si.Table, error) type sigMap map[string]chan struct{} @@ -43,8 +41,8 @@ type IfName struct { Log telegraf.Logger `toml:"-"` - ifTable *si.Table - ifXTable *si.Table + ifTable *snmp.Table + ifXTable *snmp.Table cache *TTLCache lock sync.Mutex @@ -52,9 +50,6 @@ type IfName struct { sigs sigMap getMapRemote mapFunc - makeTable makeTableFunc - - translator si.Translator } const minRetry = 5 * time.Minute @@ -65,7 +60,6 @@ func (*IfName) SampleConfig() string { func (d *IfName) Init() error { d.getMapRemote = d.getMapRemoteNoMock - d.makeTable = d.makeTableNoMock c := NewTTLCache(time.Duration(d.CacheTTL), d.CacheSize) d.cache = &c @@ -76,10 +70,6 @@ func (d *IfName) Init() error { return fmt.Errorf("parsing SNMP client config: %w", err) } - // Since OIDs in this plugin are always numeric there is no need - // to translate. 
- d.translator = si.NewNetsnmpTranslator() - return nil } @@ -287,17 +277,17 @@ func init() { }) } -func (d *IfName) makeTableNoMock(oid string) (*si.Table, error) { +func (d *IfName) makeTable(oid string) (*snmp.Table, error) { var err error - tab := si.Table{ + tab := snmp.Table{ Name: "ifTable", IndexAsTag: true, - Fields: []si.Field{ + Fields: []snmp.Field{ {Oid: oid, Name: "ifName"}, }, } - err = tab.Init(d.translator) + err = tab.Init(nil) if err != nil { //Init already wraps return nil, err @@ -306,10 +296,10 @@ func (d *IfName) makeTableNoMock(oid string) (*si.Table, error) { return &tab, nil } -func (d *IfName) buildMap(gs snmp.GosnmpWrapper, tab *si.Table) (nameMap, error) { +func (d *IfName) buildMap(gs snmp.GosnmpWrapper, tab *snmp.Table) (nameMap, error) { var err error - rtab, err := tab.Build(gs, true, d.translator) + rtab, err := tab.Build(gs, true) if err != nil { //Build already wraps return nil, err diff --git a/plugins/processors/ifname/ifname_test.go b/plugins/processors/ifname/ifname_test.go index 9955c7978e3de..43cf6118a298b 100644 --- a/plugins/processors/ifname/ifname_test.go +++ b/plugins/processors/ifname/ifname_test.go @@ -12,7 +12,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/metric" - si "github.com/influxdata/telegraf/plugins/inputs/snmp" "github.com/influxdata/telegraf/testutil" ) @@ -105,12 +104,7 @@ func TestGetMap(t *testing.T) { CacheTTL: config.Duration(10 * time.Second), } - // Don't run net-snmp commands to look up table names. - d.makeTable = func(string) (*si.Table, error) { - return &si.Table{}, nil - } - err := d.Init() - require.NoError(t, err) + require.NoError(t, d.Init()) expected := nameMap{ 1: "ifname1", @@ -228,6 +222,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/lookup/lookup_test.go b/plugins/processors/lookup/lookup_test.go index 929f63dae48d0..a353e7bc15860 100644 --- a/plugins/processors/lookup/lookup_test.go +++ b/plugins/processors/lookup/lookup_test.go @@ -162,7 +162,7 @@ func TestCasesTracking(t *testing.T) { testutil.RequireMetricsEqual(t, expected, actual) // Simulate output acknowledging delivery - for _, m := range input { + for _, m := range actual { m.Accept() } @@ -170,7 +170,7 @@ func TestCasesTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) }) } diff --git a/plugins/processors/regex/regex_test.go b/plugins/processors/regex/regex_test.go index d03328e0b7bf1..155e458d09a44 100644 --- a/plugins/processors/regex/regex_test.go +++ b/plugins/processors/regex/regex_test.go @@ -1,6 +1,7 @@ package regex import ( + "sync" "testing" "time" @@ -992,6 +993,56 @@ func TestAnyFieldConversion(t *testing.T) { } func TestTrackedMetricNotLost(t *testing.T) { + now := time.Now() + + // Setup raw input and expected output + inputRaw := testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": 
true, + }, + now, + ) + + expected := []telegraf.Metric{ + metric.New( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + "resp_code_group": "2xx", + "resp_code_text": "OK", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "method": "/search/", + "search_category": "plugins", + "ignore_number": int64(200), + "ignore_bool": true, + }, + now, + ), + } + + // Create fake notification for testing + var mu sync.Mutex + delivered := make([]telegraf.DeliveryInfo, 0, 1) + notify := func(di telegraf.DeliveryInfo) { + mu.Lock() + defer mu.Unlock() + delivered = append(delivered, di) + } + + // Convert raw input to tracking metric + input, _ := metric.WithTracking(inputRaw, notify) + + // Prepare and start the plugin regex := Regex{ Tags: []converter{ { @@ -1025,32 +1076,19 @@ func TestTrackedMetricNotLost(t *testing.T) { } require.NoError(t, regex.Init()) - m := newM2().Copy() - var delivered bool - notify := func(telegraf.DeliveryInfo) { - delivered = true - } - m, _ = metric.WithTracking(m, notify) - processed := regex.Apply(m) - processed[0].Accept() + // Process expected metrics and compare with resulting metrics + actual := regex.Apply(input) + testutil.RequireMetricsEqual(t, expected, actual) - expectedFields := map[string]interface{}{ - "request": "/api/search/?category=plugins&q=regex&sort=asc", - "method": "/search/", - "search_category": "plugins", - "ignore_number": int64(200), - "ignore_bool": true, - } - expectedTags := map[string]string{ - "verb": "GET", - "resp_code": "200", - "resp_code_group": "2xx", - "resp_code_text": "OK", + // Simulate output acknowledging delivery + for _, m := range actual { + m.Accept() } - require.Equal(t, expectedFields, processed[0].Fields()) - require.Equal(t, expectedTags, processed[0].Tags()) - require.Eventually(t, func() bool { - return delivered - }, time.Second, 100*time.Millisecond, "metric not delivered") + // Check delivery + require.Eventuallyf(t, func() bool { + mu.Lock() + defer mu.Unlock() + return len(delivered) == 1 + }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go index 46716d51e0e34..9edb1c3716454 100644 --- a/plugins/processors/rename/rename_test.go +++ b/plugins/processors/rename/rename_test.go @@ -124,6 +124,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/reverse_dns/reverse_dns_test.go b/plugins/processors/reverse_dns/reverse_dns_test.go index e6a41334437dc..b488b8effb20d 100644 --- a/plugins/processors/reverse_dns/reverse_dns_test.go +++ b/plugins/processors/reverse_dns/reverse_dns_test.go @@ -128,6 +128,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/s2geo/s2geo_test.go b/plugins/processors/s2geo/s2geo_test.go index bab79908c4156..35f14b5c0dc9d 100644 --- a/plugins/processors/s2geo/s2geo_test.go +++ b/plugins/processors/s2geo/s2geo_test.go @@ -118,6 +118,6 @@ func TestTracking(t 
*testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/scale/scale_test.go b/plugins/processors/scale/scale_test.go index 3c2b3c96e0a44..490c5700af119 100644 --- a/plugins/processors/scale/scale_test.go +++ b/plugins/processors/scale/scale_test.go @@ -546,6 +546,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index d6da6496def80..ed23e91fef923 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -1,6 +1,8 @@ package strings import ( + "strconv" + "sync" "testing" "time" @@ -1158,3 +1160,42 @@ func TestValidUTF8(t *testing.T) { }) } } + +func TestTrackedMetricNotLost(t *testing.T) { + var mu sync.Mutex + delivered := make([]telegraf.DeliveryInfo, 0, 3) + notify := func(di telegraf.DeliveryInfo) { + mu.Lock() + defer mu.Unlock() + delivered = append(delivered, di) + } + input := make([]telegraf.Metric, 0, 3) + expected := make([]telegraf.Metric, 0, 6) + for i := 0; i < 3; i++ { + strI := strconv.Itoa(i) + + m := metric.New("m"+strI, map[string]string{}, map[string]interface{}{"message": "test" + string([]byte{0xff}) + strI}, time.Unix(0, 0)) + tm, _ := metric.WithTracking(m, notify) + input = append(input, tm) + + m = metric.New("m"+strI, map[string]string{}, map[string]interface{}{"message": "test" + strI}, time.Unix(0, 0)) + expected = append(expected, m) + } + + // Process expected metrics and compare with resulting metrics + plugin := &Strings{ValidUTF8: []converter{{Field: "message", Replacement: ""}}} + actual := plugin.Apply(input...) 
+ testutil.RequireMetricsEqual(t, expected, actual) + + // Simulate output acknowledging delivery + for _, m := range actual { + m.Accept() + } + + // Check delivery + require.Eventuallyf(t, func() bool { + mu.Lock() + defer mu.Unlock() + return len(input) == len(delivered) + }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) +} diff --git a/plugins/processors/tag_limit/tag_limit_test.go b/plugins/processors/tag_limit/tag_limit_test.go index 91cd06ffabf8c..29bc03d0ea978 100644 --- a/plugins/processors/tag_limit/tag_limit_test.go +++ b/plugins/processors/tag_limit/tag_limit_test.go @@ -148,6 +148,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/template/template_test.go b/plugins/processors/template/template_test.go index 026c71b6cb58b..b8a3be5b7f5cc 100644 --- a/plugins/processors/template/template_test.go +++ b/plugins/processors/template/template_test.go @@ -292,12 +292,16 @@ func TestTracking(t *testing.T) { testutil.RequireMetricsEqual(t, expected, actual) // Simulate output acknowledging delivery - input.Accept() + for _, m := range actual { + m.Accept() + } + + // Check delivery // Check delivery require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(delivered) > 0 + return len(delivered) == 1 }, time.Second, 100*time.Millisecond, "%d delivered but 1 expected", len(delivered)) } diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index de148a158a1b0..a03e662c7a495 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -576,6 +576,6 @@ func TestTracking(t *testing.T) { require.Eventuallyf(t, func() bool { mu.Lock() defer mu.Unlock() - return len(expected) == len(delivered) + return len(input) == len(delivered) }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(expected)) } diff --git a/plugins/processors/unpivot/unpivot.go b/plugins/processors/unpivot/unpivot.go index 7163d1b28ffa7..3f41d6bb7b9ba 100644 --- a/plugins/processors/unpivot/unpivot.go +++ b/plugins/processors/unpivot/unpivot.go @@ -64,7 +64,12 @@ func (p *Unpivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric { results := make([]telegraf.Metric, 0, fieldCount) for _, m := range metrics { - base := copyWithoutFields(m) + base := m + if wm, ok := m.(telegraf.UnwrappableMetric); ok { + base = wm.Unwrap() + } + base = copyWithoutFields(base) + for _, field := range m.FieldList() { newMetric := base.Copy() newMetric.AddField(p.ValueKey, field.Value) diff --git a/plugins/processors/unpivot/unpivot_test.go b/plugins/processors/unpivot/unpivot_test.go index 6d3c8795b981c..0152513159ad8 100644 --- a/plugins/processors/unpivot/unpivot_test.go +++ b/plugins/processors/unpivot/unpivot_test.go @@ -1,10 +1,13 @@ package unpivot import ( + "strconv" + "sync" "testing" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -219,3 +222,43 @@ func TestUnpivot_fieldMode(t *testing.T) { }) } } + +func TestTrackedMetricNotLost(t *testing.T) { + var mu sync.Mutex + delivered := make([]telegraf.DeliveryInfo, 0, 3) + notify := func(di telegraf.DeliveryInfo) { + mu.Lock() + defer 
mu.Unlock() + delivered = append(delivered, di) + } + input := make([]telegraf.Metric, 0, 3) + expected := make([]telegraf.Metric, 0, 6) + for i := 0; i < 3; i++ { + strI := strconv.Itoa(i) + + m := metric.New("m"+strI, map[string]string{}, map[string]interface{}{"x": int64(1), "y": int64(2)}, time.Unix(0, 0)) + tm, _ := metric.WithTracking(m, notify) + input = append(input, tm) + + unpivot1 := metric.New("m"+strI, map[string]string{"name": "x"}, map[string]interface{}{"value": int64(1)}, time.Unix(0, 0)) + unpivot2 := metric.New("m"+strI, map[string]string{"name": "y"}, map[string]interface{}{"value": int64(2)}, time.Unix(0, 0)) + expected = append(expected, unpivot1, unpivot2) + } + + // Process expected metrics and compare with resulting metrics + plugin := &Unpivot{TagKey: "name", ValueKey: "value"} + actual := plugin.Apply(input...) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) + + // Simulate output acknowledging delivery + for _, m := range actual { + m.Accept() + } + + // Check delivery + require.Eventuallyf(t, func() bool { + mu.Lock() + defer mu.Unlock() + return len(input) == len(delivered) + }, time.Second, 100*time.Millisecond, "%d delivered but %d expected", len(delivered), len(input)) +}
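
Note for reviewers of the websocket change above: the `headers` option now holds `config.Secret` values rather than plain strings, and `Connect()` resolves each secret only for as long as it takes to set the HTTP header. The sketch below is a minimal, self-contained illustration of that pattern, using only the calls visible in the diff (`config.NewSecret`, `Secret.Get`, `String`, `Destroy`); the helper name `resolveSecretHeaders` and the example values are ours, not part of the plugin.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/influxdata/telegraf/config"
)

// resolveSecretHeaders is an illustrative helper (not part of the plugin):
// it turns a map of secret-backed header values into an http.Header,
// releasing each resolved secret as soon as its value has been copied.
func resolveSecretHeaders(in map[string]*config.Secret) (http.Header, error) {
	headers := http.Header{}
	for k, v := range in {
		secret, err := v.Get()
		if err != nil {
			return nil, fmt.Errorf("getting header secret %q failed: %w", k, err)
		}
		headers.Set(k, secret.String())
		secret.Destroy()
	}
	return headers, nil
}

func main() {
	// In tests the secret is constructed directly, mirroring websocket_test.go.
	token := config.NewSecret([]byte("Bearer s3cr3t"))
	h, err := resolveSecretHeaders(map[string]*config.Secret{"Authorization": &token})
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Get("Authorization"))
}
```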
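
Note on the tracking-metric test changes above: the processor tests now all follow the same contract. A processor that fans one metric out into several (unpivot, for example) must copy from the unwrapped metric, via `telegraf.UnwrappableMetric.Unwrap()`, so the tracking wrapper is not duplicated, and the tests acknowledge the *returned* metrics and then wait until the number of delivery notifications matches the number of tracked inputs. The condensed sketch below shows that test pattern using the unpivot processor from the diff; the package name and import path are assumptions based on the repository layout, and the exact assertions are reduced for brevity.

```go
// Hypothetical example package; in the repository this pattern lives in the
// individual processors' *_test.go files.
package example

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/processors/unpivot"
)

func TestTrackingSketch(t *testing.T) {
	// Collect delivery notifications emitted for the tracked metric.
	var mu sync.Mutex
	var delivered []telegraf.DeliveryInfo
	notify := func(di telegraf.DeliveryInfo) {
		mu.Lock()
		defer mu.Unlock()
		delivered = append(delivered, di)
	}

	// Wrap a plain metric so its delivery can be tracked.
	m := metric.New("m",
		map[string]string{},
		map[string]interface{}{"x": int64(1), "y": int64(2)},
		time.Unix(0, 0))
	tracked, _ := metric.WithTracking(m, notify)

	// Fan the single tracked input out into one metric per field.
	plugin := &unpivot.Unpivot{TagKey: "name", ValueKey: "value"}
	actual := plugin.Apply(tracked)

	// Downstream plugins acknowledge the processor's output, not its input.
	for _, out := range actual {
		out.Accept()
	}

	// One tracked input yields exactly one delivery notification,
	// no matter how many metrics the processor emitted.
	require.Eventually(t, func() bool {
		mu.Lock()
		defer mu.Unlock()
		return len(delivered) == 1
	}, time.Second, 100*time.Millisecond)
}
```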