From 72b576bf373ad920289746c6fb9062b6e823f5e6 Mon Sep 17 00:00:00 2001 From: Ricardo Rodriguez Date: Mon, 4 Feb 2019 13:02:35 +0100 Subject: [PATCH] Add collector to get table stats grouped by schema (#354) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add collector to get table stats grouped by schema Signed-off-by: Ricardo Rodríguez --- CHANGELOG.md | 1 + README.md | 1 + collector/info_schema_schemastats.go | 128 ++++++++++++++++++++++ collector/info_schema_schemastats_test.go | 68 ++++++++++++ mysqld_exporter.go | 1 + 5 files changed, 199 insertions(+) create mode 100644 collector/info_schema_schemastats.go create mode 100644 collector/info_schema_schemastats_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index bc63c192..7dd82c4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ The minimum supported MySQL version is now 5.5. * [FEATURE] Add by_user and by_host metrics to info_schema.processlist collector (PR #333) #334 * [FEATURE] Add wsrep_evs_repl_latency metric collecting. (PR #338) * [FEATURE] Add collector for mysql.user (PR #341) +* [FEATURE] Add collector to get table stats grouped by schema (PR #354) ## 0.11.0 / 2018-06-29 diff --git a/README.md b/README.md index 26f68bc5..e513ee0a 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,7 @@ collect.info_schema.query_response_time | 5.5 | Collect collect.info_schema.tables | 5.1 | Collect metrics from information_schema.tables (Enabled by default) collect.info_schema.tables.databases | 5.1 | The list of databases to collect table stats for, or '`*`' for all. collect.info_schema.tablestats | 5.1 | If running with userstat=1, set to true to collect table statistics. +collect.info_schema.schemastats | 5.1 | If running with userstat=1, set to true to collect schema statistics collect.info_schema.userstats | 5.1 | If running with userstat=1, set to true to collect user statistics. 
collect.perf_schema.eventsstatements | 5.6 | Collect metrics from performance_schema.events_statements_summary_by_digest. collect.perf_schema.eventsstatements.digest_text_limit | 5.6 | Maximum length of the normalized statement text. (default: 120) diff --git a/collector/info_schema_schemastats.go b/collector/info_schema_schemastats.go new file mode 100644 index 00000000..ef03effa --- /dev/null +++ b/collector/info_schema_schemastats.go @@ -0,0 +1,128 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Scrape `information_schema.table_statistics`. + +package collector + +import ( + "context" + "database/sql" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/log" +) + +const schemaStatQuery = ` + SELECT + TABLE_SCHEMA, + SUM(ROWS_READ) AS ROWS_READ, + SUM(ROWS_CHANGED) AS ROWS_CHANGED, + SUM(ROWS_CHANGED_X_INDEXES) AS ROWS_CHANGED_X_INDEXES + FROM information_schema.TABLE_STATISTICS + GROUP BY TABLE_SCHEMA; + ` + +// Metric descriptors. 
+var ( + infoSchemaStatsRowsReadDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, informationSchema, "schema_statistics_rows_read_total"), + "The number of rows read from the schema.", + []string{"schema"}, nil, + ) + infoSchemaStatsRowsChangedDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, informationSchema, "schema_statistics_rows_changed_total"), + "The number of rows changed in the schema.", + []string{"schema"}, nil, + ) + infoSchemaStatsRowsChangedXIndexesDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, informationSchema, "schema_statistics_rows_changed_x_indexes_total"), + "The number of rows changed in the schema, multiplied by the number of indexes changed.", + []string{"schema"}, nil, + ) +) + +// ScrapeSchemaStat collects from `information_schema.table_statistics` grouped by schema. +type ScrapeSchemaStat struct{} + +// Name of the Scraper. Should be unique. +func (ScrapeSchemaStat) Name() string { + return "info_schema.schemastats" +} + +// Help describes the role of the Scraper. +func (ScrapeSchemaStat) Help() string { + return "If running with userstat=1, set to true to collect schema statistics" +} + +// Version of MySQL from which scraper is available. +func (ScrapeSchemaStat) Version() float64 { + return 5.1 +} + +// Scrape collects data from database connection and sends it over channel as prometheus metric. 
+func (ScrapeSchemaStat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error { + var varName, varVal string + + err := db.QueryRowContext(ctx, userstatCheckQuery).Scan(&varName, &varVal) + if err != nil { + log.Debugln("Detailed schema stats are not available.") + return nil + } + if varVal == "OFF" { + log.Debugf("MySQL @@%s is OFF.", varName) + return nil + } + + informationSchemaTableStatisticsRows, err := db.QueryContext(ctx, schemaStatQuery) + if err != nil { + return err + } + defer informationSchemaTableStatisticsRows.Close() + + var ( + tableSchema string + rowsRead uint64 + rowsChanged uint64 + rowsChangedXIndexes uint64 + ) + + for informationSchemaTableStatisticsRows.Next() { + err = informationSchemaTableStatisticsRows.Scan( + &tableSchema, + &rowsRead, + &rowsChanged, + &rowsChangedXIndexes, + ) + + if err != nil { + return err + } + ch <- prometheus.MustNewConstMetric( + infoSchemaStatsRowsReadDesc, prometheus.CounterValue, float64(rowsRead), + tableSchema, + ) + ch <- prometheus.MustNewConstMetric( + infoSchemaStatsRowsChangedDesc, prometheus.CounterValue, float64(rowsChanged), + tableSchema, + ) + ch <- prometheus.MustNewConstMetric( + infoSchemaStatsRowsChangedXIndexesDesc, prometheus.CounterValue, float64(rowsChangedXIndexes), + tableSchema, + ) + } + return nil +} + +// check interface +var _ Scraper = ScrapeSchemaStat{} diff --git a/collector/info_schema_schemastats_test.go b/collector/info_schema_schemastats_test.go new file mode 100644 index 00000000..0e2819bc --- /dev/null +++ b/collector/info_schema_schemastats_test.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "context" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/smartystreets/goconvey/convey" + "gopkg.in/DATA-DOG/go-sqlmock.v1" +) + +func TestScrapeSchemaStat(t *testing.T) { + db, mock, err := sqlmock.New() + if err != nil { + t.Fatalf("error opening a stub database connection: %s", err) + } + defer db.Close() + + mock.ExpectQuery(sanitizeQuery(userstatCheckQuery)).WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). + AddRow("userstat", "ON")) + + columns := []string{"TABLE_SCHEMA", "ROWS_READ", "ROWS_CHANGED", "ROWS_CHANGED_X_INDEXES"} + rows := sqlmock.NewRows(columns). + AddRow("mysql", 238, 0, 8). 
+ AddRow("default", 99, 1, 0)
+ mock.ExpectQuery(sanitizeQuery(schemaStatQuery)).WillReturnRows(rows)
+
+ ch := make(chan prometheus.Metric)
+ go func() {
+ if err = (ScrapeSchemaStat{}).Scrape(context.Background(), db, ch); err != nil {
+ t.Errorf("error calling function on test: %s", err)
+ }
+ close(ch)
+ }()
+
+ expected := []MetricResult{
+ {labels: labelMap{"schema": "mysql"}, value: 238},
+ {labels: labelMap{"schema": "mysql"}, value: 0},
+ {labels: labelMap{"schema": "mysql"}, value: 8},
+ {labels: labelMap{"schema": "default"}, value: 99},
+ {labels: labelMap{"schema": "default"}, value: 1},
+ {labels: labelMap{"schema": "default"}, value: 0},
+ }
+ convey.Convey("Metrics comparison", t, func() {
+ for _, expect := range expected {
+ got := readMetric(<-ch)
+ convey.So(expect, convey.ShouldResemble, got)
+ }
+ })
+
+ // Ensure all SQL queries were executed
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("there were unfulfilled expectations: %s", err)
+ }
+}
diff --git a/mysqld_exporter.go b/mysqld_exporter.go
index 9d1c8d06..a5cb309e 100644
--- a/mysqld_exporter.go
+++ b/mysqld_exporter.go
@@ -79,6 +79,7 @@ var scrapers = map[collector.Scraper]bool{
 collector.ScrapeUserStat{}: false,
 collector.ScrapeClientStat{}: false,
 collector.ScrapeTableStat{}: false,
+ collector.ScrapeSchemaStat{}: false,
 collector.ScrapeInnodbCmp{}: true,
 collector.ScrapeInnodbCmpMem{}: true,
 collector.ScrapeQueryResponseTime{}: true,