Skip to content

Commit

Permalink
delete shared metrics between requests
Browse files Browse the repository at this point in the history
Signed-off-by: qizhicheng <qizhicheng@douban.com>
  • Loading branch information
LeoQuote committed Apr 10, 2023
1 parent fad2c0c commit 5632e89
Show file tree
Hide file tree
Showing 4 changed files with 54 additions and 81 deletions.
122 changes: 48 additions & 74 deletions collector/exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,29 +62,23 @@ var (
).Default("false").Bool()
)

// Metric descriptors.
var (
// scrapeDurationDesc is the package-level (hence shared across all Exporter
// instances) descriptor for the
// <namespace>_<exporter>_collector_duration_seconds gauge, reporting
// per-collector scrape duration with a single "collector" label.
// NOTE(review): the surrounding diff replaces this shared descriptor with a
// per-Exporter scrapeDurationSeconds field — confirm against the post-commit file.
scrapeDurationDesc = prometheus.NewDesc(
prometheus.BuildFQName(namespace, exporter, "collector_duration_seconds"),
"Collector time duration.",
[]string{"collector"}, nil,
)
)

// Compile-time assertion that *Exporter satisfies prometheus.Collector.
var _ prometheus.Collector = (*Exporter)(nil)

// Exporter collects MySQL metrics. It implements prometheus.Collector.
// NOTE(review): this span interleaves the pre- and post-change field lists of
// a diff whose +/- markers were lost — ctx/logger/dsn/scrapers appear twice,
// and the removed shared `metrics Metrics` field sits next to the four new
// per-instance *prometheus.Desc fields (up, scrapeFailed,
// scrapeDurationSeconds, scrapeCollectorSuccess). As pasted it would not
// compile; the post-commit struct keeps one copy of the common fields plus
// the Desc fields. Confirm against the repository.
type Exporter struct {
ctx context.Context
logger log.Logger
dsn string
scrapers []Scraper
metrics Metrics
ctx context.Context
logger log.Logger
dsn string
scrapers []Scraper
up *prometheus.Desc
scrapeFailed *prometheus.Desc
scrapeDurationSeconds *prometheus.Desc
scrapeCollectorSuccess *prometheus.Desc
}

// New returns a new MySQL exporter for the provided DSN.
func New(ctx context.Context, dsn string, metrics Metrics, scrapers []Scraper, logger log.Logger) *Exporter {
func New(ctx context.Context, dsn string, scrapers []Scraper, logger log.Logger) *Exporter {
// Setup extra params for the DSN, default to having a lock timeout.
dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}

Expand All @@ -104,37 +98,52 @@ func New(ctx context.Context, dsn string, metrics Metrics, scrapers []Scraper, l
logger: logger,
dsn: dsn,
scrapers: scrapers,
metrics: metrics,
up: prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "up"),
"Whether the MySQL server is up.",
nil,
nil,
),
scrapeFailed: prometheus.NewDesc(
prometheus.BuildFQName(namespace, exporter, "last_scrape_failed"),
"Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).",
nil,
nil,
),
scrapeCollectorSuccess: prometheus.NewDesc(
prometheus.BuildFQName(namespace, exporter, "collector_success"),
"mysqld_exporter: Whether a collector succeeded.",
[]string{"collector"},
nil,
),
scrapeDurationSeconds: prometheus.NewDesc(
prometheus.BuildFQName(namespace, exporter, "collector_duration_seconds"),
"Collector time duration.",
[]string{"collector"}, nil,
),
}
}

// Describe implements prometheus.Collector.
// NOTE(review): the four e.metrics.* sends are the deleted pre-change body and
// the four e.up/e.scrapeFailed/e.scrapeDurationSeconds/e.scrapeCollectorSuccess
// sends are the post-change body — this paste shows both halves of the diff,
// so only one group belongs in the real function.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
ch <- e.metrics.TotalScrapes.Desc()
ch <- e.metrics.Error.Desc()
e.metrics.ScrapeErrors.Describe(ch)
ch <- e.metrics.MySQLUp.Desc()
ch <- e.up
ch <- e.scrapeFailed
ch <- e.scrapeDurationSeconds
ch <- e.scrapeCollectorSuccess
}

// Collect implements prometheus.Collector. It performs a full scrape on every
// call; per the commit message, all metrics become per-scrape const metrics
// emitted inside scrape(), so no mutable state is shared between HTTP requests.
// NOTE(review): the four e.metrics.* sends below are the deleted pre-change
// body retained by this diff paste — the post-commit function contains only
// the e.scrape call.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.scrape(e.ctx, ch)

ch <- e.metrics.TotalScrapes
ch <- e.metrics.Error
e.metrics.ScrapeErrors.Collect(ch)
ch <- e.metrics.MySQLUp
}

func (e *Exporter) scrape(ctx context.Context, ch chan<- prometheus.Metric) {
e.metrics.TotalScrapes.Inc()
var err error

scrapeTime := time.Now()
db, err := sql.Open("mysql", e.dsn)
if err != nil {
level.Error(e.logger).Log("msg", "Error opening connection to database", "err", err)
e.metrics.Error.Set(1)
ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 0.0)
return
}
defer db.Close()
Expand All @@ -147,17 +156,17 @@ func (e *Exporter) scrape(ctx context.Context, ch chan<- prometheus.Metric) {

if err := db.PingContext(ctx); err != nil {
level.Error(e.logger).Log("msg", "Error pinging mysqld", "err", err)
e.metrics.MySQLUp.Set(0)
e.metrics.Error.Set(1)
ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 0.0)
ch <- prometheus.MustNewConstMetric(e.scrapeFailed, prometheus.GaugeValue, 0.0)
return
}

e.metrics.MySQLUp.Set(1)
e.metrics.Error.Set(0)
ch <- prometheus.MustNewConstMetric(e.up, prometheus.GaugeValue, 1.0)

ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection")
ch <- prometheus.MustNewConstMetric(e.scrapeDurationSeconds, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), "connection")

version := getMySQLVersion(db, e.logger)
lastScrapeError := 0.0
var wg sync.WaitGroup
defer wg.Wait()
for _, scraper := range e.scrapers {
Expand All @@ -170,14 +179,17 @@ func (e *Exporter) scrape(ctx context.Context, ch chan<- prometheus.Metric) {
defer wg.Done()
label := "collect." + scraper.Name()
scrapeTime := time.Now()
collectorSuccess := 1.0
if err := scraper.Scrape(ctx, db, ch, log.With(e.logger, "scraper", scraper.Name())); err != nil {
level.Error(e.logger).Log("msg", "Error from scraper", "scraper", scraper.Name(), "err", err)
e.metrics.ScrapeErrors.WithLabelValues(label).Inc()
e.metrics.Error.Set(1)
lastScrapeError = 1.0
collectorSuccess = 0.0
}
ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label)
ch <- prometheus.MustNewConstMetric(e.scrapeCollectorSuccess, prometheus.GaugeValue, collectorSuccess, label)
ch <- prometheus.MustNewConstMetric(e.scrapeDurationSeconds, prometheus.GaugeValue, time.Since(scrapeTime).Seconds(), label)
}(scraper)
}
ch <- prometheus.MustNewConstMetric(e.scrapeFailed, prometheus.GaugeValue, lastScrapeError)
}

func getMySQLVersion(db *sql.DB, logger log.Logger) float64 {
Expand All @@ -195,41 +207,3 @@ func getMySQLVersion(db *sql.DB, logger log.Logger) float64 {
}
return versionNum
}

// Metrics represents exporter metrics which values can be carried between http requests.
// NOTE(review): this type is deleted by the commit shown here — the exporter
// now emits per-scrape const metrics instead of mutating state shared across
// requests.
type Metrics struct {
// TotalScrapes counts every scrape attempt.
TotalScrapes prometheus.Counter
// ScrapeErrors counts scrape errors, labeled by "collector".
ScrapeErrors *prometheus.CounterVec
// Error records whether the last scrape failed (1) or succeeded (0).
Error prometheus.Gauge
// MySQLUp records whether the MySQL server was reachable (1) or not (0).
MySQLUp prometheus.Gauge
}

// NewMetrics creates new Metrics instance holding the exporter's self-metrics
// (scrape counters, last-scrape error flag, and the up gauge).
// NOTE(review): deleted by the commit shown here along with the Metrics type.
func NewMetrics() Metrics {
subsystem := exporter
return Metrics{
TotalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "scrapes_total",
Help: "Total number of times MySQL was scraped for metrics.",
}),
ScrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "scrape_errors_total",
Help: "Total number of times an error occurred scraping a MySQL.",
}, []string{"collector"}),
Error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from MySQL resulted in an error (1 for error, 0 for success).",
}),
// MySQLUp deliberately omits Subsystem, so the metric name is
// <namespace>_up rather than <namespace>_<exporter>_up.
MySQLUp: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the MySQL server is up.",
}),
}
}
1 change: 0 additions & 1 deletion collector/exporter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ func TestExporter(t *testing.T) {
exporter := New(
context.Background(),
dsn,
NewMetrics(),
[]Scraper{
ScrapeGlobalStatus{},
},
Expand Down
8 changes: 4 additions & 4 deletions mysqld_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ func init() {
prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
}

func newHandler(metrics collector.Metrics, scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
func newHandler(scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var dsn string
var err error
Expand Down Expand Up @@ -176,7 +176,7 @@ func newHandler(metrics collector.Metrics, scrapers []collector.Scraper, logger

registry := prometheus.NewRegistry()

registry.MustRegister(collector.New(ctx, dsn, metrics, filteredScrapers, logger))
registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))

gatherers := prometheus.Gatherers{
prometheus.DefaultGatherer,
Expand Down Expand Up @@ -230,7 +230,7 @@ func main() {
enabledScrapers = append(enabledScrapers, scraper)
}
}
handlerFunc := newHandler(collector.NewMetrics(), enabledScrapers, logger)
handlerFunc := newHandler(enabledScrapers, logger)
http.Handle(*metricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handlerFunc))
if *metricsPath != "/" && *metricsPath != "" {
landingConfig := web.LandingConfig{
Expand All @@ -251,7 +251,7 @@ func main() {
}
http.Handle("/", landingPage)
}
http.HandleFunc("/probe", handleProbe(collector.NewMetrics(), enabledScrapers, logger))
http.HandleFunc("/probe", handleProbe(enabledScrapers, logger))

srv := &http.Server{}
if err := web.ListenAndServe(srv, toolkitFlags, logger); err != nil {
Expand Down
4 changes: 2 additions & 2 deletions probe.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ import (
"github.com/prometheus/mysqld_exporter/collector"
)

func handleProbe(metrics collector.Metrics, scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
func handleProbe(scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
params := r.URL.Query()
Expand Down Expand Up @@ -57,7 +57,7 @@ func handleProbe(metrics collector.Metrics, scrapers []collector.Scraper, logger
filteredScrapers := filterScrapers(scrapers, collectParams)

registry := prometheus.NewRegistry()
registry.MustRegister(collector.New(ctx, dsn, metrics, filteredScrapers, logger))
registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))

h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
h.ServeHTTP(w, r)
Expand Down

0 comments on commit 5632e89

Please sign in to comment.