Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

infoschema: add metrics_tables that contains all metric table definitions #14721

Merged
merged 8 commits into from
Feb 11, 2020
11 changes: 6 additions & 5 deletions executor/cluster_reader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ import (
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/util/pdapi"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testutil"
pmodel "github.com/prometheus/common/model"
"google.golang.org/grpc"
)
Expand Down Expand Up @@ -85,15 +86,15 @@ func (s *testClusterReaderSuite) TestMetricTableData(c *C) {
rs, err := tk.Se.Execute(ctx, "select * from tidb_query_duration;")
c.Assert(err, IsNil)
result := tk.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows(
"2019-12-23 20:11:35.000000 127.0.0.1:10080 0.9 0.1"))
result.Check(testutil.RowsWithSep("|",
"2019-12-23 20:11:35.000000|127.0.0.1:10080| 0.9|0.1|The quantile of TiDB query durations(second)"))

rs, err = tk.Se.Execute(ctx, "select * from tidb_query_duration where quantile in (0.85, 0.95);")
rs, err = tk.Se.Execute(ctx, "select time,instance,quantile,value from tidb_query_duration where quantile in (0.85, 0.95);")
c.Assert(err, IsNil)
result = tk.ResultSetToResultWithCtx(ctx, rs[0], Commentf("execute sql fail"))
result.Check(testkit.Rows(
"2019-12-23 20:11:35.000000 127.0.0.1:10080 0.85 0.1",
"2019-12-23 20:11:35.000000 127.0.0.1:10080 0.95 0.1"))
"2019-12-23 20:11:35.000000 127.0.0.1:10080 0.85 0.1",
"2019-12-23 20:11:35.000000 127.0.0.1:10080 0.95 0.1"))
}

func (s *testClusterReaderSuite) TestTiDBClusterConfig(c *C) {
Expand Down
34 changes: 24 additions & 10 deletions executor/metric_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import (
"fmt"
"math"
"net/url"
"sort"
"strings"
"time"

Expand Down Expand Up @@ -76,7 +77,7 @@ func (e *MetricRetriever) retrieve(ctx context.Context, sctx sessionctx.Context)
if err != nil {
return nil, err
}
partRows := e.genRows(queryValue, queryRange, quantile)
partRows := e.genRows(queryValue, quantile)
totalRows = append(totalRows, partRows...)
}
return totalRows, nil
Expand Down Expand Up @@ -127,22 +128,22 @@ func (e *MetricRetriever) getQueryRange(sctx sessionctx.Context) promQLQueryRang
return promQLQueryRange{Start: startTime, End: endTime, Step: step}
}

func (e *MetricRetriever) genRows(value pmodel.Value, r promQLQueryRange, quantile float64) [][]types.Datum {
func (e *MetricRetriever) genRows(value pmodel.Value, quantile float64) [][]types.Datum {
var rows [][]types.Datum
switch value.Type() {
case pmodel.ValMatrix:
matrix := value.(pmodel.Matrix)
for _, m := range matrix {
for _, v := range m.Values {
record := e.genRecord(m.Metric, v, r, quantile)
record := e.genRecord(m.Metric, v, quantile)
rows = append(rows, record)
}
}
}
return rows
}

func (e *MetricRetriever) genRecord(metric pmodel.Metric, pair pmodel.SamplePair, r promQLQueryRange, quantile float64) []types.Datum {
func (e *MetricRetriever) genRecord(metric pmodel.Metric, pair pmodel.SamplePair, quantile float64) []types.Datum {
record := make([]types.Datum, 0, 2+len(e.tblDef.Labels)+1)
// Record order should keep same with genColumnInfos.
record = append(record, types.NewTimeDatum(types.NewTime(
Expand All @@ -168,6 +169,7 @@ func (e *MetricRetriever) genRecord(metric pmodel.Metric, pair pmodel.SamplePair
} else {
record = append(record, types.NewFloat64Datum(float64(pair.Value)))
}
record = append(record, types.NewStringDatum(e.tblDef.Comment))
return record
}

Expand Down Expand Up @@ -216,7 +218,13 @@ func (e *MetricSummaryRetriever) retrieve(ctx context.Context, sctx sessionctx.C
}
startTime := e.extractor.StartTime.Format(plannercore.MetricTableTimeFormat)
endTime := e.extractor.EndTime.Format(plannercore.MetricTableTimeFormat)
for name, def := range infoschema.MetricTableMap {
tables := make([]string, 0, len(infoschema.MetricTableMap))
for name := range infoschema.MetricTableMap {
tables = append(tables, name)
}
sort.Strings(tables)
for _, name := range tables {
def := infoschema.MetricTableMap[name]
sqls := e.genMetricQuerySQLS(name, startTime, endTime, def.Quantile, quantiles)
for _, sql := range sqls {
rows, _, err := sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql)
Expand All @@ -233,12 +241,12 @@ func (e *MetricSummaryRetriever) retrieve(ctx context.Context, sctx sessionctx.C

func (e *MetricSummaryRetriever) genMetricQuerySQLS(name, startTime, endTime string, quantile float64, quantiles []float64) []string {
if quantile == 0 {
sql := fmt.Sprintf(`select "%s",min(time),sum(value),avg(value),min(value),max(value) from metric_schema.%s where time > '%s' and time < '%s'`, name, name, startTime, endTime)
sql := fmt.Sprintf(`select "%s",min(time),sum(value),avg(value),min(value),max(value),comment from metric_schema.%s where time > '%s' and time < '%s'`, name, name, startTime, endTime)
return []string{sql}
}
sqls := []string{}
for _, quantile := range quantiles {
sql := fmt.Sprintf(`select "%s_%v",min(time),sum(value),avg(value),min(value),max(value) from metric_schema.%s where time > '%s' and time < '%s' and quantile=%v`, name, quantile, name, startTime, endTime, quantile)
sql := fmt.Sprintf(`select "%s_%v",min(time),sum(value),avg(value),min(value),max(value),comment from metric_schema.%s where time > '%s' and time < '%s' and quantile=%v`, name, quantile, name, startTime, endTime, quantile)
sqls = append(sqls, sql)
}
return sqls
Expand All @@ -265,7 +273,13 @@ func (e *MetricSummaryByLabelRetriever) retrieve(ctx context.Context, sctx sessi
}
startTime := e.extractor.StartTime.Format(plannercore.MetricTableTimeFormat)
endTime := e.extractor.EndTime.Format(plannercore.MetricTableTimeFormat)
for name, def := range infoschema.MetricTableMap {
tables := make([]string, 0, len(infoschema.MetricTableMap))
for name := range infoschema.MetricTableMap {
tables = append(tables, name)
}
sort.Strings(tables)
for _, name := range tables {
def := infoschema.MetricTableMap[name]
sqls := e.genMetricQuerySQLS(name, startTime, endTime, quantiles, def)
for _, sql := range sqls {
rows, _, err := sctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql)
Expand Down Expand Up @@ -294,10 +308,10 @@ func (e *MetricSummaryByLabelRetriever) genMetricQuerySQLS(name, startTime, endT
for _, quantile := range quantiles {
var sql string
if quantile == 0 {
sql = fmt.Sprintf(`select "%[1]s", %[2]s as label,min(time),sum(value),avg(value),min(value),max(value) from metric_schema.%[1]s where time > '%[3]s' and time < '%[4]s'`,
sql = fmt.Sprintf(`select "%[1]s", %[2]s as label,min(time),sum(value),avg(value),min(value),max(value),comment from metric_schema.%[1]s where time > '%[3]s' and time < '%[4]s'`,
name, labelsColumn, startTime, endTime)
} else {
sql = fmt.Sprintf(`select "%[1]s_%[5]v", %[2]s as label,min(time),sum(value),avg(value),min(value),max(value) from metric_schema.%[1]s where time > '%[3]s' and time < '%[4]s' and quantile=%[5]v`,
sql = fmt.Sprintf(`select "%[1]s_%[5]v", %[2]s as label,min(time),sum(value),avg(value),min(value),max(value),comment from metric_schema.%[1]s where time > '%[3]s' and time < '%[4]s' and quantile=%[5]v`,
name, labelsColumn, startTime, endTime, quantile)
}
if len(def.Labels) > 0 {
Expand Down
1 change: 1 addition & 0 deletions infoschema/metric_schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ func (def *MetricTableDef) genColumnInfos() []columnInfo {
cols = append(cols, columnInfo{"quantile", mysql.TypeDouble, 22, 0, defaultValue, nil})
}
cols = append(cols, columnInfo{"value", mysql.TypeDouble, 22, 0, nil, nil})
cols = append(cols, columnInfo{"comment", mysql.TypeVarchar, 256, 0, nil, nil})
return cols
}

Expand Down
39 changes: 39 additions & 0 deletions infoschema/tables.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,8 @@ const (
tableTiFlashReplica = "TIFLASH_REPLICA"
// TableInspectionResult is the string constant of inspection result table
TableInspectionResult = "INSPECTION_RESULT"
// TableMetricTables is a table that contains all metrics table definition.
TableMetricTables = "METRICS_TABLES"
// TableMetricSummary is a summary table that contains all metrics.
TableMetricSummary = "METRICS_SUMMARY"
// TableMetricSummaryByLabel is a metric table that contains all metrics that group by label info.
Expand Down Expand Up @@ -166,6 +168,7 @@ var tableIDMap = map[string]int64{
TableInspectionResult: autoid.InformationSchemaDBID + 51,
TableMetricSummary: autoid.InformationSchemaDBID + 52,
TableMetricSummaryByLabel: autoid.InformationSchemaDBID + 53,
TableMetricTables: autoid.InformationSchemaDBID + 54,
}

type columnInfo struct {
Expand Down Expand Up @@ -1131,14 +1134,24 @@ var tableInspectionResultCols = []columnInfo{
{"DETAILS", mysql.TypeVarchar, 256, 0, nil, nil},
}

// tableMetricTablesCols describes the schema of the
// information_schema.METRICS_TABLES table, which exposes every metric table
// definition (name, PromQL expression, labels, quantile and comment).
var tableMetricTablesCols = []columnInfo{
	{"TABLE_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
	// PromQL expressions routinely exceed 64 characters (e.g. the tidb_qps
	// definition asserted in tables_test.go is ~95 chars), so give the
	// column enough room instead of the original varchar(64).
	{"PROMQL", mysql.TypeVarchar, 256, 0, nil, nil},
	{"LABELS", mysql.TypeVarchar, 64, 0, nil, nil},
	{"QUANTILE", mysql.TypeDouble, 22, 0, nil, nil},
	{"COMMENT", mysql.TypeVarchar, 256, 0, nil, nil},
}

// tableMetricSummaryCols describes the schema of the
// information_schema.METRICS_SUMMARY table: aggregated statistics
// (sum/avg/min/max) of each metric over the queried time range.
var tableMetricSummaryCols = []columnInfo{
{"METRIC_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
{"TIME", mysql.TypeDatetime, -1, 0, nil, nil},
{"SUM_VALUE", mysql.TypeDouble, 22, 0, nil, nil},
{"AVG_VALUE", mysql.TypeDouble, 22, 0, nil, nil},
{"MIN_VALUE", mysql.TypeDouble, 22, 0, nil, nil},
{"MAX_VALUE", mysql.TypeDouble, 22, 0, nil, nil},
{"COMMENT", mysql.TypeVarchar, 256, 0, nil, nil},
}

var tableMetricSummaryByLabelCols = []columnInfo{
{"METRIC_NAME", mysql.TypeVarchar, 64, 0, nil, nil},
{"LABEL", mysql.TypeVarchar, 64, 0, nil, nil},
Expand All @@ -1147,6 +1160,7 @@ var tableMetricSummaryByLabelCols = []columnInfo{
{"AVG_VALUE", mysql.TypeDouble, 22, 0, nil, nil},
{"MIN_VALUE", mysql.TypeDouble, 22, 0, nil, nil},
{"MAX_VALUE", mysql.TypeDouble, 22, 0, nil, nil},
{"COMMENT", mysql.TypeVarchar, 256, 0, nil, nil},
}

func dataForSchemata(ctx sessionctx.Context, schemas []*model.DBInfo) [][]types.Datum {
Expand Down Expand Up @@ -2309,6 +2323,28 @@ func dataForTableTiFlashReplica(schemas []*model.DBInfo) [][]types.Datum {
return rows
}

// dataForMetricTables constructs the rows of information_schema.METRICS_TABLES:
// one row per entry of MetricTableMap, ordered by table name so repeated
// queries return deterministic output. (The previous doc comment named
// dataForTableTiFlashReplica — a copy-paste slip.)
func dataForMetricTables(ctx sessionctx.Context) [][]types.Datum {
	// Collect and sort the map keys first: Go map iteration order is random.
	tables := make([]string, 0, len(MetricTableMap))
	for name := range MetricTableMap {
		tables = append(tables, name)
	}
	sort.Strings(tables)
	rows := make([][]types.Datum, 0, len(tables))
	for _, name := range tables {
		def := MetricTableMap[name]
		// Column order must match tableMetricTablesCols.
		record := types.MakeDatums(
			name,                          // TABLE_NAME
			def.PromQL,                    // PROMQL
			strings.Join(def.Labels, ","), // LABELS
			def.Quantile,                  // QUANTILE
			def.Comment,                   // COMMENT
		)
		rows = append(rows, record)
	}
	return rows
}

var tableNameToColumns = map[string][]columnInfo{
tableSchemata: schemataCols,
tableTables: tablesCols,
Expand Down Expand Up @@ -2360,6 +2396,7 @@ var tableNameToColumns = map[string][]columnInfo{
TableInspectionResult: tableInspectionResultCols,
TableMetricSummary: tableMetricSummaryCols,
TableMetricSummaryByLabel: tableMetricSummaryByLabelCols,
TableMetricTables: tableMetricTablesCols,
}

func createInfoSchemaTable(_ autoid.Allocators, meta *model.TableInfo) (table.Table, error) {
Expand Down Expand Up @@ -2469,6 +2506,8 @@ func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column)
fullRows, err = dataForTiDBClusterInfo(ctx)
case tableTiFlashReplica:
fullRows = dataForTableTiFlashReplica(dbs)
case TableMetricTables:
fullRows = dataForMetricTables(ctx)
// Data for cluster memory table.
case clusterTableSlowLog, clusterTableProcesslist:
fullRows, err = getClusterMemTableRows(ctx, it.meta.Name.O)
Expand Down
9 changes: 9 additions & 0 deletions infoschema/tables_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -974,6 +974,15 @@ func (s *testTableSuite) TestForTableTiFlashReplica(c *C) {
tk.MustQuery("select TABLE_SCHEMA,TABLE_NAME,REPLICA_COUNT,LOCATION_LABELS,AVAILABLE from information_schema.tiflash_replica").Check(testkit.Rows("test t 2 a,b 1"))
}

// TestForMetricTables checks that information_schema.METRICS_TABLES is
// populated and that a known definition (tidb_qps) round-trips with the
// expected PROMQL/LABELS/QUANTILE/COMMENT values.
func (s *testClusterTableSuite) TestForMetricTables(c *C) {
tk := testkit.NewTestKit(c, s.store)
// NOTE(review): ClearHistoryJobs looks unrelated to metric tables —
// presumably copied from a neighbouring DDL test; confirm it is needed.
statistics.ClearHistoryJobs()
tk.MustExec("use information_schema")
tk.MustQuery("select count(*) > 0 from `METRICS_TABLES`").Check(testkit.Rows("1"))
tk.MustQuery("select * from `METRICS_TABLES` where table_name='tidb_qps'").
Check(testutil.RowsWithSep("|", "tidb_qps|sum(rate(tidb_server_query_total{$LABEL_CONDITIONS}[$RANGE_DURATION])) by (result,type,instance)|instance,type,result|0|TiDB query processing numbers per second"))
}

func (s *testClusterTableSuite) TestForClusterServerInfo(c *C) {
tk := testkit.NewTestKit(c, s.store)
instances := []string{
Expand Down