From f7343f51c1feeb92beff86aca2504b6bc690d787 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Thu, 30 Jan 2020 11:03:36 -0500
Subject: [PATCH 1/8] loki frontend docs additions

---
 docs/architecture.md                 |  32 +++-
 docs/configuration/query-frontend.md | 240 +++++++++++++++++++++++++++
 docs/overview/README.md              |   4 +
 3 files changed, 275 insertions(+), 1 deletion(-)
 create mode 100644 docs/configuration/query-frontend.md

diff --git a/docs/architecture.md b/docs/architecture.md
index 23cd8dd642490..611a3bea54e2d 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -29,7 +29,7 @@ mode.
 Monolithic mode is the default deployment of Loki when Loki is installed using
 Helm.
 
 When `target` is _not_ set to `all` (i.e., it is set to `querier`, `ingester`,
-or `distributor`), then Loki is said to be running in "horizontally scalable",
+`query-frontend`, or `distributor`), then Loki is said to be running in "horizontally scalable",
 or microservices, mode.
 
 Each component of Loki, such as the ingesters and distributors, communicate with
@@ -170,6 +170,36 @@ set of tokens. This process is used to avoid flushing all chunks when shutting
 down, which is a slow process.
 
+### Query frontend
+
+The **query frontend** is an **optional service** providing the querier's API endpoints and can be used to accelerate the read path. When the query frontend is in place, incoming query requests should be directed to the query frontend instead of the queriers. The querier service will still be required within the cluster in order to execute the actual queries.
+
+The query frontend internally performs some query adjustments and holds queries in an internal queue. In this setup, queriers act as workers which pull jobs from the queue, execute them, and return them to the query frontend for aggregation. Queriers need to be configured with the query frontend address (via the `-querier.frontend-address` CLI flag) in order to allow them to connect to the query frontends.
+
+Query frontends are **stateless**. However, due to how the internal queue works, it's recommended to run a few query frontend replicas to reap the benefit of fair scheduling. Two replicas should suffice in most cases.
+
+#### Queueing
+
+The query frontend queuing mechanism is used to:
+
+* Ensure that large queries, which could cause an out-of-memory (OOM) error in the querier, will be retried on failure. This allows administrators to under-provision memory for queries, or optimistically run more small queries in parallel, which helps to reduce the total cost of ownership (TCO).
+* Prevent multiple large requests from being convoyed on a single querier by distributing them across all queriers using a first-in/first-out (FIFO) queue.
+* Prevent a single tenant from denying service to (DoSing) other tenants by fairly scheduling queries between tenants.
+
+#### Splitting
+
+The query frontend splits larger queries into multiple smaller queries, executing them in parallel on downstream queriers and stitching the results back together again. This prevents large (e.g. multi-day) queries from causing out-of-memory issues in a single querier and helps to execute them faster.
+
+#### Caching
+
+##### Metric Queries
+
+The query frontend supports caching metric query results and reuses them on subsequent queries. If the cached results are incomplete, the query frontend calculates the required subqueries and executes them in parallel on downstream queriers.
+The query frontend can optionally align queries with their step parameter to improve the cacheability of the query results. The result cache is compatible with any Loki caching backend (currently memcached, redis, and an in-memory cache).
+
+##### Log Queries - Coming soon!
+
+Caching of log (filter, regexp) queries is under active development.
+
 ### Querier
 
 The **querier** service handles queries using the [LogQL](./logql.md) query
diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md
new file mode 100644
index 0000000000000..2c6d6a201f048
--- /dev/null
+++ b/docs/configuration/query-frontend.md
@@ -0,0 +1,240 @@
+## Disclaimer
+
+This aims to be a general purpose example. There are a number of substitutions to make for these to work correctly. These variables take the form of . Override them with specifics to your environment.
+
+## Configuration
+
+Use this shared config file to get the benefits of query parallelisation and caching with the query-frontend component. In addition to this configuration, start the querier and query-frontend components with `-target=querier` and `-target=query-frontend`, respectively.
+
+```yaml
+# Disable the requirement that every request to Cortex has a
+# X-Scope-OrgID header. `fake` will be substituted in instead.
+auth_enabled: false
+
+# We don't want the usual /api/prom prefix.
+http_prefix:
+
+server:
+  http_listen_port: 9091
+
+query_range:
+  # make queries more cache-able by aligning them with their step intervals
+  align_queries_with_step: true
+  max_retries: 5
+  # parallelize queries in 15min intervals
+  split_queries_by_interval: 15m
+  cache_results: true
+
+  results_cache:
+    max_freshness: 10m
+    cache:
+      # We're going to use the in-process "FIFO" cache, but you can enable
+      # memcached below.
+      enable_fifocache: true
+      fifocache:
+        size: 1024
+        validity: 24h
+
+      # If you want to use a memcached cluster, configure a headless service
+      # in Kubernetes and Cortex will discover the individual instances using
+      # a SRV DNS query. Cortex will then do client-side hashing to spread
+      # the load evenly.
+      # memcached:
+      #   memcached_client:
+      #     host: memcached.default.svc.cluster.local
+      #     service: memcached
+      #     consistent_hash: true
+
+frontend:
+  # 256 length tenant queues per frontend
+  max_outstanding_per_tenant: 256
+  log_queries_longer_than: 5s
+  compress_responses: true
+
+frontend_worker:
+  address: query-frontend..svc.cluster.local:9095
+  grpc_client_config:
+    max_send_msg_size: 1.048576e+08
+  parallelism: 8
+```
+
+
+## Kubernetes Deployment
+
+Here's an example k8s deployment which can consume the above configuration yaml. Note that the above configuration snippet should be merged with the rest of your configuration and exposed as the `loki` configmap.
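+
+For illustration only, a minimal sketch of what such a `loki` ConfigMap might look like is shown below; the namespace and the contents of `config.yaml` are assumptions to be replaced with your own:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  # must match the ConfigMap name referenced by the deployments below
+  name: loki
+  namespace: loki    # hypothetical namespace; substitute your own
+data:
+  config.yaml: |
+    auth_enabled: false
+    # ...the rest of your existing Loki configuration, merged with the
+    # query_range, frontend, and frontend_worker sections shown above...
+```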
+ +### Frontend Service +```yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + labels: + name: query-frontend + name: query-frontend + namespace: +spec: + ports: + - name: query-frontend-http-metrics + port: 80 + protocol: TCP + targetPort: 80 + - name: query-frontend-grpc + port: 9095 + protocol: TCP + targetPort: 9095 + selector: + name: query-frontend + sessionAffinity: None + type: ClusterIP +``` + +### Frontend Deployment + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: +spec: + minReadySeconds: 10 + progressDeadlineSeconds: 600 + replicas: 2 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + # see https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#kubernetes-io-hostname + topologyKey: kubernetes.io/hostname + containers: + - args: + - -config.file=/etc/loki/config.yaml + - -log.level=debug + - -target=query-frontend + image: grafana/loki: + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 80 + name: http-metrics + protocol: TCP + - containerPort: 9095 + name: grpc + protocol: TCP + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/loki + name: loki + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 420 + name: loki + name: loki +``` + +### Querier Deployment + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + annotations: + labels: + name: querier + name: querier + namespace: +spec: + minReadySeconds: 10 + progressDeadlineSeconds: 600 + replicas: 3 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -config.file=/etc/loki/config.yaml + - -log.level=debug + - -target=querier + image: grafana/loki: + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 80 + name: http-metrics + protocol: TCP + - containerPort: 9095 + name: grpc + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /ready + port: 80 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + limits: + memory: 8Gi + requests: + cpu: "2" + memory: 2Gi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/loki + name: loki + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 420 + name: loki + name: loki +``` diff --git a/docs/overview/README.md b/docs/overview/README.md index 5bffaacc192c6..129e10c8b8b09 100644 --- a/docs/overview/README.md +++ b/docs/overview/README.md @@ -125,6 +125,10 @@ logs stored in long-term storage. 
 It first tries to query all ingesters for in-memory data before falling back
 to loading data from the backend store.
 
+### Query frontend
+
+The **query-frontend** service is an optional component in front of a pool of queriers. It's responsible for fairly scheduling requests between them, parallelizing them when possible, and caching.
+
 ## Chunk Store
 
 The **chunk store** is Loki's long-term data store, designed to support

From 3cf641fdbf8eaad036684aefae54fb87ca46282e Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Thu, 30 Jan 2020 11:19:02 -0500
Subject: [PATCH 2/8] s/cortex/loki/

Co-Authored-By: Cyril Tovena
---
 docs/configuration/query-frontend.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md
index 2c6d6a201f048..98bcb7e8c7a2e 100644
--- a/docs/configuration/query-frontend.md
+++ b/docs/configuration/query-frontend.md
@@ -36,7 +36,7 @@ query_range:
         validity: 24h
 
       # If you want to use a memcached cluster, configure a headless service
-      # in Kubernetes and Cortex will discover the individual instances using
+      # in Kubernetes and Loki will discover the individual instances using
       # a SRV DNS query. Cortex will then do client-side hashing to spread
       # the load evenly.
       # memcached:
       #   memcached_client:
       #     host: memcached.default.svc.cluster.local
       #     service: memcached
       #     consistent_hash: true

From f798bb31bee94143fc941c4d29eaa97eeadcf84a Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Thu, 30 Jan 2020 11:19:11 -0500
Subject: [PATCH 3/8] Update docs/configuration/query-frontend.md

Co-Authored-By: Cyril Tovena
---
 docs/configuration/query-frontend.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md
index 98bcb7e8c7a2e..5d6aafd694029 100644
--- a/docs/configuration/query-frontend.md
+++ b/docs/configuration/query-frontend.md
@@ -37,7 +37,7 @@ query_range:
 
       # If you want to use a memcached cluster, configure a headless service
       # in Kubernetes and Loki will discover the individual instances using
-      # a SRV DNS query. Cortex will then do client-side hashing to spread
+      # a SRV DNS query. Loki will then do client-side hashing to spread
       # the load evenly.
       # memcached:
       #   memcached_client:

From ec3158b2c1384347575d4249d5e6806f8826ae69 Mon Sep 17 00:00:00 2001
From: Owen Diehl
Date: Thu, 30 Jan 2020 11:39:09 -0500
Subject: [PATCH 4/8] addresses pr comments

---
 docs/configuration/examples.md       |  5 ++
 docs/configuration/query-frontend.md | 96 ----------------------------
 2 files changed, 5 insertions(+), 96 deletions(-)

diff --git a/docs/configuration/examples.md b/docs/configuration/examples.md
index d48e6ef0c78ef..7da0e851d034d 100644
--- a/docs/configuration/examples.md
+++ b/docs/configuration/examples.md
@@ -4,6 +4,7 @@
 2. [Google Cloud Storage](#google-cloud-storage)
 3. [Cassandra Index](#cassandra-index)
 4. [AWS](#aws)
+5. 
[Using the query-frontend](#query-frontend) ## Complete Local config @@ -161,3 +162,7 @@ storage_config: s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name s3forcepathstyle: true ``` + +## Query Frontend + +[example configuration](./query-frontend.md) diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md index 5d6aafd694029..32e4d544dc6a8 100644 --- a/docs/configuration/query-frontend.md +++ b/docs/configuration/query-frontend.md @@ -101,29 +101,15 @@ metadata: namespace: spec: minReadySeconds: 10 - progressDeadlineSeconds: 600 replicas: 2 selector: matchLabels: name: query-frontend - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate template: metadata: labels: name: query-frontend spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-frontend - # see https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#kubernetes-io-hostname - topologyKey: kubernetes.io/hostname containers: - args: - -config.file=/etc/loki/config.yaml @@ -145,88 +131,6 @@ spec: requests: cpu: "2" memory: 600Mi - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /etc/loki - name: loki - restartPolicy: Always - terminationGracePeriodSeconds: 30 - volumes: - - configMap: - defaultMode: 420 - name: loki - name: loki -``` - -### Querier Deployment - -```yaml -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - annotations: - labels: - name: querier - name: querier - namespace: -spec: - minReadySeconds: 10 - progressDeadlineSeconds: 600 - replicas: 3 - selector: - matchLabels: - name: querier - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - labels: - name: querier - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: querier - topologyKey: kubernetes.io/hostname - containers: - - args: - - -config.file=/etc/loki/config.yaml - - -log.level=debug - - -target=querier - image: grafana/loki: - imagePullPolicy: IfNotPresent - name: querier - ports: - - containerPort: 80 - name: http-metrics - protocol: TCP - - containerPort: 9095 - name: grpc - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /ready - port: 80 - scheme: HTTP - initialDelaySeconds: 15 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - limits: - memory: 8Gi - requests: - cpu: "2" - memory: 2Gi - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File volumeMounts: - mountPath: /etc/loki name: loki From 92e560c3cb761eb69847251af960c2d2400c2ef7 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Thu, 30 Jan 2020 13:37:48 -0500 Subject: [PATCH 5/8] query frontend docs downstream via http, use 9091 as http port --- docs/configuration/query-frontend.md | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md index 32e4d544dc6a8..2afcffc1a9394 100644 --- a/docs/configuration/query-frontend.md +++ b/docs/configuration/query-frontend.md @@ -46,16 +46,9 @@ query_range: # consistent_hash: true frontend: - # 256 length tenant queues per frontend - max_outstanding_per_tenant: 256 log_queries_longer_than: 5s + downstream: querier..svc.cluster.local:9091 compress_responses: true - -frontend_worker: - address: 
query-frontend..svc.cluster.local:9095 - grpc_client_config: - max_send_msg_size: 1.048576e+08 - parallelism: 8 ``` @@ -75,10 +68,10 @@ metadata: namespace: spec: ports: - - name: query-frontend-http-metrics - port: 80 + - name: query-frontend-http + port: 9091 protocol: TCP - targetPort: 80 + targetPort: 9091 - name: query-frontend-grpc port: 9095 protocol: TCP @@ -119,8 +112,8 @@ spec: imagePullPolicy: IfNotPresent name: query-frontend ports: - - containerPort: 80 - name: http-metrics + - containerPort: 9091 + name: http protocol: TCP - containerPort: 9095 name: grpc From a0c7558abaf52ee260d5c96202f83316b9a096a6 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 31 Jan 2020 09:12:08 -0500 Subject: [PATCH 6/8] removes grpc references in frontend yaml snippet --- docs/configuration/query-frontend.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md index 2afcffc1a9394..f56dd41c277db 100644 --- a/docs/configuration/query-frontend.md +++ b/docs/configuration/query-frontend.md @@ -72,10 +72,6 @@ spec: port: 9091 protocol: TCP targetPort: 9091 - - name: query-frontend-grpc - port: 9095 - protocol: TCP - targetPort: 9095 selector: name: query-frontend sessionAffinity: None @@ -115,9 +111,6 @@ spec: - containerPort: 9091 name: http protocol: TCP - - containerPort: 9095 - name: grpc - protocol: TCP resources: limits: memory: 1200Mi From da5b3124eb7b64a0ac51001633d58718dc36de98 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 31 Jan 2020 10:03:52 -0500 Subject: [PATCH 7/8] more comprehensive query frontend example description --- docs/configuration/query-frontend.md | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md index f56dd41c277db..99b5c9348db65 100644 --- a/docs/configuration/query-frontend.md +++ b/docs/configuration/query-frontend.md @@ -1,8 +1,18 @@ -## Disclaimer +## Kubernetes Query Frontend Example -This aims to be a general purpose example. There are a number of substitutions to make for these to work correctly. These variables take the form of . Override them with specifics to your environment. +### Disclaimer -## Configuration +This aims to be a general purpose example; there are a number of substitutions to make for it to work correctly. These variables take the form of . You should override them with specifics to your environment. + +### Use case + +It's a common occurrence to start running Loki as a single binary while trying it out in order to simplify deployments and defer learning the (initially unnecessary) nitty gritty details. As we become more comfortable with its paradigms and begin migrating towards a more production ready deployment there are a number of things to be aware of. A common bottleneck is on the read path: queries that executed effortlessly on small data sets may churn to a halt on larger ones. Sometimes we can solve this with more queriers. However, that doesn't help when our queries are too large for a single querier to execute. Then we need the query frontend. + +#### Parallelization + +One of the most important functions of the query frontend is the ability to split larger queries into smaller ones, execute them in parallel, and stitch the results back together. How often it splits them is determined by the `querier.split-queries-by-interval` flag or the yaml config `queryrange.split_queriers_by_interval`. 
With this set to `1h`, the frontend will dissect a day long query into 24 one hour queries, distribute them to the queriers, and collect the results. This is immensely helpful in production environments as it not only allows us to perform larger queries via aggregation, but also evens the work distribution across queriers so that one or two are not stuck with impossibly large queries while others are left idle. + +### Configuration Use this shared config file to get the benefits of query parallelisation and caching with the query-frontend component. In addition to this configuration, start the querier and query-frontend components with `-target=querier` and `-target=query-frontend`, respectively. From 715a241b304222338fa92ee04e4fd5303b5a5d1f Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 31 Jan 2020 11:53:26 -0500 Subject: [PATCH 8/8] makes query frontend example deployable alongside existing loki --- docs/configuration/query-frontend.md | 113 +++++++++++++-------------- 1 file changed, 55 insertions(+), 58 deletions(-) diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md index 99b5c9348db65..91de063084a84 100644 --- a/docs/configuration/query-frontend.md +++ b/docs/configuration/query-frontend.md @@ -12,59 +12,52 @@ It's a common occurrence to start running Loki as a single binary while trying i One of the most important functions of the query frontend is the ability to split larger queries into smaller ones, execute them in parallel, and stitch the results back together. How often it splits them is determined by the `querier.split-queries-by-interval` flag or the yaml config `queryrange.split_queriers_by_interval`. With this set to `1h`, the frontend will dissect a day long query into 24 one hour queries, distribute them to the queriers, and collect the results. This is immensely helpful in production environments as it not only allows us to perform larger queries via aggregation, but also evens the work distribution across queriers so that one or two are not stuck with impossibly large queries while others are left idle. -### Configuration - -Use this shared config file to get the benefits of query parallelisation and caching with the query-frontend component. In addition to this configuration, start the querier and query-frontend components with `-target=querier` and `-target=query-frontend`, respectively. - -```yaml -# Disable the requirement that every request to Cortex has a -# X-Scope-OrgID header. `fake` will be substituted in instead. -auth_enabled: false - -# We don't want the usual /api/prom prefix. -http_prefix: - -server: - http_listen_port: 9091 - -query_range: - # make queries more cache-able by aligning them with their step intervals - align_queries_with_step: true - max_retries: 5 - # parallelize queries in 15min intervals - split_queries_by_interval: 15m - cache_results: true - - results_cache: - max_freshness: 10m - cache: - # We're going to use the in-process "FIFO" cache, but you can enable - # memcached below. - enable_fifocache: true - fifocache: - size: 1024 - validity: 24h - - # If you want to use a memcached cluster, configure a headless service - # in Kubernetes and Loki will discover the individual instances using - # a SRV DNS query. Loki will then do client-side hashing to spread - # the load evenly. 
- # memcached: - # memcached_client: - # host: memcached.default.svc.cluster.local - # service: memcached - # consistent_hash: true - -frontend: - log_queries_longer_than: 5s - downstream: querier..svc.cluster.local:9091 - compress_responses: true -``` +## Kubernetes Deployment +### ConfigMap -## Kubernetes Deployment +Use this ConfigMap to get the benefits of query parallelisation and caching with the query-frontend component. -Here's an example k8s deployment which can consume the above configuration yaml. Note that the above configuration snippet should be merged with the rest of your configuration and exposed as the `loki` configmap. +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: loki_frontend + namespace: +data: + config.yaml: | + # Disable the requirement that every request to Cortex has a + # X-Scope-OrgID header. `fake` will be substituted in instead. + auth_enabled: false + + # We don't want the usual /api/prom prefix. + http_prefix: + + server: + http_listen_port: 3100 + + query_range: + # make queries more cache-able by aligning them with their step intervals + align_queries_with_step: true + max_retries: 5 + # parallelize queries in 15min intervals + split_queries_by_interval: 15m + cache_results: true + + results_cache: + max_freshness: 10m + cache: + # We're going to use the in-process "FIFO" cache + enable_fifocache: true + fifocache: + size: 1024 + validity: 24h + + frontend: + log_queries_longer_than: 5s + downstream: querier..svc.cluster.local:3100 + compress_responses: true +``` ### Frontend Service ```yaml @@ -79,9 +72,9 @@ metadata: spec: ports: - name: query-frontend-http - port: 9091 + port: 3100 protocol: TCP - targetPort: 9091 + targetPort: 3100 selector: name: query-frontend sessionAffinity: None @@ -114,11 +107,11 @@ spec: - -config.file=/etc/loki/config.yaml - -log.level=debug - -target=query-frontend - image: grafana/loki: - imagePullPolicy: IfNotPresent + image: grafana/loki:latest + imagePullPolicy: Always name: query-frontend ports: - - containerPort: 9091 + - containerPort: 3100 name: http protocol: TCP resources: @@ -129,12 +122,16 @@ spec: memory: 600Mi volumeMounts: - mountPath: /etc/loki - name: loki + name: loki_frontend restartPolicy: Always terminationGracePeriodSeconds: 30 volumes: - configMap: defaultMode: 420 - name: loki - name: loki + name: loki_frontend + name: loki_frontend ``` + +### Grafana + +Once you've deployed these, you'll need your grafana datasource to point to the new frontend service, now available within the cluster at `http://query-frontend..svc.cluster.local:3100`.
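+
+As a sketch only, a provisioned Grafana datasource pointing at the frontend could look like the following (the namespace shown is a stand-in; substitute your own):
+
+```yaml
+# Grafana datasource provisioning file, e.g. /etc/grafana/provisioning/datasources/loki.yaml
+apiVersion: 1
+datasources:
+  - name: Loki
+    type: loki
+    access: proxy
+    # hypothetical namespace "loki"; use the namespace you deployed the frontend to
+    url: http://query-frontend.loki.svc.cluster.local:3100
+```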