diff --git a/docs/clients/promtail/configuration.md b/docs/clients/promtail/configuration.md
index b96dbae7f56cf..bae6c9eb56a71 100644
--- a/docs/clients/promtail/configuration.md
+++ b/docs/clients/promtail/configuration.md
@@ -68,8 +68,8 @@ Supported contents and default values of `config.yaml`:
 
 # Describes how Promtail connects to multiple instances
 # of Loki, sending logs to each.
-# WARNING: If one of the remote Loki servers fails to respond or responds 
-# with any error which is retriable, this will impact sending logs to any 
+# WARNING: If one of the remote Loki servers fails to respond or responds
+# with any error which is retriable, this will impact sending logs to any
 # other configured remote Loki servers. Sending is done on a single thread!
 # It is generally recommended to run multiple promtail clients in parallel
 # if you want to send to multiple remote Loki instances.
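+# For example, with two Loki instances (hypothetical URLs), prefer two
+# promtail processes, each configured with a single client:
+#   clients:
+#     - url: http://loki-a.example.com:3100/loki/api/v1/push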
@@ -91,16 +91,19 @@ scrape_configs:
 The `server_config` block configures Promtail's behavior as an HTTP server:
 
 ```yaml
+# Disable the HTTP and gRPC server.
+[disable: <boolean> | default = false]
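+# When disabled, promtail serves no HTTP endpoints at all,
+# including /metrics and /ready.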
+
 # HTTP server listen host
 [http_listen_address: <string>]
 
-# HTTP server listen port
+# HTTP server listen port (0 means random port)
 [http_listen_port: <int> | default = 80]
 
 # gRPC server listen host
 [grpc_listen_address: <string>]
 
-# gRPC server listen port
+# gRPC server listen port (0 means random port)
 [grpc_listen_port: <int> | default = 9095]
 
 # Register instrumentation handlers (/metrics, etc.)
@@ -146,7 +149,7 @@ Loki:
 ```yaml
 # The URL where Loki is listening, denoted in Loki as http_listen_address and
 # http_listen_port. If Loki is running in microservices mode, this is the HTTP
-# URL for the Distributor. Path to the push API needs to be included. 
+# URL for the Distributor. The path to the push API must be included.
 # Example: http://example.com:3100/loki/api/v1/push
 url: <string>
 
@@ -206,8 +209,8 @@ tls_config:
 
 # Configures how to retry requests to Loki when a request
 # fails.
-# Default backoff schedule: 
-# 0.5s, 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s(4.267m) 
+# Default backoff schedule:
+# 0.5s, 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s(4.267m)
 # For a total time of 511.5s(8.5m) before logs are lost
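+# (each retry doubles the wait, so the total is 0.5s * (2^10 - 1) = 511.5s)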
 backoff_config:
   # Initial backoff time between retries
@@ -311,11 +314,11 @@ Stages serve several purposes, more detail can be found [here](./pipelines.md),
 
 #### docker
 
-The Docker stage parses the contents of logs from Docker containers, and is defined by name with an empty object: 
+The Docker stage parses the contents of logs from Docker containers, and is defined by name with an empty object:
 
 ```yaml
 docker: {}
-``` 
+```
 
 The docker stage will match and parse log lines of this format:
 
@@ -345,11 +348,11 @@ The Docker stage is just a convenience wrapper for this definition:
 
 #### cri
 
-The CRI stage parses the contents of logs from CRI containers, and is defined by name with an empty object: 
+The CRI stage parses the contents of logs from CRI containers, and is defined by name with an empty object:
 
 ```yaml
 cri: {}
-``` 
+```
 
 The CRI stage will match and parse log lines of this format:
 
@@ -1071,7 +1074,7 @@ scrape_configs:
       - localhost
      labels:
       job: varlogs  # A `job` label is fairly standard in prometheus and useful for linking metrics and logs.
-      host: yourhost # A `host` label will help identify logs from this machine vs others  
+      host: yourhost # A `host` label will help identify logs from this machine vs others
       __path__: /var/log/*.log  # The path matching uses a third party library: https://github.com/bmatcuk/doublestar
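+      # doublestar's ** also matches across directories, e.g. /var/log/**/*.log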
 ```
 
diff --git a/pkg/promtail/promtail.go b/pkg/promtail/promtail.go
index 50dd4ccefe150..949b1449d1f7b 100644
--- a/pkg/promtail/promtail.go
+++ b/pkg/promtail/promtail.go
@@ -15,7 +15,7 @@ import (
 type Promtail struct {
 	client         client.Client
 	targetManagers *targets.TargetManagers
-	server         *server.Server
+	server         server.Server
 
 	stopped bool
 	mtx     sync.Mutex
diff --git a/pkg/promtail/server/server.go b/pkg/promtail/server/server.go
index d478acba0cc33..e69d060c42e52 100644
--- a/pkg/promtail/server/server.go
+++ b/pkg/promtail/server/server.go
@@ -1,13 +1,16 @@
 package server
 
 import (
+	"flag"
 	"fmt"
 	"net/http"
 	"net/url"
 	"os"
+	"os/signal"
 	"path"
 	"sort"
 	"strings"
+	"syscall"
 	"text/template"
 
 	logutil "github.com/cortexproject/cortex/pkg/util"
@@ -25,8 +28,13 @@ var (
 	readinessProbeSuccess = []byte("Ready")
 )
 
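+// Server is the interface satisfied by promtail's HTTP/gRPC server and by
+// the no-op server used when the server is disabled.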
+type Server interface {
+	Shutdown()
+	Run() error
+}
+
-// Server embed weaveworks server with static file and templating capability
-type Server struct {
+// server embeds the weaveworks server with static file and templating capability.
+type server struct {
 	*serverww.Server
 	tms               *targets.TargetManagers
 	externalURL       *url.URL
@@ -38,10 +46,20 @@ type Config struct {
 	serverww.Config   `yaml:",inline"`
 	ExternalURL       string `yaml:"external_url"`
 	HealthCheckTarget *bool  `yaml:"health_check_target"`
+	Disable           bool   `yaml:"disable"`
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	cfg.Config.RegisterFlags(f)
+	f.BoolVar(&cfg.Disable, "server.disable", false, "Disable the HTTP and gRPC server.")
 }
 
 // New makes a new Server
-func New(cfg Config, tms *targets.TargetManagers) (*Server, error) {
+func New(cfg Config, tms *targets.TargetManagers) (Server, error) {
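+	// When the server is disabled, hand back the no-op implementation so
+	// callers don't need to special-case it.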
+	if cfg.Disable {
+		return NoopServer, nil
+	}
 	wws, err := serverww.New(cfg.Config)
 	if err != nil {
 		return nil, err
@@ -58,7 +76,7 @@ func New(cfg Config, tms *targets.TargetManagers) (*Server, error) {
 		healthCheckTargetFlag = *cfg.HealthCheckTarget
 	}
 
-	serv := &Server{
+	serv := &server{
 		Server:            wws,
 		tms:               tms,
 		externalURL:       externalURL,
@@ -75,7 +93,7 @@ func New(cfg Config, tms *targets.TargetManagers) (*Server, error) {
 }
 
 // serviceDiscovery serves the service discovery page.
-func (s *Server) serviceDiscovery(rw http.ResponseWriter, req *http.Request) {
+func (s *server) serviceDiscovery(rw http.ResponseWriter, req *http.Request) {
 	var index []string
 	allTarget := s.tms.AllTargets()
 	for job := range allTarget {
@@ -143,7 +161,7 @@ func (s *Server) serviceDiscovery(rw http.ResponseWriter, req *http.Request) {
 }
 
 // targets serves the targets page.
-func (s *Server) targets(rw http.ResponseWriter, req *http.Request) {
+func (s *server) targets(rw http.ResponseWriter, req *http.Request) {
 	executeTemplate(req.Context(), rw, templateOptions{
 		Data: struct {
 			TargetPools map[string][]targets.Target
@@ -176,7 +194,7 @@ func (s *Server) targets(rw http.ResponseWriter, req *http.Request) {
 }
 
 // ready serves the ready endpoint
-func (s *Server) ready(rw http.ResponseWriter, _ *http.Request) {
+func (s *server) ready(rw http.ResponseWriter, _ *http.Request) {
 	if s.healthCheckTarget && !s.tms.Ready() {
 		http.Error(rw, readinessProbeFailure, http.StatusInternalServerError)
 		return
@@ -213,3 +231,16 @@ func computeExternalURL(u string, port int) (*url.URL, error) {
 
 	return eu, nil
 }
+
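+// NoopServer is returned by New when the server is disabled; it serves
+// nothing and its Run method simply blocks until a shutdown signal arrives.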
+var NoopServer Server = noopServer{}
+
+type noopServer struct{}
+
+func (noopServer) Run() error {
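+	// Nothing to serve; block until promtail receives a termination signal.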
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+	sig := <-sigs
+	level.Info(logutil.Logger).Log("msg", "received shutdown signal", "sig", sig)
+	return nil
+}
+
+func (noopServer) Shutdown() {}