reaper.go
package testcontainers

import (
	"bufio"
	"context"
	"fmt"
	"net"
	"strings"
	"time"

	"github.com/docker/go-connections/nat"
	"github.com/pkg/errors"
	"github.com/testcontainers/testcontainers-go/wait"
)
const (
	// TestcontainerLabel is the label prefix applied to containers created by this library.
	TestcontainerLabel = "org.testcontainers.golang"
	// TestcontainerLabelSessionID marks a container with the test session that created it.
	TestcontainerLabelSessionID = TestcontainerLabel + ".sessionId"
	// TestcontainerLabelIsReaper marks the reaper (Ryuk) container itself.
	TestcontainerLabelIsReaper = TestcontainerLabel + ".reaper"

	// ReaperDefaultImage is the Ryuk image used when no override is given.
	ReaperDefaultImage = "quay.io/testcontainers/ryuk:0.2.3"
)
// ReaperProvider represents a provider the Reaper can use to run itself.
// Any ContainerProvider should usually satisfy this interface as well, so it is pluggable.
type ReaperProvider interface {
	RunContainer(ctx context.Context, req ContainerRequest) (Container, error)
}
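// stubReaperProvider is a minimal sketch (not part of the upstream file) showing how the
// pluggable ReaperProvider interface can be satisfied outside the Docker-backed provider,
// for example in tests; the type name and behaviour are assumptions for illustration only.
type stubReaperProvider struct {
	runContainer func(ctx context.Context, req ContainerRequest) (Container, error)
}

// RunContainer delegates to the injected function, making container startup fakeable.
func (p *stubReaperProvider) RunContainer(ctx context.Context, req ContainerRequest) (Container, error) {
	return p.runContainer(ctx, req)
}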
// Reaper is used to start a sidecar container that cleans up resources
type Reaper struct {
	Provider  ReaperProvider
	SessionID string
	Endpoint  string
}
// NewReaper creates a Reaper with a sessionID to identify containers and a provider to use
func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, reaperImageName string) (*Reaper, error) {
	r := &Reaper{
		Provider:  provider,
		SessionID: sessionID,
	}

	listeningPort := nat.Port("8080/tcp")

	// TODO: reuse reaper if there already is one
	req := ContainerRequest{
		Image:        reaperImage(reaperImageName),
		ExposedPorts: []string{string(listeningPort)},
		Labels: map[string]string{
			TestcontainerLabel:         "true",
			TestcontainerLabelIsReaper: "true",
		},
		// The reaper container must not be registered with a reaper itself.
		SkipReaper: true,
		// Ryuk needs the Docker socket to be able to remove containers.
		BindMounts: map[string]string{
			"/var/run/docker.sock": "/var/run/docker.sock",
		},
		AutoRemove: true,
		WaitingFor: wait.ForListeningPort(listeningPort),
	}

	c, err := provider.RunContainer(ctx, req)
	if err != nil {
		return nil, err
	}

	endpoint, err := c.PortEndpoint(ctx, "8080", "")
	if err != nil {
		return nil, err
	}
	r.Endpoint = endpoint

	return r, nil
}
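// exampleReaperLifecycle is a minimal sketch (not part of the upstream file) of how a
// provider would typically wire NewReaper and Connect together; the function name and
// the surrounding call site are assumptions, not upstream API.
func exampleReaperLifecycle(ctx context.Context, sessionID string, provider ReaperProvider) error {
	// An empty image name falls back to ReaperDefaultImage.
	reaper, err := NewReaper(ctx, sessionID, provider, "")
	if err != nil {
		return err
	}

	terminate, err := reaper.Connect()
	if err != nil {
		return err
	}
	// Sending true stops the goroutine holding the Ryuk connection; once the connection
	// drops, Ryuk removes every container carrying reaper.Labels().
	defer func() { terminate <- true }()

	// ... start containers whose requests include reaper.Labels() ...
	return nil
}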
// reaperImage returns the reaper image to use, falling back to the default when none is given.
func reaperImage(reaperImageName string) string {
	if reaperImageName == "" {
		return ReaperDefaultImage
	}
	return reaperImageName
}
// Connect dials the reaper (Ryuk) and starts a goroutine that keeps the connection open;
// the goroutine can be terminated by sending true into the returned channel.
func (r *Reaper) Connect() (chan bool, error) {
	conn, err := net.DialTimeout("tcp", r.Endpoint, 10*time.Second)
	if err != nil {
		return nil, errors.Wrap(err, "Connecting to Ryuk on "+r.Endpoint+" failed")
	}

	terminationSignal := make(chan bool)
	go func(conn net.Conn) {
		sock := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
		defer conn.Close()

		// Ryuk expects a newline-terminated list of "&"-joined label filters and
		// answers with "ACK" once it is watching for matching containers.
		labelFilters := []string{}
		for l, v := range r.Labels() {
			labelFilters = append(labelFilters, fmt.Sprintf("label=%s=%s", l, v))
		}

		retryLimit := 3
		for retryLimit > 0 {
			retryLimit--

			sock.WriteString(strings.Join(labelFilters, "&"))
			sock.WriteString("\n")

			// WriteString errors are buffered by bufio and surface here on Flush.
			if err := sock.Flush(); err != nil {
				continue
			}

			resp, err := sock.ReadString('\n')
			if err != nil {
				continue
			}
			if resp == "ACK\n" {
				break
			}
		}

		// Keep the connection open until the caller signals termination; once it is
		// closed, Ryuk reaps everything matching the filters.
		<-terminationSignal
	}(conn)

	return terminationSignal, nil
}
// Labels returns the container labels to use so that this Reaper cleans them up
func (r *Reaper) Labels() map[string]string {
	return map[string]string{
		TestcontainerLabel:          "true",
		TestcontainerLabelSessionID: r.SessionID,
	}
}
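// applyReaperLabels is a sketch (not upstream code) of how a provider might merge the
// reaper's labels into a ContainerRequest so Ryuk can later find and remove the container;
// the helper name is hypothetical.
func applyReaperLabels(req *ContainerRequest, r *Reaper) {
	if req.Labels == nil {
		req.Labels = map[string]string{}
	}
	for k, v := range r.Labels() {
		req.Labels[k] = v
	}
}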