consumer.go
package rabbit
import (
"errors"
"fmt"
"reflect"
"sync/atomic"
"time"
"github.com/jd78/partitioner"
"github.com/streadway/amqp"
)
type Handler func(message interface{}, header map[string]interface{}) HandlerResponse
type IConsumer interface {
AddHandler(messageType string, concreteType reflect.Type, handler Handler)
AddRetryHandler(messageType string, concreteType reflect.Type, handler Handler, maxRetries int)
StartConsuming(queue string, ack, activePassive bool, activePassiveRetryInterval time.Duration,
concurrentConsumers int, args map[string]interface{}) string
StartConsumingPartitions(queue string, ack, activePassive bool, activePassiveRetryInterval,
maxWaitingTimeRetryIntervalOnPartitionError time.Duration, partitions int,
partitionResolver map[reflect.Type]func(message interface{}) int64, args map[string]interface{}) string
}
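// Illustrative sketch, not part of the original file: a minimal Handler for a
// hypothetical payload type. The concrete type handed to a Handler is the one
// registered for the message type; whether it arrives as a value or a pointer
// depends on how deserialize builds it, so a type switch keeps the sketch safe.
type exampleOrder struct {
	ID int64
}

func exampleOrderHandler(message interface{}, header map[string]interface{}) HandlerResponse {
	switch m := message.(type) {
	case *exampleOrder:
		_ = m.ID // process the order here
		return Completed
	case exampleOrder:
		_ = m.ID
		return Completed
	default:
		// Unexpected payload: Reject drops it, Err would trigger retry/requeue.
		return Reject
	}
}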
type consumer struct {
handlers map[string]Handler
types map[string]reflect.Type
maxRetries map[string]int
log *rabbitLogger
channel *amqp.Channel
consumerRunning map[string]bool
requeueWaitingTimeOnError time.Duration
}
var roundrobin int64
type partition struct {
message interface{}
partitionResolver map[reflect.Type]func(message interface{}) int64
}
func (p partition) GetPartition() int64 {
t := reflect.TypeOf(p.message)
if _, exists := p.partitionResolver[t]; exists {
return p.partitionResolver[t](p.message)
}
return atomic.AddInt64(&roundrobin, 1)
}
// requeueWaitingTimeOnError: how long to wait before requeueing a message after a handler error (message ordering will be lost). Only applies to message types that have no retry handler registered (see AddRetryHandler).
func (r *rabbit) configureConsumer(prefetch int, requeueWaitingTimeOnError time.Duration) IConsumer {
channel, err := r.connection.Channel()
checkError(err, "Error creating the producing channel", r.log)
qErr := channel.Qos(prefetch, 0, false)
checkError(qErr, "Error assigning prefetch on the channel", r.log)
go func() {
ch := make(chan *amqp.Error)
channel.NotifyClose(ch)
err := <-ch
checkError(err, "Channel closed", r.log)
}()
go func() {
ch := make(chan bool)
channel.NotifyFlow(ch)
for {
status := <-ch
if r.log.logLevel >= Warn {
r.log.warn(fmt.Sprintf("channel flow detected - flow enabled: %t", status))
}
}
}()
h := make(map[string]Handler)
t := make(map[string]reflect.Type)
retries := make(map[string]int)
return &consumer{h, t, retries, r.log, channel, make(map[string]bool), requeueWaitingTimeOnError}
}
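// Illustrative sketch, not part of the original file: obtaining an IConsumer
// from an already-connected *rabbit (assumed to be built elsewhere in this
// package). The prefetch bounds unacknowledged deliveries on the channel;
// requeueWaitingTimeOnError only applies to types without a retry handler.
func exampleConfigureConsumer(r *rabbit) IConsumer {
	return r.configureConsumer(10, 5*time.Second)
}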
func (c *consumer) handlerExists(messageType string) bool {
_, exists := c.handlers[messageType]
return exists
}
func (c *consumer) GetHandlerKeys() []string {
keys := make([]string, 0, len(c.handlers))
for k := range c.handlers {
keys = append(keys, k)
}
return keys
}
func (c *consumer) getMaxRetries(messageType string) (bool, int) {
maxRetries, exists := c.maxRetries[messageType]
return exists, maxRetries
}
func (c *consumer) AddHandler(messageType string, concreteType reflect.Type, handler Handler) {
if c.handlerExists(messageType) {
err := fmt.Errorf("messageType %s already mapped", messageType)
checkError(err, "", c.log)
}
c.handlers[messageType] = handler
c.types[messageType] = concreteType
}
// maxRetries: number of in-process retries before the message is rejected. Only applies to message types registered through AddRetryHandler.
func (c *consumer) AddRetryHandler(messageType string, concreteType reflect.Type, handler Handler, maxRetries int) {
if c.handlerExists(messageType) {
err := fmt.Errorf("messageType %s already mapped", messageType)
checkError(err, "", c.log)
}
c.handlers[messageType] = handler
c.types[messageType] = concreteType
c.maxRetries[messageType] = maxRetries
}
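// Illustrative sketch, not part of the original file: registering handlers.
// exampleOrder and exampleOrderHandler are the hypothetical names introduced above.
func exampleRegisterHandlers(c IConsumer) {
	// Plain handler: on Err the message is requeued after requeueWaitingTimeOnError.
	c.AddHandler("exampleOrder", reflect.TypeOf(exampleOrder{}), exampleOrderHandler)

	// Retry handler: on Err the message is retried in-process up to 5 times
	// with a growing back-off, then rejected.
	c.AddRetryHandler("exampleOrderWithRetry", reflect.TypeOf(exampleOrder{}), exampleOrderHandler, 5)
}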
func (c *consumer) handle(w amqp.Delivery, message interface{}, ack bool, retried int) {
if w.Redelivered {
if c.log.logLevel >= Info {
c.log.info(fmt.Sprintf("MessageID=%s, CorrelationId=%s, has been redelivered",
w.MessageId, w.CorrelationId))
}
}
handler := c.handlers[w.Type]
response := handler(message, w.Headers)
switch response {
case Completed:
if c.log.logLevel >= Debug {
c.log.debug(fmt.Sprintf("MessageId=%s, CorrelationId=%s, completed.",
w.MessageId, w.CorrelationId))
}
(&envelope{&w}).maybeAckMessage(ack, c.log)
case Requeue:
if c.log.logLevel >= Debug {
c.log.debug(fmt.Sprintf("MessageId=%s, CorrelationId=%s, requeueing message...",
w.MessageId, w.CorrelationId))
}
(&envelope{&w}).maybeRequeueMessage(ack, c.log)
case Reject:
if c.log.logLevel >= Info {
c.log.info(fmt.Sprintf("MessageId=%s, CorrelationId=%s, rejecting message...",
w.MessageId, w.CorrelationId))
}
(&envelope{&w}).maybeRejectMessage(ack, c.log)
case Err:
retryEnabled, maxRetries := c.getMaxRetries(w.Type)
if retryEnabled {
if retried > maxRetries-1 {
if c.log.logLevel >= Warn {
c.log.warn(fmt.Sprintf("MessageId=%s, CorrelationId=%s, max retry reached, rejecting message...",
w.MessageId, w.CorrelationId))
}
(&envelope{&w}).maybeRejectMessage(ack, c.log)
} else {
time.Sleep(time.Duration(retried*100) * time.Millisecond)
if c.log.logLevel >= Debug {
c.log.debug(fmt.Sprintf("MessageId=%s, CorrelationId=%s, retry=%d times, retrying due to error...",
w.MessageId, w.CorrelationId, retried))
}
c.handle(w, message, ack, retried+1)
}
} else {
go func() {
time.Sleep(c.requeueWaitingTimeOnError)
(&envelope{&w}).maybeRequeueMessage(ack, c.log)
}()
}
}
}
func (c *consumer) deserializeMessage(w amqp.Delivery) (interface{}, error) {
obj, err := deserialize(w.Body, ContentType(w.ContentType), c.types[w.Type])
if err != nil {
if c.log.logLevel >= Error {
c.log.err(fmt.Sprintf("MessageID=%s, CorrelationId=%s, could not deserialize the message, requeueing...",
w.MessageId, w.CorrelationId))
}
}
return obj, err
}
// StartConsuming starts a new consumer on the given queue.
// queue: queue name
// ack: true enables explicit ack/requeue/reject from the handlers; false uses auto-ack
// activePassive: enables an active/passive setup in which only one consumer is active and the others wait until the queue has no consumer
// activePassiveRetryInterval: how often a passive consumer checks whether the queue still has an active consumer
// concurrentConsumers: number of goroutines reading from the delivery channel
// args: extra consumer arguments passed to the AMQP Consume call
func (c *consumer) StartConsuming(queue string, ack, activePassive bool, activePassiveRetryInterval time.Duration,
concurrentConsumers int, args map[string]interface{}) string {
if _, exists := c.consumerRunning[queue]; exists {
err := errors.New("Consumer already running, please configure a new consumer for concurrent processing or set the concurrentConsumers")
checkError(err, "Error starting the consumer", c.log)
}
if activePassive {
logOnce := executeOnce{}
for {
qInfo := Queues[queue]
q, err := c.channel.QueueDeclarePassive(qInfo.name, qInfo.durable, qInfo.autoDelete, qInfo.exclusive,
false, qInfo.args)
checkError(err, "Error declaring queue passive", c.log)
if q.Consumers == 0 {
break
} else {
if c.log.logLevel >= Info {
logOnce.MaybeExecute(func() { c.log.info(fmt.Sprintf("Consumer passive on queue %s", queue)) })
}
time.Sleep(activePassiveRetryInterval)
}
}
}
consumerId := getUniqueId()
delivery, err := c.channel.Consume(queue, consumerId, !ack, activePassive, false, false, args)
checkError(err, "Error starting the consumer", c.log)
if activePassive {
if c.log.logLevel >= Info {
c.log.info(fmt.Sprintf("Consumer active on queue %s", queue))
}
}
for i := 0; i < concurrentConsumers; i++ {
go func(work <-chan amqp.Delivery) {
for w := range work {
if !c.handlerExists(w.Type) {
c.log.debug(fmt.Sprintf("MessageId=%s, CorrelationId=%s, no handler registered for %s, registeredTypes=%v",
w.MessageId, w.CorrelationId, w.Type, c.GetHandlerKeys()))
(&envelope{&w}).maybeAckMessage(ack, c.log)
continue
}
message, err := c.deserializeMessage(w)
if err != nil {
(&envelope{&w}).maybeRequeueMessage(ack, c.log)
continue
}
c.handle(w, message, ack, 0)
}
}(delivery)
}
c.consumerRunning[queue] = true
return consumerId
}
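// Illustrative sketch, not part of the original file: starting a competing-consumer
// subscription. "orders.queue" is a placeholder; with activePassive set to false the
// activePassiveRetryInterval is not used for waiting but must still be supplied.
func exampleStartConsuming(c IConsumer) string {
	return c.StartConsuming(
		"orders.queue", // queue name (placeholder)
		true,           // ack: handlers decide ack/requeue/reject per message
		false,          // activePassive: no single-active-consumer election
		5*time.Second,  // activePassiveRetryInterval (only used when activePassive is true)
		4,              // concurrentConsumers: goroutines reading from the delivery channel
		nil,            // extra consumer args
	)
}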
// StartConsumingPartitions starts a new partitioned consumer on the given queue.
// queue: queue name
// ack: true enables explicit ack/requeue/reject from the handlers; false uses auto-ack
// activePassive: enables an active/passive setup in which only one consumer is active and the others wait until the queue has no consumer
// activePassiveRetryInterval: how often a passive consumer checks whether the queue still has an active consumer
// maxWaitingTimeRetryIntervalOnPartitionError: maximum wait time between retries when a partitioned handler returns an error
// partitions: number of concurrent, key-consistent partitions; messages within a partition are handled in sequence
// partitionResolver: map[reflect.Type]func(message interface{}) int64; for each message type, a function returning the key used to choose its partition (unmapped types are distributed round-robin)
// args: extra consumer arguments passed to the AMQP Consume call
func (c *consumer) StartConsumingPartitions(queue string, ack, activePassive bool, activePassiveRetryInterval,
maxWaitingTimeRetryIntervalOnPartitionError time.Duration,
partitions int, partitionResolver map[reflect.Type]func(message interface{}) int64, args map[string]interface{}) string {
if _, exists := c.consumerRunning[queue]; exists {
err := errors.New("Consumer already running, please configure a new consumer for concurrent processing or set partitions")
checkError(err, "Error starting the consumer", c.log)
}
if activePassive {
logOnce := executeOnce{}
for {
qInfo := Queues[queue]
q, err := c.channel.QueueDeclarePassive(qInfo.name, qInfo.durable, qInfo.autoDelete, qInfo.exclusive,
false, qInfo.args)
checkError(err, "Error declaring queue passive", c.log)
if q.Consumers == 0 {
break
} else {
if c.log.logLevel >= Info {
logOnce.MaybeExecute(func() { c.log.info(fmt.Sprintf("Consumer passive on queue %s", queue)) })
}
time.Sleep(activePassiveRetryInterval)
}
}
}
consumerId := getUniqueId()
delivery, err := c.channel.Consume(queue, consumerId, !ack, activePassive, false, false, args)
checkError(err, "Error starting the consumer", c.log)
if activePassive {
if c.log.logLevel >= Info {
c.log.info(fmt.Sprintf("Consumer active on queue %s", queue))
}
}
part := partitioner.CreatePartitioner(partitions, maxWaitingTimeRetryIntervalOnPartitionError)
go func(work <-chan amqp.Delivery) {
for w := range work {
if !c.handlerExists(w.Type) {
c.log.debug(fmt.Sprintf("MessageId=%s, CorrelationId=%s, no handler registered for %s, registeredTypes=%v",
w.MessageId, w.CorrelationId, w.Type, c.GetHandlerKeys()))
(&envelope{&w}).maybeAckMessage(ack, c.log)
continue
}
message, err := c.deserializeMessage(w)
if err != nil {
(&envelope{&w}).maybeRequeueMessage(ack, c.log)
continue
}
cw := w
part.HandleInSequence(func(done chan bool) {
c.handle(cw, message, ack, 0)
done <- true
}, partition{message, partitionResolver})
}
}(delivery)
c.consumerRunning[queue] = true
return consumerId
}
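// Illustrative sketch, not part of the original file: starting a partitioned
// consumer. Messages whose type has an entry in the resolver map are processed
// in sequence per partition key; all other types fall back to round-robin.
// The map key must match the dynamic type that deserialize produces (value vs
// pointer), which is an assumption in this sketch.
func exampleStartConsumingPartitions(c IConsumer) string {
	resolver := map[reflect.Type]func(message interface{}) int64{
		reflect.TypeOf(exampleOrder{}): func(message interface{}) int64 {
			// Partition by order ID so messages for the same order keep their order.
			if o, ok := message.(exampleOrder); ok {
				return o.ID
			}
			return 0
		},
	}

	return c.StartConsumingPartitions(
		"orders.partitioned.queue", // queue name (placeholder)
		true,                       // ack
		false,                      // activePassive
		5*time.Second,              // activePassiveRetryInterval
		30*time.Second,             // maxWaitingTimeRetryIntervalOnPartitionError
		8,                          // partitions
		resolver,
		nil, // extra consumer args
	)
}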