list_queue.go
package queue

import (
	"container/list"
	"sync"
	"sync/atomic"
	"time"
)
// NewListQueue creates a list queue with the given number of worker goroutines
// and no limit on the number of queued elements.
func NewListQueue(maxThread int) *ListQueue {
	return NewListQueueWithMaxLen(maxThread, 0)
}

// NewListQueueWithMaxLen creates a list queue with the given number of worker
// goroutines and a maximum number of queued elements (0 means unlimited).
func NewListQueueWithMaxLen(maxThread, maxLen int) *ListQueue {
	return &ListQueue{
		maxLen:     maxLen,
		maxWorker:  maxThread,
		workers:    make([]*worker, maxThread),
		workerPool: make(chan chan Jober, maxThread),
		list:       list.New(),
		lock:       new(sync.RWMutex),
		wg:         new(sync.WaitGroup),
	}
}
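// Illustrative call sites (assumptions for illustration, not part of this file;
// "queue" is this package's name as declared above):
//
//	q1 := queue.NewListQueue(8)                 // 8 workers, unbounded queue
//	q2 := queue.NewListQueueWithMaxLen(8, 1024) // 8 workers; pushes beyond 1024 queued jobs are dropped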
// ListQueue is a list-backed task queue for smoothing out load in
// high-concurrency situations and improving task throughput.
type ListQueue struct {
	maxLen     int
	maxWorker  int
	workers    []*worker
	workerPool chan chan Jober
	list       *list.List
	lock       *sync.RWMutex
	wg         *sync.WaitGroup
	running    uint32
}
// Run starts the worker goroutines and the dispatcher loop.
func (q *ListQueue) Run() {
	if atomic.LoadUint32(&q.running) == 1 {
		return
	}
	atomic.StoreUint32(&q.running, 1)

	for i := 0; i < q.maxWorker; i++ {
		q.workers[i] = newWorker(q.workerPool, q.wg)
		q.workers[i].Start()
	}

	go q.dispatcher()
}
// dispatcher hands jobs from the front of the list to idle workers until the
// queue has been terminated and drained.
func (q *ListQueue) dispatcher() {
	for {
		q.lock.RLock()
		if atomic.LoadUint32(&q.running) != 1 && q.list.Len() == 0 {
			q.lock.RUnlock()
			break
		}
		ele := q.list.Front()
		q.lock.RUnlock()

		// Nothing queued yet: back off briefly instead of spinning.
		if ele == nil {
			time.Sleep(time.Millisecond * 10)
			continue
		}

		// Block until a worker is idle, then hand the job over.
		worker := <-q.workerPool
		worker <- ele.Value.(Jober)

		q.lock.Lock()
		q.list.Remove(ele)
		q.lock.Unlock()
	}
}
// Push puts an executable task into the queue. The task is dropped if the
// queue is not running or the configured maximum length has been reached.
func (q *ListQueue) Push(job Jober) {
	if atomic.LoadUint32(&q.running) != 1 {
		return
	}

	if q.maxLen > 0 {
		q.lock.RLock()
		if q.list.Len() >= q.maxLen {
			q.lock.RUnlock()
			return
		}
		q.lock.RUnlock()
	}

	q.wg.Add(1)
	q.lock.Lock()
	q.list.PushBack(job)
	q.lock.Unlock()
}
// Terminate stops the queue from accepting new tasks, waits for all queued
// tasks to finish, and then releases the worker resources.
func (q *ListQueue) Terminate() {
	if atomic.LoadUint32(&q.running) != 1 {
		return
	}
	atomic.StoreUint32(&q.running, 0)

	q.wg.Wait()

	for i := 0; i < q.maxWorker; i++ {
		q.workers[i].Stop()
	}
	close(q.workerPool)
}
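// Usage sketch for the full lifecycle (illustrative only): the Jober interface
// is defined elsewhere in this package, and "jobs" below stands for any slice
// of values implementing it.
//
//	q := queue.NewListQueueWithMaxLen(4, 1024) // 4 workers, at most 1024 queued jobs
//	q.Run()                                    // start the workers and the dispatcher
//	for _, j := range jobs {                   // jobs []Jober, hypothetical
//		q.Push(j)
//	}
//	q.Terminate() // stop accepting jobs, wait for queued work to finish, release workers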