forked from redis/rueidis
-
Notifications
You must be signed in to change notification settings - Fork 0
/
ring.go
135 lines (123 loc) · 2.78 KB
/
ring.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
package rueidis
import (
"sync"
"sync/atomic"
"github.com/rueian/rueidis/internal/cmds"
)
// queue is the hand-off contract between callers that enqueue redis
// commands and the dedicated goroutines that write them to the wire and
// match results back to their senders.
type queue interface {
// PutOne enqueues a single command and returns the channel the
// command's result will be delivered on.
PutOne(m cmds.Completed) chan RedisResult
// PutMulti enqueues a batch of commands sharing one result channel.
PutMulti(m []cmds.Completed) chan RedisResult
// NextWriteCmd returns the next queued command(s) without blocking;
// zero values are returned when nothing is pending.
NextWriteCmd() (cmds.Completed, []cmds.Completed, chan RedisResult)
// WaitForWrite blocks until the next command(s) are queued.
WaitForWrite() (cmds.Completed, []cmds.Completed, chan RedisResult)
// NextResultCh returns the next command(s) awaiting a result along
// with a *sync.Cond used to coordinate slot reuse with producers.
NextResultCh() (cmds.Completed, []cmds.Completed, chan RedisResult, *sync.Cond)
}
// Compile-time check that *ring satisfies queue.
var _ queue = (*ring)(nil)
// newRing allocates a ring whose store holds 2^factor slots so that the
// write/read cursors can be mapped onto slots with a cheap bitwise AND
// against mask. A non-positive factor falls back to DefaultRingScale.
func newRing(factor int) *ring {
	if factor <= 0 {
		factor = DefaultRingScale
	}
	r := &ring{store: make([]node, 2<<(factor-1))} // len == 2^factor
	r.mask = uint64(len(r.store) - 1)
	for i := range r.store {
		// c1 and c2 deliberately share one mutex: waiters on either
		// cond are serialized by the same per-slot lock.
		m := &sync.Mutex{}
		r.store[i].c1 = sync.NewCond(m)
		r.store[i].c2 = sync.NewCond(m)
		r.store[i].ch = make(chan RedisResult) // this channel can't be buffered
	}
	return r
}
// ring is a fixed-size queue of 2^N slots. Producers claim slots by
// atomically advancing write; read1 is owned by the single writer
// goroutine (NextWriteCmd/WaitForWrite) and read2 by the single reader
// goroutine (NextResultCh), so neither needs atomics.
type ring struct {
write uint64
// NOTE(review): 7*8 bytes of padding after write — presumably to keep
// the producer-contended write counter on its own cache line, away
// from the consumer-owned cursors below. Confirm intent before moving.
_ [7]uint64
read1 uint64
read2 uint64
mask uint64
store []node // store's size must be 2^N to work with the mask
}
// node is one ring slot. Its lifecycle is driven by mark:
//
//	0: free                  -> producer fills it and sets 1
//	1: queued for writing    -> writer goroutine consumes it and sets 2
//	2: awaiting its result   -> result goroutine consumes it and sets 0
//
// All fields except ch are guarded by c1.L (shared with c2).
type node struct {
c1 *sync.Cond // producers wait here for the slot to become free
c2 *sync.Cond // WaitForWrite waits here for the slot to be filled
ch chan RedisResult // unbuffered channel delivering the slot's result
one cmds.Completed // single command (exclusive with multi)
multi []cmds.Completed // batch of commands (exclusive with one)
mark uint32 // lifecycle state, see above
slept bool // true while WaitForWrite sleeps on c2; tells producers to broadcast
}
// PutOne claims the next slot, stores a single command in it, and
// returns the unbuffered channel its result will arrive on. It blocks
// while the claimed slot is still occupied by a previous command.
func (r *ring) PutOne(m cmds.Completed) chan RedisResult {
	slot := &r.store[atomic.AddUint64(&r.write, 1)&r.mask]
	slot.c1.L.Lock()
	for slot.mark != 0 {
		slot.c1.Wait() // slot still in use; wait until it is recycled
	}
	slot.one = m
	slot.multi = nil
	slot.mark = 1
	wake := slot.slept
	slot.c1.L.Unlock()
	// Wake WaitForWrite only if it is actually sleeping on this slot.
	if wake {
		slot.c2.Broadcast()
	}
	return slot.ch
}
// PutMulti claims the next slot, stores a batch of commands in it, and
// returns the unbuffered channel their result will arrive on. It blocks
// while the claimed slot is still occupied by a previous command.
func (r *ring) PutMulti(m []cmds.Completed) chan RedisResult {
	slot := &r.store[atomic.AddUint64(&r.write, 1)&r.mask]
	slot.c1.L.Lock()
	for slot.mark != 0 {
		slot.c1.Wait() // slot still in use; wait until it is recycled
	}
	slot.one = cmds.Completed{}
	slot.multi = m
	slot.mark = 1
	wake := slot.slept
	slot.c1.L.Unlock()
	// Wake WaitForWrite only if it is actually sleeping on this slot.
	if wake {
		slot.c2.Broadcast()
	}
	return slot.ch
}
// NextWriteCmd returns the next queued command(s) without blocking,
// advancing the slot from state 1 (queued) to 2 (awaiting result).
// When nothing is queued it rolls the cursor back and returns zero
// values. NextWriteCmd should be only called by one dedicated thread.
func (r *ring) NextWriteCmd() (one cmds.Completed, multi []cmds.Completed, ch chan RedisResult) {
	r.read1++
	slot := &r.store[r.read1&r.mask]
	slot.c1.L.Lock()
	defer slot.c1.L.Unlock()
	if slot.mark != 1 {
		r.read1-- // nothing ready: undo the cursor advance
		return
	}
	one, multi, ch = slot.one, slot.multi, slot.ch
	slot.mark = 2
	return
}
// WaitForWrite blocks until the next slot holds queued command(s),
// then returns them and advances the slot from state 1 (queued) to
// 2 (awaiting result). WaitForWrite should be only called by one
// dedicated thread.
func (r *ring) WaitForWrite() (one cmds.Completed, multi []cmds.Completed, ch chan RedisResult) {
	r.read1++
	slot := &r.store[r.read1&r.mask]
	slot.c1.L.Lock()
	defer slot.c1.L.Unlock()
	for slot.mark != 1 {
		// Tell producers we are asleep so they broadcast on c2.
		slot.slept = true
		slot.c2.Wait() // c1 and c2 share the same mutex
		slot.slept = false
	}
	one, multi, ch = slot.one, slot.multi, slot.ch
	slot.mark = 2
	return
}
// NextResultCh should be only called by one dedicated thread
//
// It returns the next slot in state 2 (awaiting result), recycling it
// to state 0, or zero values (rolling the cursor back) when none is
// ready.
//
// NOTE: the slot's mutex is intentionally LOCKED on return — there is
// no Unlock in this function. The caller receives the cond and is
// responsible for unlocking cond.L (and presumably broadcasting on it
// so producers blocked in PutOne/PutMulti can reuse the freed slot —
// confirm against the caller).
func (r *ring) NextResultCh() (one cmds.Completed, multi []cmds.Completed, ch chan RedisResult, cond *sync.Cond) {
r.read2++
p := r.read2 & r.mask
n := &r.store[p]
cond = n.c1
n.c1.L.Lock()
if n.mark == 2 {
one, multi, ch = n.one, n.multi, n.ch
n.mark = 0 // slot recycled; producers waiting on c1 can claim it
} else {
r.read2-- // nothing ready: undo the cursor advance
}
return
}