pool.go
// Package srad provides a small round-robin pool of gRPC client connections.
package srad

import (
	"errors"
	"sync"
	"sync/atomic"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

var (
	// ErrConnClosed is returned when a pooled connection is no longer usable.
	ErrConnClosed = errors.New("grpc: conn closed")
)

// pool is a fixed-size, round-robin pool of *grpc.ClientConn. Connections
// that shut down or fall into transient failure are lazily redialed via fn.
type pool struct {
	size    uint32
	cursor  uint32
	clients []*grpc.ClientConn
	fn      func() (*grpc.ClientConn, error)
	mutex   sync.Mutex
}
// newPool dials size connections up front using fn. If any dial fails, the
// connections opened so far are closed and the error is returned.
func newPool(fn func() (*grpc.ClientConn, error), size uint32) (*pool, error) {
	if size == 0 {
		return nil, errors.New("grpc: pool size must be greater than zero")
	}
	clients := make([]*grpc.ClientConn, 0, size)
	for i := 0; i < int(size); i++ {
		cc, err := fn()
		if err != nil {
			for _, c := range clients {
				c.Close()
			}
			return nil, err
		}
		clients = append(clients, cc)
	}
	return &pool{
		size:    size,
		clients: clients,
		fn:      fn,
	}, nil
}
// Get returns the next connection in round-robin order, replacing it with a
// freshly dialed one if it has shut down or is in transient failure.
func (p *pool) Get() (*grpc.ClientConn, error) {
	idx := atomic.AddUint32(&p.cursor, 1) % p.size

	// Fast path: hand out the existing connection if it is still healthy.
	cc := p.clients[idx]
	if cc != nil && p.checkState(cc) == nil {
		return cc, nil
	}

	// Slow path: close and replace the broken connection under the lock so
	// that concurrent callers do not race on the same slot.
	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Re-check: another goroutine may already have replaced it.
	cc = p.clients[idx]
	if cc != nil && p.checkState(cc) == nil {
		return cc, nil
	}
	if cc != nil {
		cc.Close()
		p.clients[idx] = nil
	}

	cc, err := p.fn()
	if err != nil {
		return nil, err
	}
	p.clients[idx] = cc
	return cc, nil
}
// checkState reports ErrConnClosed if the connection has shut down or is in
// transient failure; any other connectivity state is treated as usable.
func (p *pool) checkState(cc *grpc.ClientConn) error {
	switch cc.GetState() {
	case connectivity.TransientFailure, connectivity.Shutdown:
		return ErrConnClosed
	default:
		return nil
	}
}
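
// A minimal usage sketch, not part of this file: since newPool and pool are
// unexported, it assumes a separate file in the same package (for example an
// example or _test.go file). The target address "localhost:50051", the
// insecure credentials, and the pool size 4 are illustrative placeholders.
//
//	package srad
//
//	import (
//		"log"
//
//		"google.golang.org/grpc"
//		"google.golang.org/grpc/credentials/insecure"
//	)
//
//	// examplePoolUsage is a hypothetical helper showing how the pool
//	// might be wired up.
//	func examplePoolUsage() {
//		dial := func() (*grpc.ClientConn, error) {
//			// Dial options are placeholders; real code would configure
//			// TLS, keepalive, interceptors, and so on.
//			return grpc.Dial("localhost:50051",
//				grpc.WithTransportCredentials(insecure.NewCredentials()))
//		}
//
//		p, err := newPool(dial, 4)
//		if err != nil {
//			log.Fatalf("creating pool: %v", err)
//		}
//
//		// Each Get advances the round-robin cursor and returns a healthy
//		// *grpc.ClientConn, redialing the slot if it has gone bad.
//		cc, err := p.Get()
//		if err != nil {
//			log.Fatalf("getting connection: %v", err)
//		}
//		_ = cc // pass cc to a generated client stub as usual
//	}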