cache.go

package cache

import (
	"container/list"
	"sync"
	"time"
)

// Getter must be implemented by the client; it returns data for the given keys.
type Getter func(keys Keys) (Result, error)

// Result represents the result returned by a cache lookup, keyed by cache key.
type Result map[int]interface{}

// Keys represents a set of cache keys.
// It is always a slice of int for two reasons:
// - there are no separate Get and GetMulti functions
// - integers are the most commonly used primary keys in databases
type Keys []int
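
// A rough client-side Getter sketch, assuming a hypothetical loadUsersFromDB
// helper (not part of this package) that returns rows keyed by id; any real
// implementation just has to fill a Result keyed by the requested ids:
//
//	getter := cache.Getter(func(keys cache.Keys) (cache.Result, error) {
//		rows, err := loadUsersFromDB(keys) // hypothetical data-source call
//		if err != nil {
//			return nil, err
//		}
//		res := make(cache.Result, len(rows))
//		for id, row := range rows {
//			res[id] = row
//		}
//		return res, nil
//	})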

type entry struct {
	key   int
	exp   time.Time
	value interface{}
}

// Options keeps cache options.
type Options struct {
	MaxEntries int
	TTL        time.Duration
}

// Cache is an in-memory LRU cache with optional TTL-based expiration.
type Cache struct {
	getter Getter
	data   map[int]*list.Element
	// opt keeps cache options
	opt   Options
	ll    *list.List
	mu    sync.RWMutex
	queue *Queue
}

// New initializes a new instance of the cache.
func New(options ...Options) *Cache {
	var o Options
	if len(options) > 0 {
		o = options[0]
	}
	c := Cache{opt: o}
	c.data = make(map[int]*list.Element)
	c.ll = list.New()
	c.queue = NewQueue()
	return &c
}
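
// A minimal construction sketch (the MaxEntries and TTL values are arbitrary):
//
//	c := cache.New(cache.Options{
//		MaxEntries: 1000,
//		TTL:        5 * time.Minute,
//	})
//
// Calling cache.New() with no arguments yields a cache that never expires
// entries (TTL == 0) and never evicts by size (MaxEntries == 0).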

// Get returns data from the cache for the given keys.
//
// Keys that are missing or expired are fetched via the getter function,
// and the fetched data is placed into the cache.
func (c *Cache) Get(keys Keys, getter Getter, queueKey interface{}) (Result, error) {
	result := make(Result)
	missedKeys := make(Keys, 0)
	c.mu.Lock()
	now := time.Now()
	for _, key := range keys {
		if e, ok := c.data[key]; ok {
			if c.opt.TTL > 0 && now.After(e.Value.(*entry).exp) {
				// Entry has expired; re-fetch it below.
				missedKeys = append(missedKeys, key)
			} else {
				c.ll.MoveToFront(e)
				result[key] = e.Value.(*entry).value
			}
		} else {
			missedKeys = append(missedKeys, key)
		}
	}
	c.mu.Unlock()
	if len(missedKeys) == 0 {
		return result, nil
	}
	missedResult, err := c.queue.Do(missedKeys, getter, queueKey)
	if err != nil {
		return result, err
	}
	c.Set(missedResult)
	for k, d := range missedResult {
		result[k] = d
	}
	return result, nil
}
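
// A rough usage sketch, assuming the cache and getter from the sketches above;
// queueKey is forwarded to the request queue (Queue is defined elsewhere in
// the package), and on error the returned Result still holds whatever was
// served from the cache:
//
//	res, err := c.Get(cache.Keys{1, 2, 3}, getter, "users")
//	if err != nil {
//		// handle the error; res contains only the cache hits
//	}
//	user1 := res[1]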

// Set saves the result into the cache.
func (c *Cache) Set(data Result) {
	exp := time.Now().Add(c.opt.TTL)
	c.mu.Lock()
	for k, v := range data {
		if e, ok := c.data[k]; ok {
			c.ll.MoveToFront(e)
			e.Value.(*entry).value = v
			e.Value.(*entry).exp = exp
		} else {
			c.data[k] = c.ll.PushFront(&entry{key: k, value: v, exp: exp})
		}
	}
	c.mu.Unlock()
	// Evict least recently used entries if the cache grew past MaxEntries.
	if c.opt.MaxEntries != 0 && c.ll.Len() > c.opt.MaxEntries {
		c.Flush(c.ll.Len() - c.opt.MaxEntries)
	}
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.ll.Len()
}

// Remove removes the provided keys from the cache.
func (c *Cache) Remove(keys Keys) {
	c.mu.Lock()
	for _, k := range keys {
		if e, ok := c.data[k]; ok {
			c.ll.Remove(e)
			delete(c.data, k)
		}
	}
	c.mu.Unlock()
}

// Flush removes the n least recently used elements from the cache.
func (c *Cache) Flush(n int) {
	c.mu.Lock()
	for i := 1; i <= n; i++ {
		if e := c.ll.Back(); e != nil {
			c.ll.Remove(e)
			delete(c.data, e.Value.(*entry).key)
		}
	}
	c.mu.Unlock()
}