This article takes a look at caching in Go and gives an accessible introduction to the freecache caching library. I hope you find it helpful!
A mutex-protected map or sync.Map works well as an in-process cache when keys and values are plain values. In real application scenarios, however, keys and values very often are (or contain) pointers, so you need to pay particular attention to a caching library's impact on GC. From the GC-impact perspective, caching libraries fall roughly into two categories:
- Zero GC overhead: libraries such as freecache or bigcache, which store data in a ring buffer underneath and keep the number of pointers small;
- GC overhead: libraries implemented directly on top of a map.
For a map, the GC scans every key/value pair during a collection; only when keys and values are pointer-free basic types can the GC skip scanning them.
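To make that scan cost concrete, here is a minimal, self-contained sketch (the sizes and names are illustrative, not from the article) that times runtime.GC() over a pointer-heavy map versus a pointer-free one:

package main

import (
	"fmt"
	"runtime"
	"strconv"
	"time"
)

func gcPause() time.Duration {
	start := time.Now()
	runtime.GC()
	return time.Since(start)
}

func main() {
	const n = 1 << 20 // 1M entries, purely illustrative

	// Pointer-heavy map: string keys and pointer values must be scanned by the GC.
	withPtrs := make(map[string]*int, n)
	for i := 0; i < n; i++ {
		v := i
		withPtrs[strconv.Itoa(i)] = &v
	}
	fmt.Println("map[string]*int GC pause:", gcPause())

	withPtrs = nil
	runtime.GC() // drop the first map so it does not affect the next measurement

	// Pointer-free map: int32 keys/values let the GC skip scanning the buckets.
	noPtrs := make(map[int32]int32, n)
	for i := int32(0); i < n; i++ {
		noPtrs[i] = i
	}
	fmt.Println("map[int32]int32 GC pause:", gcPause())

	runtime.KeepAlive(noPtrs)
}

On a typical machine the pointer-free map produces a noticeably shorter GC pause, which is exactly the effect freecache and bigcache are designed to exploit.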
Below we take freecache as an example and analyze how it is implemented. A small demo:
package main

import (
	"fmt"
	"runtime"
	"strconv"
	"time"

	"github.com/coocood/freecache"
)

const N = 1000000 // number of entries to write; any large value works

func main() {
	cacheSize := 100 * 1024 * 1024 // 100 MB
	cache := freecache.NewCache(cacheSize)
	for i := 0; i < N; i++ {
		str := strconv.Itoa(i)
		_ = cache.Set([]byte(str), []byte(str), 1)
	}
	now := time.Now()
	runtime.GC()
	fmt.Printf("freecache, GC took: %s\n", time.Since(now))
	_, _ = cache.Get([]byte("aa"))
	now = time.Now()
	for i := 0; i < N; i++ {
		str := strconv.Itoa(i)
		_, _ = cache.Get([]byte(str))
	}
	fmt.Printf("freecache, Get took: %s\n\n", time.Since(now))
}
1 Initialization
Initialization mainly sets up the segments. Note that the slice fields are themselves pointer types:
type segment struct {
	rb            RingBuf // ring buffer that stores data
	segId         int
	_             uint32 // padding
	missCount     int64
	hitCount      int64
	entryCount    int64
	totalCount    int64      // number of entries in ring buffer, including deleted entries.
	totalTime     int64      // used to calculate least recent used entry.
	timer         Timer      // Timer giving current time
	totalEvacuate int64      // used for debug
	totalExpired  int64      // used for debug
	overwrites    int64      // used for debug
	touched       int64      // used for debug
	vacuumLen     int64      // up to vacuumLen, new data can be written without overwriting old data.
	slotLens      [256]int32 // The actual length for every slot.
	slotCap       int32      // max number of entry pointers a slot can hold.
	slotsData     []entryPtr // index: entry pointers for all 256 slots
}

func NewCacheCustomTimer(size int, timer Timer) (cache *Cache) {
	cache = new(Cache)
	for i := 0; i < segmentCount; i++ { // segmentCount = 256
		cache.segments[i] = newSegment(size/segmentCount, i, timer)
	}
	return
}

func newSegment(bufSize int, segId int, timer Timer) (seg segment) {
	seg.rb = NewRingBuf(bufSize, 0)
	seg.segId = segId
	seg.timer = timer
	seg.vacuumLen = int64(bufSize)
	seg.slotCap = 1
	seg.slotsData = make([]entryPtr, 256*seg.slotCap) // 256 slots, each initially holding slotCap (=1) entry pointer
	return
}
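To see how slotsData is used as an index, here is a simplified sketch of how a slot's []entryPtr view can be sliced out of the flat array. It is written from the fields above, not copied from the library, so both the entryPtr fields and the exact slicing should be read as illustrative assumptions:

package main

import "fmt"

// entryPtr is a stand-in for freecache's index entry (fields are illustrative).
type entryPtr struct {
	offset int64  // offset of the entry in the ring buffer
	hash16 uint16 // upper 16 bits of the key hash, used to filter candidates
	keyLen uint16
}

// getSlotView shows, in simplified form, how slot slotId's entries can be
// viewed as a sub-slice of slotsData: slot i occupies
// slotsData[i*slotCap : i*slotCap+slotLens[i]], with capacity up to slotCap.
func getSlotView(slotsData []entryPtr, slotLens *[256]int32, slotCap int32, slotId uint8) []entryPtr {
	off := int32(slotId) * slotCap
	return slotsData[off : off+slotLens[slotId] : off+slotCap]
}

func main() {
	slotCap := int32(1)
	slotsData := make([]entryPtr, 256*slotCap)
	var slotLens [256]int32

	// Pretend slot 3 currently holds one entry.
	slotsData[3*slotCap] = entryPtr{offset: 128, hash16: 0xabcd, keyLen: 5}
	slotLens[3] = 1

	fmt.Println(getSlotView(slotsData, &slotLens, slotCap, 3))
}

The point to notice is that entryPtr contains no pointers, so even a large index costs the GC very little.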
2 Read/Write Flow
Writing goes through Set, which takes both key and value as []byte:
_ = cache.Set([]byte(str), []byte(str), 1)
Set first hashes the key to pick a segment, then uses the slot id to locate that slot's index, a []entryPtr, which it searches for an existing entry. Each segment has its own lock (sync.Mutex), so the cache can sustain a high level of concurrency instead of serializing on a single lock the way sync.Map does.
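As a quick worked example of how one 64-bit hash is carved up into segment, slot, and filter bits (the mask value 255 is inferred from the "low 8 bits" comment in the source below, so treat the exact constants as assumptions):

package main

import "fmt"

func main() {
	// A made-up 64-bit hash value, for illustration only.
	hashVal := uint64(0x123456789abcdef0)

	segID := hashVal & 255          // low 8 bits   -> which segment (0..255)
	slotId := uint8(hashVal >> 8)   // next 8 bits  -> which slot inside the segment
	hash16 := uint16(hashVal >> 16) // next 16 bits -> stored in the index to filter candidates

	fmt.Printf("segID=%d slotId=%d hash16=%#x\n", segID, slotId, hash16)
}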
func (cache *Cache) Set(key, value []byte, expireSeconds int) (err error) {
	hashVal := hashFunc(key)
	segID := hashVal & segmentAndOpVal // low 8 bits select the segment
	cache.locks[segID].Lock()          // per-segment lock
	err = cache.segments[segID].set(key, value, hashVal, expireSeconds)
	cache.locks[segID].Unlock()
	return
}

func (seg *segment) set(key, value []byte, hashVal uint64, expireSeconds int) (err error) {
	slotId := uint8(hashVal >> 8)
	hash16 := uint16(hashVal >> 16)
	slot := seg.getSlot(slotId)
	idx, match := seg.lookup(slot, hash16, key)

	// now and expireAt are computed earlier in the full function
	// (from seg.timer and expireSeconds); they are elided in this excerpt.
	var hdrBuf [ENTRY_HDR_SIZE]byte
	hdr := (*entryHdr)(unsafe.Pointer(&hdrBuf[0]))
	if match { // the key already exists: update it
		matchedPtr := &slot[idx]
		seg.rb.ReadAt(hdrBuf[:], matchedPtr.offset)
		hdr.slotId = slotId
		hdr.hash16 = hash16
		hdr.keyLen = uint16(len(key))
		originAccessTime := hdr.accessTime
		hdr.accessTime = now
		hdr.expireAt = expireAt
		hdr.valLen = uint32(len(value))
		if hdr.valCap >= hdr.valLen {
			// The existing entry's value capacity can hold the new value: overwrite in place.
			atomic.AddInt64(&seg.totalTime, int64(hdr.accessTime)-int64(originAccessTime))
			seg.rb.WriteAt(hdrBuf[:], matchedPtr.offset)
			seg.rb.WriteAt(value, matchedPtr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen))
			atomic.AddInt64(&seg.overwrites, 1)
			return
		}
		// Delete the old entryPtr (a memory copy within slotsData);
		// in the ring buffer the entry is only marked as deleted.
		seg.delEntryPtr(slotId, slot, idx)
		match = false
		// increase capacity and limit entry len.
		for hdr.valCap < hdr.valLen {
			hdr.valCap *= 2
		}
		if hdr.valCap > uint32(maxKeyValLen-len(key)) {
			hdr.valCap = uint32(maxKeyValLen - len(key))
		}
	} else { // no existing entry: build a fresh header
		hdr.slotId = slotId
		hdr.hash16 = hash16
		hdr.keyLen = uint16(len(key))
		hdr.accessTime = now
		hdr.expireAt = expireAt
		hdr.valLen = uint32(len(value))
		hdr.valCap = uint32(len(value))
		if hdr.valCap == 0 { // avoid infinite loop when increasing capacity.
			hdr.valCap = 1
		}
	}

	// Actual entry length = ENTRY_HDR_SIZE (24 bytes) + key length + value capacity.
	entryLen := ENTRY_HDR_SIZE + int64(len(key)) + int64(hdr.valCap)
	slotModified := seg.evacuate(entryLen, slotId, now)
	if slotModified {
		// the slot has been modified during evacuation, we need to look up 'idx' again,
		// otherwise there would be an index out of bound error.
		slot = seg.getSlot(slotId)
		idx, match = seg.lookup(slot, hash16, key)
		// assert(match == false)
	}
	newOff := seg.rb.End()
	seg.insertEntryPtr(slotId, hash16, newOff, idx, hdr.keyLen)
	seg.rb.Write(hdrBuf[:])
	seg.rb.Write(key)
	seg.rb.Write(value)
	seg.rb.Skip(int64(hdr.valCap - hdr.valLen))
	atomic.AddInt64(&seg.totalTime, int64(now))
	atomic.AddInt64(&seg.totalCount, 1)
	seg.vacuumLen -= entryLen
	return
}
Before appending, Set calls seg.evacuate to make sure there is enough room. The oldest entry in the ring buffer sits at offset oldOff := seg.rb.End() + seg.vacuumLen - seg.rb.Size(), and the eviction loop below works from that position. If a slot's []entryPtr index is full, it is grown via seg.expand. Once space has been reclaimed, writing into the ring buffer is simply a matter of calling rb.Write. The eviction loop, with its LRU-like heuristic, looks like this:
func (seg *segment) evacuate(entryLen int64, slotId uint8, now uint32) (slotModified bool) {
	var oldHdrBuf [ENTRY_HDR_SIZE]byte
	consecutiveEvacuate := 0
	for seg.vacuumLen < entryLen {
		oldOff := seg.rb.End() + seg.vacuumLen - seg.rb.Size() // offset of the oldest entry
		seg.rb.ReadAt(oldHdrBuf[:], oldOff)
		oldHdr := (*entryHdr)(unsafe.Pointer(&oldHdrBuf[0]))
		oldEntryLen := ENTRY_HDR_SIZE + int64(oldHdr.keyLen) + int64(oldHdr.valCap)
		if oldHdr.deleted { // already marked as deleted: just reclaim its space
			consecutiveEvacuate = 0
			atomic.AddInt64(&seg.totalTime, -int64(oldHdr.accessTime))
			atomic.AddInt64(&seg.totalCount, -1)
			seg.vacuumLen += oldEntryLen
			continue
		}
		expired := oldHdr.expireAt != 0 && oldHdr.expireAt < now
		leastRecentUsed := int64(oldHdr.accessTime)*atomic.LoadInt64(&seg.totalCount) <= atomic.LoadInt64(&seg.totalTime)
		if expired || leastRecentUsed || consecutiveEvacuate > 5 {
			// The entry can be reclaimed (evicted).
			seg.delEntryPtrByOffset(oldHdr.slotId, oldHdr.hash16, oldOff)
			if oldHdr.slotId == slotId {
				slotModified = true
			}
			consecutiveEvacuate = 0
			atomic.AddInt64(&seg.totalTime, -int64(oldHdr.accessTime))
			atomic.AddInt64(&seg.totalCount, -1)
			seg.vacuumLen += oldEntryLen
			if expired {
				atomic.AddInt64(&seg.totalExpired, 1)
			} else {
				atomic.AddInt64(&seg.totalEvacuate, 1)
			}
		} else {
			// evacuate an old entry that has been accessed recently for better cache hit rate.
			newOff := seg.rb.Evacuate(oldOff, int(oldEntryLen))
			seg.updateEntryPtr(oldHdr.slotId, oldHdr.hash16, oldOff, newOff)
			consecutiveEvacuate++
			atomic.AddInt64(&seg.totalEvacuate, 1)
		}
	}
	return
}
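The leastRecentUsed test above is an approximation: an entry counts as "least recently used" when its accessTime is no greater than the average access time of all live entries (totalTime / totalCount). A tiny numeric illustration with made-up counter values:

package main

import "fmt"

func main() {
	// Hypothetical segment counters: three live entries whose access times
	// (in seconds) are 100, 200 and 300.
	totalCount := int64(3)
	totalTime := int64(100 + 200 + 300) // sum of access times = 600

	for _, accessTime := range []int64{100, 200, 300} {
		// Same comparison as in evacuate, rearranged from
		// accessTime <= totalTime/totalCount to avoid integer division.
		leastRecentUsed := accessTime*totalCount <= totalTime
		fmt.Printf("accessTime=%d leastRecentUsed=%v\n", accessTime, leastRecentUsed)
	}
	// Entries at 100 and 200 are at or below the average (200) and qualify
	// for eviction; the entry at 300 does not and would be relocated instead.
}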
The Get path in freecache is comparatively simple: hash the key to find the segment, use the slot id to find the slot's index, then locate the entry with a binary search plus a short scan. If nothing matches, ErrNotFound is returned; otherwise the time-related counters are updated. Get also updates the cache hit-rate metrics.
func (cache *Cache) Get(key []byte) (value []byte, err error) {
	hashVal := hashFunc(key)
	segID := hashVal & segmentAndOpVal
	cache.locks[segID].Lock()
	value, _, err = cache.segments[segID].get(key, nil, hashVal, false)
	cache.locks[segID].Unlock()
	return
}

func (seg *segment) get(key, buf []byte, hashVal uint64, peek bool) (value []byte, expireAt uint32, err error) {
	hdr, ptr, err := seg.locate(key, hashVal, peek) // hash + slot lookup
	if err != nil {
		return
	}
	expireAt = hdr.expireAt
	if cap(buf) >= int(hdr.valLen) {
		value = buf[:hdr.valLen]
	} else {
		value = make([]byte, hdr.valLen)
	}
	seg.rb.ReadAt(value, ptr.offset+ENTRY_HDR_SIZE+int64(hdr.keyLen))
	return
}
Note that the returned value is a []byte copied out of the ring buffer (a fresh allocation unless a large enough buf is supplied), not a reference into the cache's internal storage.
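A small usage sketch tying Set and Get together; the expiry behavior shown is inferred from the expireSeconds parameter and the expireAt checks above, so treat the exact timing semantics as an assumption:

package main

import (
	"fmt"
	"time"

	"github.com/coocood/freecache"
)

func main() {
	cache := freecache.NewCache(10 * 1024 * 1024) // 10 MB

	// Store a value with a 1-second TTL (expireSeconds = 1).
	_ = cache.Set([]byte("name"), []byte("freecache"), 1)

	if v, err := cache.Get([]byte("name")); err == nil {
		fmt.Println("got:", string(v))
	}

	// A missing key yields freecache.ErrNotFound.
	if _, err := cache.Get([]byte("missing")); err == freecache.ErrNotFound {
		fmt.Println("missing key:", err)
	}

	// After the TTL passes, the entry should no longer be returned.
	time.Sleep(1100 * time.Millisecond)
	if _, err := cache.Get([]byte("name")); err != nil {
		fmt.Println("after expiry:", err)
	}
}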
3 Summary
Benchmarks of several common cache libraries show that Set performance varies noticeably (though not by orders of magnitude), while Get performance is much closer. For the vast majority of scenarios, then, raw Set/Get throughput is not the deciding factor; what matters more is whether the features meet the business requirements and what the GC impact is. For the benchmark comparison, see: https://golang2.eddycjy.com/posts/ch5/04-performance/
One caching scenario deserves special care: holding an entire dataset in memory and replacing it wholesale on every update. If you use freecache here and the cache size is not chosen well, part of the data may be evicted, which breaks the "everything is in the cache" assumption. To avoid this, the size must be set "large enough", while also keeping an eye on the resulting memory footprint.
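A rough back-of-the-envelope sizing helper for that scenario, assuming the per-entry overhead of ENTRY_HDR_SIZE (24 bytes) plus key and value seen in the Set code, and adding generous headroom because the total size is split evenly across 256 segments and keys are not guaranteed to hash uniformly. The numbers and the headroom factor are assumptions, not freecache recommendations:

package main

import "fmt"

const entryHdrSize = 24 // per-entry header size, as seen in the Set code above

// estimateCacheSize returns a rough cache size for holding every entry of a
// full dataset, padded by headroom to absorb uneven hashing across the
// 256 segments and value-capacity rounding.
func estimateCacheSize(numEntries, avgKeyLen, avgValLen int, headroom float64) int {
	perEntry := entryHdrSize + avgKeyLen + avgValLen
	return int(float64(numEntries*perEntry) * headroom)
}

func main() {
	// Example: 1 million entries, ~16-byte keys, ~128-byte values, 2x headroom.
	size := estimateCacheSize(1_000_000, 16, 128, 2.0)
	fmt.Printf("suggested cache size: %d bytes (~%.1f MB)\n", size, float64(size)/(1<<20))
}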