Sooner or later, every project needs to speed up service responses or perform some heavy computations. The quick and easy solution is to use a cache. The usual candidates are Redis or Memcached, but a single-instance microservice doesn't really need them. Sometimes a simple in-memory cache inside the Go application is a better fit, and today I want to show you the main ways to implement one.

Simple map

The first approach is a plain, explicit cache implementation: typically a map that stores the structs, plus your own bookkeeping for key expiration and cache size.

I've put all the implementations into a single GitHub repository.

package go_cache

import (
	"errors"
	"sync"
	"time"
)

type user struct {
	Id    int64  `json:"id"`
	Email string `json:"email"`
}

type cachedUser struct {
	user
	expireAtTimestamp int64
}

type localCache struct {
	stop chan struct{}

	wg    sync.WaitGroup
	mu    sync.RWMutex
	users map[int64]cachedUser
}

// newLocalCache creates the cache and starts a background goroutine
// that evicts expired users every cleanupInterval.
func newLocalCache(cleanupInterval time.Duration) *localCache {
	lc := &localCache{
		users: make(map[int64]cachedUser),
		stop:  make(chan struct{}),
	}

	lc.wg.Add(1)
	go func(cleanupInterval time.Duration) {
		defer lc.wg.Done()
		lc.cleanupLoop(cleanupInterval)
	}(cleanupInterval)

	return lc
}

// cleanupLoop removes expired users on every tick until stop is closed.
func (lc *localCache) cleanupLoop(interval time.Duration) {
	t := time.NewTicker(interval)
	defer t.Stop()

	for {
		select {
		case <-lc.stop:
			return
		case <-t.C:
			lc.mu.Lock()
			for uid, cu := range lc.users {
				if cu.expireAtTimestamp <= time.Now().Unix() {
					delete(lc.users, uid)
				}
			}
			lc.mu.Unlock()
		}
	}
}

func (lc *localCache) stopCleanup() {
	close(lc.stop)
	lc.wg.Wait()
}

func (lc *localCache) update(u user, expireAtTimestamp int64) {
	lc.mu.Lock()
	defer lc.mu.Unlock()

	lc.users[u.Id] = cachedUser{
		user:              u,
		expireAtTimestamp: expireAtTimestamp,
	}
}

var (
	errUserNotInCache = errors.New("the user isn't in cache")
)

func (lc *localCache) read(id int64) (user, error) {
	lc.mu.RLock()
	defer lc.mu.RUnlock()

	cu, ok := lc.users[id]
	if !ok {
		return user{}, errUserNotInCache
	}

	return cu.user, nil
}

func (lc *localCache) delete(id int64) {
	lc.mu.Lock()
	defer lc.mu.Unlock()

	delete(lc.users, id)
}

We use the user ID as the cache key. Since the entries live in a map, every update/read/delete takes O(1) time.
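
A minimal usage sketch, written against the same package (the cleanup interval and the sample user values are made up for illustration):

// exampleLocalCacheUsage is a hypothetical caller in the same package.
func exampleLocalCacheUsage() {
	lc := newLocalCache(5 * time.Minute)
	defer lc.stopCleanup() // stop the background cleanup goroutine on shutdown

	// cache the user for 10 minutes
	lc.update(user{Id: 42, Email: "gopher@example.com"}, time.Now().Add(10*time.Minute).Unix())

	u, err := lc.read(42)
	if errors.Is(err, errUserNotInCache) {
		// cache miss: load the user from the primary store instead
	}
	_ = u

	lc.delete(42)
}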

Pros

  • Explicit implementation
  • Great performance

Cons

  • Extra time to implement a cache for every data structure
  • Extra time to test the cache logic
  • Extra time to fix bugs

gCache library

The gCache library abstracts cache management away and offers various configuration options. For example, you can simply set the eviction policy, the maximum size, the expiration TTL, and so on.

package go_cache

import (
	"errors"
	"fmt"
	"github.com/bluele/gcache"
	"time"
)

type gCache struct {
	users gcache.Cache
}

const (
	cacheSize = 1_000_000
	cacheTTL  = 1 * time.Hour // default expiration
)

func newGCache() *gCache {
	return &gCache{
		users: gcache.New(cacheSize).Expiration(cacheTTL).ARC().Build(),
	}
}

func (gc *gCache) update(u user, expireIn time.Duration) error {
	return gc.users.SetWithExpire(u.Id, u, expireIn)
}

func (gc *gCache) read(id int64) (user, error) {
	val, err := gc.users.Get(id)
	if err != nil {
		if errors.Is(err, gcache.KeyNotFoundError) {
			return user{}, errUserNotInCache
		}

		return user{}, fmt.Errorf("get: %w", err)
	}

	return val.(user), nil
}

func (gc *gCache) delete(id int64) {
	gc.users.Remove(id)
}
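
Usage mirrors the map version; a short sketch in the same package (the IDs and durations are illustrative), where the per-entry TTL passed to update overrides the default one-hour expiration:

// exampleGCacheUsage is a hypothetical caller in the same package.
func exampleGCacheUsage() {
	gc := newGCache()

	// cache the user for 15 minutes instead of the default cacheTTL
	if err := gc.update(user{Id: 7, Email: "gopher@example.com"}, 15*time.Minute); err != nil {
		// handle the error, e.g. log it and continue without the cache
	}

	u, err := gc.read(7)
	if errors.Is(err, errUserNotInCache) {
		// cache miss: load the user from the primary store instead
	}
	_ = u

	gc.delete(7)
}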

Pros

  • Production-ready cache management
  • A single interface for any data type
  • Different cache eviction policies: LRU, LFU, ARC

Cons

  • Cached values come back as interface{} and must be type-asserted on every read, which hurts performance (a generic helper that hides the assertion is sketched right after this list)
  • The library hasn't been maintained for a while
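
If the type assertion noise bothers you, it can at least be centralized in one helper. A sketch using Go 1.18 generics (readTyped is a hypothetical name, and the assertion, with its cost, still happens inside):

// readTyped wraps gcache.Cache.Get and performs the type assertion in one place.
func readTyped[T any](c gcache.Cache, key interface{}) (T, error) {
	var zero T

	val, err := c.Get(key)
	if err != nil {
		return zero, err
	}

	v, ok := val.(T)
	if !ok {
		return zero, fmt.Errorf("unexpected cached type %T", val)
	}

	return v, nil
}

With that, gc.read boils down to calling readTyped[user](gc.users, id) and mapping gcache.KeyNotFoundError to errUserNotInCache as before.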

BigCache library

The BigCache library is a fast, concurrent, evicting in-memory cache that keeps a large number of entries without hurting performance. BigCache keeps its entries on the heap but omits them from GC scans.

package go_cache

import (
	"encoding/json"
	"errors"
	"fmt"
	"github.com/allegro/bigcache"
	"strconv"
	"time"
)

type bigCache struct {
	users *bigcache.BigCache
}

func newBigCache() (*bigCache, error) {
	bCache, err := bigcache.NewBigCache(bigcache.Config{
		// number of shards (must be a power of 2)
		Shards: 1024,

		// time after which entry can be evicted
		LifeWindow: 1 * time.Hour,

		// Interval between removing expired entries (clean up).
		// If set to <= 0 then no action is performed.
		// Setting to < 1 second is counterproductive — bigcache has a one second resolution.
		CleanWindow: 5 * time.Minute,

		// rps * lifeWindow, used only in initial memory allocation
		MaxEntriesInWindow: 1000 * 10 * 60,

		// max entry size in bytes, used only in initial memory allocation
		MaxEntrySize: 500,

		// prints information about additional memory allocation
		Verbose: false,

		// cache will not allocate more memory than this limit, value in MB
		// if value is reached then the oldest entries can be overridden for the new ones
		// 0 value means no size limit
		HardMaxCacheSize: 256,

		// callback fired when the oldest entry is removed because of its expiration time or no space left
		// for the new entry, or because delete was called. A bitmask representing the reason will be returned.
		// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
		OnRemove: nil,

		// OnRemoveWithReason is a callback fired when the oldest entry is removed because of its expiration time or no space left
		// for the new entry, or because delete was called. A constant representing the reason will be passed through.
		// Default value is nil which means no callback and it prevents from unwrapping the oldest entry.
		// Ignored if OnRemove is specified.
		OnRemoveWithReason: nil,
	})
	if err != nil {
		return nil, fmt.Errorf("new big cache: %w", err)
	}

	return &bigCache{
		users: bCache,
	}, nil
}

func (bc *bigCache) update(u user) error {
	bs, err := json.Marshal(&u)
	if err != nil {
		return fmt.Errorf("marshal: %w", err)
	}

	return bc.users.Set(userKey(u.Id), bs)
}

func userKey(id int64) string {
	return strconv.FormatInt(id, 10)
}

func (bc *bigCache) read(id int64) (user, error) {
	bs, err := bc.users.Get(userKey(id))
	if err != nil {
		if errors.Is(err, bigcache.ErrEntryNotFound) {
			return user{}, errUserNotInCache
		}

		return user{}, fmt.Errorf("get: %w", err)
	}

	var u user
	err = json.Unmarshal(bs, &u)
	if err != nil {
		return user{}, fmt.Errorf("unmarshal: %w", err)
	}

	return u, nil
}

func (bc *bigCache) delete(id int64) {
	bc.users.Delete(userKey(id))
}

We use JSON to encode/decode the values, but any data format can be used. For example, a binary format such as Protobuf can improve performance noticeably.
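
As an illustration of where the codec would be swapped, here is a sketch using encoding/gob from the standard library (Protobuf would need generated types); the updateGob/readGob names are hypothetical, and the file would additionally need the "bytes" and "encoding/gob" imports:

func (bc *bigCache) updateGob(u user) error {
	// encode the user with gob instead of JSON
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(u); err != nil {
		return fmt.Errorf("gob encode: %w", err)
	}

	return bc.users.Set(userKey(u.Id), buf.Bytes())
}

func (bc *bigCache) readGob(id int64) (user, error) {
	bs, err := bc.users.Get(userKey(id))
	if err != nil {
		if errors.Is(err, bigcache.ErrEntryNotFound) {
			return user{}, errUserNotInCache
		}

		return user{}, fmt.Errorf("get: %w", err)
	}

	var u user
	if err := gob.NewDecoder(bytes.NewReader(bs)).Decode(&u); err != nil {
		return user{}, fmt.Errorf("gob decode: %w", err)
	}

	return u, nil
}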

Pros

  • Production-ready cache management
  • Rich cache configuration
  • Maintained
  • The cached entries don't add GC pressure, so it performs well even with very large caches

Cons

  • You have to implement an encoder/decoder for the cached values

Benchmarks
goos: darwin
goarch: amd64
pkg: go-cache
cpu: Intel(R) Core(TM) i5-8257U CPU @ 1.40GHz
Benchmark_bigCache
Benchmark_bigCache-8     	 1751281	       688.0 ns/op	     390 B/op	       6 allocs/op
Benchmark_gCache
Benchmark_gCache-8       	  772846	      1699 ns/op	     373 B/op	       8 allocs/op
Benchmark_localCache
Benchmark_localCache-8   	 1534795	       756.6 ns/op	     135 B/op	       0 allocs/op
PASS
ok  	go-cache	6.044s
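
The benchmark bodies aren't shown here; roughly, the localCache one could look like the sketch below, in a _test.go file that imports "testing" (the interleaved update/read workload is an assumption and won't reproduce the exact numbers above):

func Benchmark_localCache(b *testing.B) {
	lc := newLocalCache(5 * time.Minute)
	defer lc.stopCleanup()

	expireAt := time.Now().Add(1 * time.Hour).Unix()

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		id := int64(i)
		lc.update(user{Id: id, Email: "gopher@example.com"}, expireAt)
		_, _ = lc.read(id)
	}
}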

Conclusion

We've looked at different approaches to in-memory caching in Go. Keep in mind that there is no single best solution; it depends on the case. Use this article to compare the options and decide which one fits your project's needs.

Recommended reading: