A semaphore maintains a counter whose value always stays between 0 and N, where N is the total amount of resource available. The official Go extensions library (golang.org/x/sync) provides a weight-based semaphore concurrency primitive, semaphore: instead of always acquiring or releasing a single unit, each caller specifies a weight n for the amount of resource it needs.
Data structures
type waiter struct {
	n     int64           // requested weight
	ready chan<- struct{} // Closed when semaphore acquired.
}

// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
	w := &Weighted{size: n}
	return w
}

// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
	size    int64      // maximum combined weight
	cur     int64      // weight currently held, always in [0, size]
	mu      sync.Mutex // protects cur and waiters
	waiters list.List  // blocked callers, each element is a waiter
}
The waiter struct represents a blocked caller: n is the weight it requested, and ready is a channel that is closed once the semaphore has been acquired on its behalf.

NewWeighted(n) creates a Weighted semaphore whose maximum combined weight is n.

Weighted is the semaphore itself. size is the maximum combined weight fixed at creation time; cur is the weight currently held and always stays in the range [0, size]; mu is the mutex that protects the other fields; waiters is a linked list of blocked goroutines, one waiter element per goroutine, kept in the order the requests arrived.

The free capacity at any moment is size - cur.
Method list

Weighted exposes four methods:

- NewWeighted(n): create a semaphore with maximum combined weight n.
- Acquire(ctx, n): acquire weight n, blocking until it is available or ctx is done.
- Release(n): give weight n back to the semaphore.
- TryAcquire(n): the non-blocking variant of Acquire; if the weight cannot be acquired right away it returns false instead of blocking.

A basic round trip through the API is shown in the sketch below.
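To make the method list concrete, here is a minimal, self-contained sketch that exercises all four calls. The maximum weight of 3 and the individual weights are arbitrary choices for illustration, not requirements of the library.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	// A semaphore with a maximum combined weight of 3.
	sem := semaphore.NewWeighted(3)

	// Acquire weight 2; succeeds immediately because all 3 units are free.
	if err := sem.Acquire(context.Background(), 2); err != nil {
		panic(err)
	}

	// Only 1 unit is left, so a non-blocking request for 2 fails.
	fmt.Println(sem.TryAcquire(2)) // false
	fmt.Println(sem.TryAcquire(1)) // true

	// Give everything back.
	sem.Release(1)
	sem.Release(2)
}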
Acquiring: Acquire and TryAcquire

There are two ways to acquire resources, Acquire() and TryAcquire(); the difference between them was described above. Both of them first take the global mutex mu.

When acquiring, there are three cases depending on how much free resource is available:

1. There is enough free resource and nobody is already waiting: cur is increased by n and Acquire returns nil immediately.
2. The requested weight n is larger than size, so the request can never succeed: Acquire blocks until ctx is done and then returns ctx.Err().
3. There is not enough free resource right now: the caller is appended to the waiters list and blocks until it is notified or ctx is done.

The annotated source of Acquire:
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	// Case 1: enough free resource and nobody is waiting; succeed right away.
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}

	// Case 2: the requested weight exceeds the maximum combined weight, so the
	// request can never succeed; block until ctx is done and return ctx.Err().
	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}

	// Case 3: not enough free resource right now; append the caller to the tail
	// of the waiters list and block in the select below until it is either
	// notified (ready is closed) or ctx is done.
	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()

	select {
	case <-ctx.Done():
		// The context was canceled (or timed out) while waiting.
		err := ctx.Err()
		s.mu.Lock()
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancelation.
			err = nil
		default:
			// Canceled before the resource was acquired: remove our waiter from
			// the list.
			isFront := s.waiters.Front() == elem
			s.waiters.Remove(elem)
			// If we were at the front of the queue and there is still free
			// resource, wake up the remaining waiters.
			if isFront && s.size > s.cur {
				s.notifyWaiters()
			}
		}
		s.mu.Unlock()
		return err
	case <-ready:
		// Another goroutine's Release handed us the weight and closed ready.
		return nil
	}
}
Note the two nested selects. The outer select blocks until either the context is done or the ready channel is closed by notifyWaiters. If the context fires first, the inner select checks without blocking whether ready happened to be closed in the meantime; if it was, the cancelation is ignored and Acquire still returns nil, otherwise the waiter removes itself from the queue and returns ctx.Err().
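A minimal sketch of the cancelation paths, assuming a semaphore of size 1 and an illustrative 100 ms timeout (both values are arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(1)

	// Hold the only unit so that later requests have to wait.
	_ = sem.Acquire(context.Background(), 1)

	// Case 3 with cancelation: the request queues up, the context times out
	// first, and Acquire returns ctx.Err().
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(sem.Acquire(ctx, 1)) // context deadline exceeded

	// Case 2: a weight larger than the semaphore's size can never be satisfied,
	// so Acquire also just waits for the context and reports its error.
	ctx2, cancel2 := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel2()
	fmt.Println(sem.Acquire(ctx2, 2)) // context deadline exceeded

	sem.Release(1)
}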
TryAcquire checks the available counter only once: it succeeds (and returns true) only when there is enough free capacity and no other caller is already waiting; in every other case it returns false immediately without blocking.
// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
func (s *Weighted) TryAcquire(n int64) bool {
	s.mu.Lock()
	success := s.size-s.cur >= n && s.waiters.Len() == 0
	if success {
		s.cur += n
	}
	s.mu.Unlock()
	return success
}
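A small usage sketch of the non-blocking path, for example to shed load instead of queueing. The tryHandle helper and the limit of 2 are made up for illustration.

package main

import (
	"fmt"

	"golang.org/x/sync/semaphore"
)

// sem limits the whole program to 2 concurrent jobs; the limit is arbitrary.
var sem = semaphore.NewWeighted(2)

// tryHandle runs job only if a slot is free; otherwise it tells the caller to
// back off instead of blocking, which is exactly what TryAcquire is for.
func tryHandle(job func()) bool {
	if !sem.TryAcquire(1) {
		return false
	}
	defer sem.Release(1)
	job()
	return true
}

func main() {
	ok := tryHandle(func() { fmt.Println("working") })
	fmt.Println(ok) // true: a slot was free
}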
Releasing: Release

Release gives weight n back to the semaphore and then walks the waiters list to wake up any callers that can now be satisfied.
// Release releases the semaphore with a weight of n.
func (s *Weighted) Release(n int64) {
	s.mu.Lock()
	s.cur -= n
	if s.cur < 0 {
		s.mu.Unlock()
		panic("semaphore: released more than held")
	}
	s.notifyWaiters()
	s.mu.Unlock()
}
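Because cur would go negative, releasing more weight than is currently held panics. A tiny sketch of that failure mode; the recover is only there so the program can print the panic message:

package main

import (
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	defer func() {
		// Releasing more than was acquired makes cur negative and panics.
		fmt.Println("recovered:", recover()) // semaphore: released more than held
	}()

	sem := semaphore.NewWeighted(1)
	sem.Release(1) // nothing was acquired, so this panics
}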
Notification mechanism

notifyWaiters walks the list from the front in a for loop. For each waiter it checks whether the free capacity (size - cur) is at least waiter.n; if so, it adds the weight to Weighted.cur, removes the element, and closes the waiter's ready channel to wake it up. As soon as it meets a waiter whose request is larger than the remaining free capacity, it stops instead of skipping ahead to smaller requests.
func (s *Weighted) notifyWaiters() {
	for {
		next := s.waiters.Front()
		if next == nil {
			break // No more waiters blocked.
		}
		w := next.Value.(waiter)
		if s.size-s.cur < w.n {
			// Not enough tokens for the next waiter. We could keep going (to try to
			// find a waiter with a smaller request), but under load that could cause
			// starvation for large requests; instead, we leave all remaining waiters
			// blocked.
			//
			// Consider a semaphore used as a read-write lock, with N tokens, N
			// readers, and one writer. Each reader can Acquire(1) to obtain a read
			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
			// of the readers. If we allow the readers to jump ahead in the queue,
			// the writer will starve — there is always one token available for every
			// reader.
			break
		}
		s.cur += w.n
		s.waiters.Remove(next)
		close(w.ready)
	}
}
The consequence is strictly FIFO wake-up: if the waiter at the front of the list needs a large weight, every waiter behind it stays blocked until enough resource has been released for that front waiter, even when the currently free capacity would already be enough for some of the smaller requests behind it. During that time some capacity sits idle, trading a bit of utilization for protection against starving large requests; only after the front waiter has been satisfied does notification move on to the waiters behind it.
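A small sketch of that FIFO behavior, assuming a semaphore of size 3; all weights and sleep durations below are illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3)

	// Hold 2 of the 3 units so that the next requests have to queue.
	_ = sem.Acquire(ctx, 2)

	// First waiter: wants 3 units, which cannot be satisfied yet.
	go func() {
		_ = sem.Acquire(ctx, 3)
		fmt.Println("large request (3) acquired")
		sem.Release(3)
	}()

	// Second waiter: wants only 1 unit. One unit is actually free, but because
	// the large request is ahead of it in the queue, it waits too.
	time.Sleep(100 * time.Millisecond) // make sure the large request queued first
	go func() {
		_ = sem.Acquire(ctx, 1)
		fmt.Println("small request (1) acquired")
		sem.Release(1)
	}()

	time.Sleep(100 * time.Millisecond)
	fmt.Println("releasing 2")
	sem.Release(2) // now the large request runs first, then the small one

	time.Sleep(time.Second) // crude wait so the goroutines can print
}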
Usage example

The example below uses the semaphore as a worker pool that limits how many goroutines work on tasks in parallel. It computes the number of Collatz steps for the integers 1 through 32. The full program:
package main

import (
	"context"
	"fmt"
	"log"
	"runtime"

	"golang.org/x/sync/semaphore"
)

// Example_workerPool demonstrates how to use a semaphore to limit the number of
// goroutines working on parallel tasks.
//
// This use of a semaphore mimics a typical “worker pool” pattern, but without
// the need to explicitly shut down idle workers when the work is done.
func main() {
	ctx := context.TODO()

	var (
		// The maximum combined weight is the number of logical CPUs.
		maxWorkers = runtime.GOMAXPROCS(0)
		sem        = semaphore.NewWeighted(int64(maxWorkers))
		out        = make([]int, 32)
	)

	// Compute the output using up to maxWorkers goroutines at a time.
	for i := range out {
		// When maxWorkers goroutines are in flight, Acquire blocks until one of the
		// workers finishes.
		if err := sem.Acquire(ctx, 1); err != nil {
			log.Printf("Failed to acquire semaphore: %v", err)
			break
		}

		go func(i int) {
			defer sem.Release(1)
			out[i] = collatzSteps(i + 1)
		}(i)
	}

	// Acquire all of the tokens to wait for any remaining workers to finish.
	// (With the errgroup primitive this statement would not be needed.)
	if err := sem.Acquire(ctx, int64(maxWorkers)); err != nil {
		log.Printf("Failed to acquire semaphore: %v", err)
	}

	fmt.Println(out)
}
// collatzSteps computes the number of steps to reach 1 under the Collatz
// conjecture. (See https://en.wikipedia.org/wiki/Collatz_conjecture.)
func collatzSteps(n int) (steps int) {
	if n <= 0 {
		panic("nonpositive input")
	}

	for ; n > 1; steps++ {
		if steps < 0 {
			panic("too many steps")
		}

		if n%2 == 0 {
			n /= 2
			continue
		}

		const maxInt = int(^uint(0) >> 1)
		if n > (maxInt-1)/3 {
			panic("overflow")
		}
		n = 3*n + 1
	}

	return steps
}
Inside the for loop, each iteration first calls sem.Acquire(ctx, 1), so at most maxWorkers of the 32 tasks run at the same time; once maxWorkers goroutines are in flight, the remaining submissions block in Acquire until a running worker calls Release.

After the loop, sem.Acquire(ctx, int64(maxWorkers)) asks for all maxWorkers units at once. That call can only succeed after every worker has released its unit, so it doubles as a way to wait for all of the work to finish. Alternatively, the errgroup primitive can do the waiting.

Here is the same example rewritten with errgroup (it reuses collatzSteps from above and additionally imports golang.org/x/sync/errgroup):
func main() {
	ctx := context.TODO()

	var (
		maxWorkers = runtime.GOMAXPROCS(0)
		sem        = semaphore.NewWeighted(int64(maxWorkers))
		out        = make([]int, 32)
	)

	group, _ := errgroup.WithContext(context.Background())

	for i := range out {
		if err := sem.Acquire(ctx, 1); err != nil {
			log.Printf("Failed to acquire semaphore: %v", err)
			break
		}

		i := i // capture the loop variable (not needed since Go 1.22)
		group.Go(func() error {
			// Do the work directly in the errgroup goroutine; starting another
			// goroutine inside it would let Wait return before the work is done.
			defer sem.Release(1)
			out[i] = collatzSteps(i + 1)
			return nil
		})
	}

	// Wait blocks until every goroutine started with group.Go has returned.
	if err := group.Wait(); err != nil {
		fmt.Println(err)
	}

	fmt.Println(out)
}