// do launches 1000 goroutines that each increment a shared counter,
// serialized by a buffered channel of capacity 1 acting as a lock
// (a counting semaphore of count 1 is a mutex). Returns the final
// count, which is always 1000.
func do() int {
	sem := make(chan struct{}, 1) // struct{} is the idiomatic element type for pure signaling
	var n int64
	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done() // deferred so the WaitGroup is released even if the body panics
			sem <- struct{}{}
			n++ // not a data race: the semaphore admits one goroutine at a time
			<-sem
		}()
	}
	wg.Wait()
	return int(n)
}
If multiple goroutines execute n++ at the same time, one of the increments can be lost. An alternative is to embed a sync.Mutex and change to using m.Lock() and m.Unlock() to get the equivalent behaviour using a more explicit mutex:

type SafeMap struct {
sync.Mutex
m map[string]int
}
// Incr increments the counter stored under key while holding the
// embedded mutex, so concurrent callers cannot race on the map.
func (s *SafeMap) Incr(key string) {
	s.Lock()
	defer s.Unlock() // useful habit to defer unlock
	s.m[key]++
}
sync/atomic

The sync/atomic package offers some hardware-dependent atomic operations like atomic add, CAS, etc.

sync.Once

var once sync.Once
var x *someSingleton
// initSingleton assigns the package-level singleton x.
// It is meant to be invoked exactly once via once.Do; calling it
// directly from multiple goroutines would race on x.
func initSingleton() {
x = NewSingleton()
}
// handle is an HTTP handler that lazily initializes the singleton on
// first use. once.Do guarantees initSingleton runs exactly once even
// when many requests arrive concurrently.
func handle(w http.ResponseWriter, r *http.Request) {
once.Do(initSingleton)
//...
}
once.Do ensures initSingleton runs only once — the naive alternative of checking if x == nil and then assigning isn’t threadsafe!

sync.Pool

sync.Pool is a way to store a threadsafe pool of objects that can be reused:

var bufPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
// Log writes "key=val" to w, reusing a buffer from bufPool to avoid
// allocating a fresh one on every call.
func Log(w io.Writer, key, val string) {
	b := bufPool.Get().(*bytes.Buffer) // type assertion (not reflection)
	b.Reset()                          // a recycled buffer may still hold old contents
	defer bufPool.Put(b)               // return b to the pool even if a later call panics
	b.WriteString(key)
	b.WriteByte('=')
	b.WriteString(val)
	// Best-effort logging: the Write error is deliberately ignored.
	w.Write(b.Bytes())
}
The New function in the struct is called whenever Get() is called and there are no objects in the pool to use (in this case it just creates and returns a new byte buffer). Use the race detector to catch data races (go run -race .).