From 049a2fddb4f6de0a8f8f50ca31a30586b237204a Mon Sep 17 00:00:00 2001
From: Yan Kalcheevskiy
Date: Sun, 31 Aug 2025 14:42:47 +0300
Subject: [PATCH] Refactor CCache to use sync.Map for improved concurrency

---
 ccache.go | 33 ++++-----------------------------
 1 file changed, 4 insertions(+), 29 deletions(-)

diff --git a/ccache.go b/ccache.go
index d877c8c..25b23d6 100644
--- a/ccache.go
+++ b/ccache.go
@@ -4,29 +4,18 @@ import (
 	"sync"
 )
 
-type entry[T any] struct {
-	result T
-	err    error
-	ready  chan struct{}
-}
-
 // CCache implements a concurrent cache that memoizes function results.
 // It is safe for concurrent use and has no size limit.
 // Multiple goroutines can request the same key concurrently,
 // but the function will only be executed once.
 type CCache[T any] struct {
-	mu sync.Mutex
-	m  map[string]*entry[T]
+	m sync.Map
 }
 
 // New creates a new concurrent cache.
 // The cache has no size limit and will grow as needed.
 func New[T any]() *CCache[T] {
-	c := &CCache[T]{
-		m: make(map[string]*entry[T]),
-	}
-
-	return c
+	return &CCache[T]{}
 }
 
 // Do executes and memoizes the result of function f with the given key.
@@ -34,21 +23,7 @@ func New[T any]() *CCache[T] {
 // If multiple goroutines call Do with the same key concurrently,
 // only one execution of f will occur, and all callers will receive the same result.
 func (c *CCache[T]) Do(key string, f func() (T, error)) (T, error) {
-	c.mu.Lock()
-	e, ok := c.m[key]
-	if !ok {
-		e = &entry[T]{
-			ready: make(chan struct{}),
-		}
-		c.m[key] = e
-		c.mu.Unlock()
-
-		e.result, e.err = f()
-		close(e.ready)
-	} else {
-		c.mu.Unlock()
-		<-e.ready
-	}
+	v, _ := c.m.LoadOrStore(key, sync.OnceValues(f))
 
-	return e.result, e.err
+	return v.(func() (T, error))()
 }
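
For context, a minimal usage sketch against the refactored API. It assumes the file belongs to a package named ccache (the import path below is hypothetical) and a Go 1.21+ toolchain, since sync.OnceValues was added in Go 1.21.

package main

import (
	"fmt"
	"sync"

	"example.com/ccache" // hypothetical import path for the package containing ccache.go
)

func main() {
	cache := ccache.New[string]()

	// Three goroutines request the same key; the expensive function runs only
	// once and all callers observe the same memoized result.
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err := cache.Do("greeting", func() (string, error) {
				fmt.Println("computing once")
				return "hello", nil
			})
			fmt.Println(v, err)
		}()
	}
	wg.Wait()
}

As with the previous implementation, a non-nil error returned by f is memoized alongside the value, so a failed computation is not retried for the same key. The type assertion in Do is safe because only wrappers produced by sync.OnceValues(f) are ever stored in the map.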