A simple, fast, and thread-safe cache library for Go with support for multiple eviction policies.
- Thread-safe: Safe for concurrent use with multiple goroutines
- No Eviction: Simple cache that doesn't evict items when full
- LRU Eviction: Least Recently Used eviction policy
- LFU Eviction: Least Frequently Used eviction policy
- TTL Support: Time-To-Live expiration with automatic cleanup
- Dynamic Resizing: Change cache capacity at runtime
- Simple API: Easy to use interface
- Zero Dependencies: No external dependencies
go get github.com/l00pss/littlecache

package main
import (
"fmt"
"time"
"github.com/l00pss/littlecache"
)
func main() {
// Create a new cache with default configuration (LRU, capacity: 2048)
config := littlecache.DefaultConfig()
cache, err := littlecache.NewLittleCache(config)
if err != nil {
panic(err)
}
// Set values
cache.Set("key1", "value1")
cache.Set("key2", 42)
// Get values
value, exists := cache.Get("key1")
if exists {
fmt.Println("Found:", value)
}
// Check cache size
fmt.Println("Cache size:", cache.Size())
}

// NoEviction Cache (doesn't evict items when full)
config := littlecache.Config{
MaxSize: 100,
EvictionPolicy: littlecache.NoEviction,
}
cache, err := littlecache.NewLittleCache(config)
if err != nil {
panic(err)
}
// LRU Cache
lru_config := littlecache.Config{
MaxSize: 100,
EvictionPolicy: littlecache.LRU,
}
lru_cache, err := littlecache.NewLittleCache(lru_config)
if err != nil {
panic(err)
}
// LFU Cache
lfu_config := littlecache.Config{
MaxSize: 100,
EvictionPolicy: littlecache.LFU,
}
lfu_cache, err := littlecache.NewLittleCache(lfu_config)
if err != nil {
panic(err)
}

// Create TTL cache with 5 minute default TTL
config := littlecache.Config{
MaxSize: 100,
EvictionPolicy: littlecache.LRU,
}
ttlCache, err := littlecache.NewTTLCacheFromConfig(config, 5*time.Minute)
if err != nil {
panic(err)
}
defer ttlCache.Stop() // Stop cleanup goroutine
// Set with default TTL (5 minutes)
ttlCache.Set("key1", "value1")
// Set with custom TTL
ttlCache.SetWithTTL("key2", "value2", 1*time.Minute)
// Check remaining TTL
if remaining, exists := ttlCache.GetTTL("key1"); exists {
fmt.Printf("Key1 expires in: %v\n", remaining)
}
// Extend TTL
ttlCache.ExtendTTL("key1", 2*time.Minute)

// Custom TTL configuration
underlyingCache, err := littlecache.NewLittleCache(littlecache.Config{
MaxSize: 100,
EvictionPolicy: littlecache.LRU,
})
if err != nil {
panic(err)
}
ttlConfig := littlecache.TTLConfig{
UnderlyingCache: underlyingCache,
DefaultTTL: 10 * time.Minute,
CleanupInterval: 30 * time.Second, // How often to run cleanup
}
ttlCache := littlecache.NewTTLCache(ttlConfig)
defer ttlCache.Stop()

// Resize cache to new capacity
err := cache.Resize(200)
if err != nil {
panic(err)
}

Doesn't evict items when the cache reaches capacity. New items are simply not added if the cache is full, but existing items can still be updated.
config := littlecache.Config{
MaxSize: 3,
EvictionPolicy: littlecache.NoEviction,
}
cache, err := littlecache.NewLittleCache(config)
if err != nil {
panic(err)
}
// Fill cache to capacity
cache.Set("key1", "value1")
cache.Set("key2", "value2")
cache.Set("key3", "value3")
// Try to add 4th item - will be ignored since cache is full
cache.Set("key4", "value4")
fmt.Println("Cache size:", cache.Size()) // Output: 3
// key4 won't be found
if _, found := cache.Get("key4"); !found {
fmt.Println("key4 not found - cache was full")
}
// But you can still update existing keys
cache.Set("key1", "updated_value1") // This works

Evicts the least recently accessed item when the cache reaches capacity.
config := littlecache.Config{
MaxSize: 100,
EvictionPolicy: littlecache.LRU,
}

Evicts the least frequently accessed item when the cache reaches capacity. If multiple items have the same frequency, the oldest one is evicted.
config := littlecache.Config{
MaxSize: 100,
EvictionPolicy: littlecache.LFU,
}

Automatically expires items after a specified duration. Can be combined with any underlying cache type (LRU or LFU).
// Create TTL cache with LRU as underlying cache
ttlCache, err := littlecache.NewTTLCacheFromConfig(
littlecache.Config{MaxSize: 100, EvictionPolicy: littlecache.LRU},
5*time.Minute, // default TTL
)

Set(key string, value interface{}) - Add or update a key-value pair
Get(key string) (interface{}, bool) - Retrieve a value by key
Delete(key string) - Remove a key-value pair
Clear() - Remove all key-value pairs
Size() int - Get the number of items in cache
Resize(newSize int) error - Change cache capacity
SetWithTTL(key string, value interface{}, ttl time.Duration) - Set with custom TTL
GetTTL(key string) (time.Duration, bool) - Get remaining time until expiration
ExtendTTL(key string, additionalTime time.Duration) bool - Extend expiration time
Stop() - Stop the cleanup goroutine (important for graceful shutdown)
type Config struct {
MaxSize int // Maximum number of items
EvictionPolicy EvictionPolicy // Eviction policy (NoEviction, LRU, LFU)
}

Available Eviction Policies:
NoEviction: No items are evicted when the cache is full
LRU: Least Recently Used eviction
LFU: Least Frequently Used eviction
TTL: Time-To-Live expiration (used with the TTL cache wrapper)
type TTLConfig struct {
UnderlyingCache LittleCache // The cache implementation to wrap
DefaultTTL time.Duration // Default expiration time for items
CleanupInterval time.Duration // How often to run expired item cleanup
}
type TTLEntry struct {
Value interface{}
ExpiresAt time.Time
}

LittleCache is designed for concurrent use. All operations are protected by read-write mutexes, allowing multiple concurrent reads while ensuring exclusive access for writes.
Run the test suite:
go test
go test -v # verbose output

This project is licensed under the MIT License - see the LICENSE file for details.
