High-performance cache policies and supporting data structures.
This guide covers how to integrate CacheKit into your application.
The CacheBuilder provides a unified, ergonomic API for creating caches with any eviction policy. It hides internal implementation details (like Arc<V> wrapping) and provides a consistent interface.
use cachekit::builder::{CacheBuilder, CachePolicy};
// Create a cache with the builder
let mut cache = CacheBuilder::new(1000).build::<u64, String>(CachePolicy::Lru);
// Standard operations
cache.insert(1, "value".to_string());
let value = cache.get(&1);
cache.clear();
┌─────────────────────────────────────────────────────────────────────────────┐
│ CacheBuilder │
│ │
│ CacheBuilder::new(capacity) │
│ │ │
│ ▼ │
│ .build::<K, V>(policy) │
│ │ │
│ ├─── CachePolicy::Fifo ────► FifoCache<K, V> │
│ ├─── CachePolicy::Lru ─────► LruCore<K, V> │
│ ├─── CachePolicy::LruK ────► LrukCache<K, V> │
│ ├─── CachePolicy::Lfu { bucket_hint } ─► LfuCache<K, V> │
│ ├─── CachePolicy::HeapLfu ─► HeapLfuCache<K, V> │
│ ├─── CachePolicy::TwoQ ────► TwoQCore<K, V> │
│ └─── CachePolicy::S3Fifo ──► S3FifoCache<K, V> │
│ │
│ ▼ │
│ Cache<K, V> (unified wrapper) │
│ ┌─────────────────────────────────────────────────────────────────────┐ │
│ │ .insert(key, value) → Option<V> │ │
│ │ .get(&key) → Option<&V> │ │
│ │ .contains(&key) → bool │ │
│ │ .len() / .is_empty() → usize / bool │ │
│ │ .capacity() → usize │ │
│ │ .clear() │ │
│ └─────────────────────────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────────────────┘
The Cache<K, V> wrapper requires:

| Type | Bounds | Reason |
|---|---|---|
| `K` | `Copy + Eq + Hash + Ord` | Key indexing, heap ordering (HeapLFU) |
| `V` | `Clone + Debug` | Value extraction, debug formatting |
Each policy requires its feature flag (e.g. policy-lru for CachePolicy::Lru). See Compatibility and Features.
| Policy | Feature | Use Case | Trade-offs |
|---|---|---|---|
| `Fifo` | `policy-fifo` | Simple, predictable eviction | No recency/frequency tracking |
| `Lru` | `policy-lru` | Temporal locality | Vulnerable to scans |
| `FastLru` | `policy-fast-lru` | Maximum single-threaded speed | No Arc wrapping, no concurrent wrapper |
| `LruK { k }` | `policy-lru-k` | Scan resistance | Extra memory for history |
| `Lfu { bucket_hint }` | `policy-lfu` | Stable hot spots | Slow to adapt to changes |
| `HeapLfu` | `policy-heap-lfu` | Large caches, frequent evictions | O(log n) eviction |
| `TwoQ { probation_frac }` | `policy-two-q` | Mixed workloads | Two-queue overhead |
| `S3Fifo { small_ratio, ghost_ratio }` | `policy-s3-fifo` | Scan-heavy workloads | Small + ghost queues |
For advanced use cases requiring policy-specific operations (e.g., touch(), frequency(), k_distance()), use the underlying cache implementations directly.
CoreCache<K, V>
│
┌───────────────┴───────────────┐
│ │
FifoCacheTrait<K, V> MutableCache<K, V>
│
┌──────────────────────┼──────────────────────┐
│ │ │
LruCacheTrait<K, V> LfuCacheTrait<K, V> LrukCacheTrait<K, V>
use std::sync::Arc;
use cachekit::policy::lru::LruCore;
use cachekit::traits::{CoreCache, LruCacheTrait};
let mut cache: LruCore<u64, &str> = LruCore::new(100);
cache.insert(1, Arc::new("first"));
cache.insert(2, Arc::new("second"));
// Touch to mark as recently used
cache.touch(&1);
// Peek at LRU entry
if let Some((key, _)) = cache.peek_lru() {
println!("LRU key: {}", key);
}
// Pop LRU entry
let evicted = cache.pop_lru();
use std::sync::Arc;
use cachekit::policy::lfu::LfuCache;
use cachekit::traits::{CoreCache, LfuCacheTrait};
let mut cache: LfuCache<u64, &str> = LfuCache::new(100);
cache.insert(1, Arc::new("value"));
// Check frequency
println!("Frequency: {:?}", cache.frequency(&1));
// Boost frequency without accessing value
cache.increment_frequency(&1);
// Reset frequency (demote hot entry)
cache.reset_frequency(&1);
use cachekit::policy::lru_k::LrukCache;
use cachekit::traits::{CoreCache, LrukCacheTrait};
// Create LRU-2 cache
let mut cache = LrukCache::with_k(100, 2);
cache.insert(1, "value");
// Check access count and K-distance
println!("Access count: {:?}", cache.access_count(&1));
println!("K-distance: {:?}", cache.k_distance(&1));
// Touch to record access
cache.touch(&1);
Individual cache implementations are NOT thread-safe by default. For concurrent access:
use std::sync::{Arc, RwLock};
use cachekit::policy::lru_k::LrukCache;
use cachekit::traits::CoreCache;
let cache = Arc::new(RwLock::new(LrukCache::<u64, String>::new(100)));
// Read access
{
let guard = cache.read().unwrap();
let _ = guard.contains(&1);
}
// Write access
{
let mut guard = cache.write().unwrap();
guard.insert(1, "value".to_string());
}
use std::sync::{Arc, Mutex};
use cachekit::builder::{CacheBuilder, CachePolicy};
let cache = Arc::new(Mutex::new(
CacheBuilder::new(100).build::<u64, String>(CachePolicy::Lru)
));
// Use from multiple threads
let cache_clone = cache.clone();
std::thread::spawn(move || {
let mut guard = cache_clone.lock().unwrap();
guard.insert(1, "value".to_string());
});
When the metrics feature is enabled, caches expose metrics via metrics_snapshot():
// Requires: cachekit = { ..., features = ["metrics"] }
use cachekit::policy::lru_k::LrukCache;
let mut cache = LrukCache::<u64, String>::new(100);
cache.insert(1, "value".to_string());
cache.get(&1);
#[cfg(feature = "metrics")]
{
let metrics = cache.metrics_snapshot();
println!("Hits: {}", metrics.hits());
println!("Misses: {}", metrics.misses());
println!("Hit rate: {:.2}%", metrics.hit_rate() * 100.0);
}