Use of com.github.benmanes.caffeine.cache.Cache in the project caffeine by ben-manes.
Class WriteBehindCacheWriterTest, method givenMultipleCacheUpdatesOnSameKey_writeBehindIsCalledWithMostRecentTime.
@Test
public void givenMultipleCacheUpdatesOnSameKey_writeBehindIsCalledWithMostRecentTime() {
  // Captured by the write-behind action; inspected once it has fired.
  AtomicBoolean writeBehindFired = new AtomicBoolean(false);
  AtomicInteger entryCount = new AtomicInteger(0);
  AtomicReference<ZonedDateTime> coalescedTime = new AtomicReference<>();

  // Given a cache whose writer buffers updates for one second and coalesces
  // same-key writes by keeping the maximum (most recent) timestamp...
  Cache<Long, ZonedDateTime> cache = Caffeine.newBuilder()
      .writer(new WriteBehindCacheWriter.Builder<Long, ZonedDateTime>()
          .bufferTime(1, TimeUnit.SECONDS)
          .coalesce(BinaryOperator.maxBy(ZonedDateTime::compareTo))
          .writeAction(entries -> {
            if (entries.isEmpty()) {
              return;
            }
            entryCount.set(entries.size());
            coalescedTime.set(entries.values().iterator().next());
            writeBehindFired.set(true);
          })
          .build())
      .build();

  // When the same key is written four times with increasing timestamps...
  cache.put(1L, ZonedDateTime.of(2016, 6, 26, 8, 0, 0, 0, ZoneId.systemDefault()));
  cache.put(1L, ZonedDateTime.of(2016, 6, 26, 8, 0, 0, 100, ZoneId.systemDefault()));
  cache.put(1L, ZonedDateTime.of(2016, 6, 26, 8, 0, 0, 300, ZoneId.systemDefault()));
  ZonedDateTime mostRecentTime =
      ZonedDateTime.of(2016, 6, 26, 8, 0, 0, 500, ZoneId.systemDefault());
  cache.put(1L, mostRecentTime);

  // Then the write-behind action receives exactly one coalesced entry,
  // carrying the most recent timestamp.
  Awaitility.await().untilTrue(writeBehindFired);
  Assert.assertEquals(1, entryCount.intValue());
  Assert.assertEquals(mostRecentTime, coalescedTime.get());
}
Use of com.github.benmanes.caffeine.cache.Cache in the project caffeine by ben-manes.
Class GuavaCacheFromContext, method newGuavaCache.
/**
 * Returns a Guava-backed cache configured to mirror the test {@code CacheContext}.
 * Translates each context setting (capacity, stats, eviction, expiration, refresh,
 * reference strength, removal listener, loader) onto a Guava {@code CacheBuilder},
 * then wraps the built cache in the project's Guava adapter types.
 */
@SuppressWarnings("CheckReturnValue")
public static <K, V> Cache<K, V> newGuavaCache(CacheContext context) {
// Guava offers no asynchronous cache, so an async test context cannot be mapped.
checkState(!context.isAsync(), "Guava caches are synchronous only");
CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
// Expose the raw builder on the context — presumably for later inspection by tests; confirm.
context.guava = builder;
// Single segment; NOTE(review): likely chosen for deterministic eviction order — confirm.
builder.concurrencyLevel(1);
if (context.initialCapacity != InitialCapacity.DEFAULT) {
builder.initialCapacity(context.initialCapacity.size());
}
if (context.isRecordingStats()) {
builder.recordStats();
}
// Size-based eviction: plain entry count unless a custom weigher is configured,
// in which case the context's weigher is adapted and a max weight applies instead.
if (context.maximumSize != Maximum.DISABLED) {
if (context.weigher == CacheWeigher.DEFAULT) {
builder.maximumSize(context.maximumSize.max());
} else {
builder.weigher(new GuavaWeigher<Object, Object>(context.weigher));
builder.maximumWeight(context.maximumWeight());
}
}
// Expiration and refresh policies, passed through in nanoseconds.
if (context.afterAccess != Expire.DISABLED) {
builder.expireAfterAccess(context.afterAccess.timeNanos(), TimeUnit.NANOSECONDS);
}
if (context.afterWrite != Expire.DISABLED) {
builder.expireAfterWrite(context.afterWrite.timeNanos(), TimeUnit.NANOSECONDS);
}
if (context.refresh != Expire.DISABLED) {
builder.refreshAfterWrite(context.refresh.timeNanos(), TimeUnit.NANOSECONDS);
}
// Only install the context's ticker when a time-based policy is active.
if (context.expires() || context.refreshes()) {
builder.ticker(context.ticker());
}
// Reference strength for keys: Guava supports weak keys but has no soft-key
// option, so a SOFT key strength is rejected outright.
if (context.keyStrength == ReferenceType.WEAK) {
builder.weakKeys();
} else if (context.keyStrength == ReferenceType.SOFT) {
throw new IllegalStateException();
}
if (context.valueStrength == ReferenceType.WEAK) {
builder.weakValues();
} else if (context.valueStrength == ReferenceType.SOFT) {
builder.softValues();
}
if (context.removalListenerType != Listener.DEFAULT) {
// NOTE(review): zero-duration expiry apparently needs removal-cause translation in
// the listener wrapper — confirm against GuavaRemovalListener's implementation.
boolean translateZeroExpire = (context.afterAccess == Expire.IMMEDIATELY) || (context.afterWrite == Expire.IMMEDIATELY);
builder.removalListener(new GuavaRemovalListener<>(translateZeroExpire, context.removalListener));
}
// Fall back to the system ticker when the context did not supply one.
Ticker ticker = (context.ticker == null) ? Ticker.systemTicker() : context.ticker();
// Wrap as a plain, bulk-loading, or single-loading adapter depending on the loader.
if (context.loader == null) {
context.cache = new GuavaCache<>(builder.<Integer, Integer>build(), ticker, context.isRecordingStats());
} else if (context.loader().isBulk()) {
context.cache = new GuavaLoadingCache<>(builder.build(new BulkLoader<Integer, Integer>(context.loader())), ticker, context.isRecordingStats());
} else {
context.cache = new GuavaLoadingCache<>(builder.build(new SingleLoader<Integer, Integer>(context.loader())), ticker, context.isRecordingStats());
}
@SuppressWarnings("unchecked") Cache<K, V> castedCache = (Cache<K, V>) context.cache;
return castedCache;
}
Use of com.github.benmanes.caffeine.cache.Cache in the project caffeine by ben-manes.
Class Solr10141Test, method eviction.
@Test
public void eviction() throws Exception {
  AtomicLong hitCount = new AtomicLong();
  AtomicLong insertCount = new AtomicLong();
  AtomicLong removalCount = new AtomicLong();

  // The removal listener must fire exactly once per value; Val.live flips
  // from true to false on the first (and only legal) notification.
  RemovalListener<Long, Val> listener = (key, value, removalCause) -> {
    assertThat(value.key, is(key));
    if (!value.live.compareAndSet(true, false)) {
      throw new RuntimeException(String.format(
          "listener called more than once! k=%s, v=%s, removalCause=%s",
          key, value, removalCause));
    }
    removalCount.incrementAndGet();
  };

  Cache<Long, Val> cache = Caffeine.newBuilder()
      .removalListener(listener)
      .maximumSize(maxEntries)
      .build();

  AtomicLong previousBlock = new AtomicLong();
  AtomicBoolean anyFailure = new AtomicBoolean();
  AtomicLong largestSizeSeen = new AtomicLong();

  ConcurrentTestHarness.timeTasks(nThreads, new Runnable() {
    @Override
    public void run() {
      try {
        Random random = new Random(rnd.nextLong());
        for (int iteration = 0; iteration < readsPerThread; iteration++) {
          test(random);
        }
      } catch (Throwable t) {
        anyFailure.set(true);
        t.printStackTrace();
      }
    }

    void test(Random random) {
      long block = random.nextInt(blocksInTest);
      // Some percent of the time, try to read the last block another
      // thread was just reading/writing.
      if ((readLastBlockOdds > 0) && (random.nextInt(readLastBlockOdds) == 0)) {
        block = previousBlock.get();
      }
      previousBlock.set(block);

      Long key = block;
      Val value = cache.getIfPresent(key);
      if (value != null) {
        hitCount.incrementAndGet();
        assertThat(key, is(value.key));
      }
      if ((value == null) || (updateAnyway && random.nextBoolean())) {
        value = new Val();
        value.key = key;
        cache.put(key, value);
        insertCount.incrementAndGet();
      }

      // Race condition here, but an estimate is OK.
      long size = cache.estimatedSize();
      if (size > largestSizeSeen.get()) {
        largestSizeSeen.set(size);
      }
    }
  });

  // Every insert is eventually either still resident or reported removed.
  await().until(() -> (insertCount.get() - removalCount.get()) == cache.estimatedSize());
  System.out.printf("Done!%n"
      + "entries=%,d inserts=%,d removals=%,d hits=%,d maxEntries=%,d maxObservedSize=%,d%n",
      cache.estimatedSize(), insertCount.get(), removalCount.get(), hitCount.get(),
      maxEntries, largestSizeSeen.get());
  assertThat(anyFailure.get(), is(false));
}
Use of com.github.benmanes.caffeine.cache.Cache in the project caffeine by ben-manes.
Class Solr10141Test, method clear.
@Test
public void clear() throws Exception {
  AtomicLong insertCount = new AtomicLong();
  AtomicLong removalCount = new AtomicLong();
  AtomicBoolean anyFailure = new AtomicBoolean();

  // Each value may be reported removed at most once; Val.live guards this.
  RemovalListener<Long, Val> listener = (key, value, removalCause) -> {
    assertThat(value.key, is(key));
    if (!value.live.compareAndSet(true, false)) {
      throw new RuntimeException(String.format(
          "listener called more than once! k=%s, v=%s, removalCause=%s",
          key, value, removalCause));
    }
    removalCount.incrementAndGet();
  };

  Cache<Long, Val> cache = Caffeine.newBuilder()
      .maximumSize(Integer.MAX_VALUE)
      .removalListener(listener)
      .build();

  ConcurrentTestHarness.timeTasks(nThreads, new Runnable() {
    @Override
    public void run() {
      try {
        Random random = new Random(rnd.nextLong());
        for (int iteration = 0; iteration < readsPerThread; iteration++) {
          test(random);
        }
      } catch (Throwable t) {
        anyFailure.set(true);
        t.printStackTrace();
      }
    }

    void test(Random random) {
      Long key = (long) random.nextInt(blocksInTest);
      Val value = cache.getIfPresent(key);
      if (value != null) {
        assertThat(key, is(value.key));
      }
      if ((value == null) || (updateAnyway && random.nextBoolean())) {
        value = new Val();
        value.key = key;
        cache.put(key, value);
        insertCount.incrementAndGet();
      }
      // Occasionally wipe the whole cache while other threads are still active.
      if (random.nextInt(10) == 0) {
        cache.asMap().clear();
      }
    }
  });

  // After a final clear, every insert must eventually be matched by a removal.
  cache.asMap().clear();
  await().until(() -> insertCount.get() == removalCount.get());
  assertThat(anyFailure.get(), is(false));
}
Use of com.github.benmanes.caffeine.cache.Cache in the project caffeine by ben-manes.
Class WriteBehindCacheWriterTest, method givenCacheUpdateOnMultipleKeys_writeBehindIsCalled.
@Test
public void givenCacheUpdateOnMultipleKeys_writeBehindIsCalled() {
  // Captured by the write-behind action; inspected once it has fired.
  AtomicBoolean writeBehindFired = new AtomicBoolean(false);
  AtomicInteger entryCount = new AtomicInteger(0);

  // Given a cache whose writer buffers updates for one second and coalesces
  // same-key writes by keeping the maximum timestamp...
  Cache<Long, ZonedDateTime> cache = Caffeine.newBuilder()
      .writer(new WriteBehindCacheWriter.Builder<Long, ZonedDateTime>()
          .bufferTime(1, TimeUnit.SECONDS)
          .coalesce(BinaryOperator.maxBy(ZonedDateTime::compareTo))
          .writeAction(entries -> {
            entryCount.set(entries.size());
            writeBehindFired.set(true);
          })
          .build())
      .build();

  // When three distinct keys are written...
  cache.put(1L, ZonedDateTime.now());
  cache.put(2L, ZonedDateTime.now());
  cache.put(3L, ZonedDateTime.now());

  // Then the write-behind action receives all three entries in one batch.
  Awaitility.await().untilTrue(writeBehindFired);
  Assert.assertEquals(3, entryCount.intValue());
}
Aggregations