use of com.palantir.atlasdb.keyvalue.api.CellReference in project atlasdb by palantir.
the class LockWatchValueScopingCacheImpl method onSuccessfulCommit.
@Override
public synchronized void onSuccessfulCommit(long startTimestamp) {
    StartTimestamp startTs = StartTimestamp.of(startTimestamp);
    TransactionScopedCache cache = cacheStore.getCache(startTs);
    cache.finalise();

    Map<CellReference, CacheValue> cachedValues = cache.getValueDigest().loadedValues();
    if (!cachedValues.isEmpty()) {
        CommitUpdate commitUpdate = eventCache.getEventUpdate(startTimestamp);
        commitUpdate.accept(new CommitUpdate.Visitor<Void>() {
            @Override
            public Void invalidateAll() {
                // Nothing to publish: invalidation because of read-write conflicts is handled
                // in the PreCommitCondition.
                return null;
            }

            @Override
            public Void invalidateSome(Set<LockDescriptor> invalidatedLocks) {
                Set<CellReference> invalidatedCells = invalidatedLocks.stream()
                        .map(AtlasLockDescriptorUtils::candidateCells)
                        .flatMap(List::stream)
                        .collect(Collectors.toSet());
                KeyedStream.stream(cachedValues)
                        .filterKeys(cellReference -> !invalidatedCells.contains(cellReference))
                        .forEach(valueStore::putValue);
                return null;
            }
        });
    }
    ensureStateRemoved(startTimestamp);
}
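The invalidateSome branch above publishes only those cached reads whose cells were not touched by invalidated locks. The following is a minimal, self-contained sketch of that filtering step using plain java.util streams and generic key/value types in place of CellReference and CacheValue; the class and method names are hypothetical, not AtlasDB API.

import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;

final class PublishableValuesSketch {
    // Push every cached (cell, value) pair whose cell was not invalidated to the sink,
    // mirroring the KeyedStream.filterKeys(...).forEach(valueStore::putValue) chain above.
    static <K, V> void publishSurvivors(Map<K, V> cachedValues, Set<K> invalidatedCells, BiConsumer<K, V> sink) {
        cachedValues.entrySet().stream()
                .filter(entry -> !invalidatedCells.contains(entry.getKey()))
                .forEach(entry -> sink.accept(entry.getKey(), entry.getValue()));
    }
}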
use of com.palantir.atlasdb.keyvalue.api.CellReference in project atlasdb by palantir.
the class ValueStoreImplTest method valuesEvictedOnceMaxSizeReached.
@Test
public void valuesEvictedOnceMaxSizeReached() {
    // Size is in bytes; with overhead, this should keep two values but not three.
    valueStore = new ValueStoreImpl(ImmutableSet.of(TABLE), 300, metrics);
    CellReference tableCell2 = CellReference.of(TABLE, CELL_2);

    valueStore.applyEvent(WATCH_EVENT);
    valueStore.putValue(TABLE_CELL, VALUE_1);
    valueStore.putValue(tableCell2, VALUE_2);
    verify(metrics, times(2)).increaseCacheSize(EXPECTED_SIZE);

    valueStore.putValue(CellReference.of(TABLE, CELL_3), VALUE_3);
    verify(metrics, times(3)).increaseCacheSize(anyLong());
    verify(metrics).decreaseCacheSize(EXPECTED_SIZE);

    // Caffeine explicitly does *not* implement simple LRU, so we cannot reason about the actual entries here.
    assertThat(((ValueCacheSnapshotImpl) valueStore.getSnapshot()).values()).hasSize(2);
}
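As the test's own comment notes, the store is backed by Caffeine with a byte-weighted bound, which is why the assertion only checks how many entries survive, not which ones. Below is an illustrative, standalone Caffeine setup (not the actual ValueStoreImpl wiring); the 300-byte limit, key names, and weights are made up for the example.

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public final class WeightBoundedCacheSketch {
    public static void main(String[] args) {
        Cache<String, byte[]> cache = Caffeine.newBuilder()
                .maximumWeight(300)
                .weigher((String key, byte[] value) -> key.length() + value.length)
                .executor(Runnable::run) // run eviction maintenance on the caller thread
                .build();

        cache.put("cell-1", new byte[100]);
        cache.put("cell-2", new byte[100]);
        cache.put("cell-3", new byte[100]);
        cache.cleanUp(); // flush pending eviction work before inspecting the cache

        // Caffeine's Window TinyLFU policy is not plain LRU, so we know the total weight
        // stays under the bound but not which specific entry was evicted.
        System.out.println(cache.estimatedSize());
    }
}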
use of com.palantir.atlasdb.keyvalue.api.CellReference in project atlasdb by palantir.
the class AbstractSweepQueueTest method writeToCellsInFixedShard.
List<WriteInfo> writeToCellsInFixedShard(SweepQueueTable writer, long ts, int number, TableReference tableRef) {
    List<WriteInfo> result = new ArrayList<>();
    for (long i = 0; i < number; i++) {
        CellReference cellRef = getCellRefWithFixedShard(i, tableRef, numShards);
        result.add(WriteInfo.write(tableRef, cellRef.cell(), ts));
    }
    putTimestampIntoTransactionTable(ts, ts);
    writer.enqueue(result);
    return result;
}
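The helper relies on getCellRefWithFixedShard to produce cells that all map to the same sweep shard. As a rough illustration only (the real shard assignment lives inside AtlasDB's sweep queue code and may differ), one way to pin cells to a shard when the mapping is hash-based is to search for a column name that hashes to the desired shard:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

final class FixedShardSketch {
    // Hypothetical shard function: hash the row and column bytes, reduce modulo numShards.
    static int shardOf(byte[] row, byte[] col, int numShards) {
        int hash = 31 * Arrays.hashCode(row) + Arrays.hashCode(col);
        return Math.floorMod(hash, numShards);
    }

    // Brute-force a column name for index i that lands in shard 0 under the function above.
    static byte[] columnInShardZero(byte[] row, long i, int numShards) {
        for (long suffix = 0; ; suffix++) {
            byte[] col = (i + "_" + suffix).getBytes(StandardCharsets.UTF_8);
            if (shardOf(row, col, numShards) == 0) {
                return col;
            }
        }
    }
}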
use of com.palantir.atlasdb.keyvalue.api.CellReference in project atlasdb by palantir.
the class TransactionCacheValueStoreImpl method recordRemoteWrite.
@Override
public void recordRemoteWrite(TableReference table, Cell cell) {
    CellReference cellReference = CellReference.of(table, cell);
    recordRemoteWriteInternal(cellReference);
}
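recordRemoteWrite marks a (table, cell) pair as written by this transaction. The sketch below illustrates one plausible reason a transaction-scoped cache tracks such writes, namely that a written cell must bypass the cached snapshot on later reads; this is a hypothetical illustration, not the actual TransactionCacheValueStoreImpl internals.

import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

final class ScopedCacheSketch<K, V> {
    private final Map<K, V> snapshotValues;
    private final Set<K> writtenCells = new HashSet<>();

    ScopedCacheSketch(Map<K, V> snapshotValues) {
        this.snapshotValues = snapshotValues;
    }

    void recordWrite(K cell) {
        writtenCells.add(cell);
    }

    Optional<V> readFromCache(K cell) {
        if (writtenCells.contains(cell)) {
            return Optional.empty(); // force a read-through: the snapshot value is stale for this transaction
        }
        return Optional.ofNullable(snapshotValues.get(cell));
    }
}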
use of com.palantir.atlasdb.keyvalue.api.CellReference in project atlasdb by palantir.
the class ValueStoreImplTest method metricsCalculateSize.
@Test
public void metricsCalculateSize() {
    valueStore = new ValueStoreImpl(ImmutableSet.of(TABLE), 300, metrics);
    valueStore.applyEvent(WATCH_EVENT);

    CellReference cellRef = CellReference.of(TABLE, Cell.create(new byte[] {1, 2}, new byte[] {3, 4, 5}));
    valueStore.putValue(cellRef, CacheValue.of(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}));

    int expectedSize = ValueStoreImpl.CACHE_OVERHEAD + 7 + 2 + 3 + 8;
    verify(metrics).increaseCacheSize(expectedSize);
}
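The expected size is the per-entry overhead constant plus the byte lengths of the entry's components: 2 row bytes, 3 column bytes, 8 value bytes, and presumably 7 bytes for the table reference name. A weigher consistent with that arithmetic might look like the sketch below; the breakdown and the overhead constant are assumptions, not the actual ValueStoreImpl weigher.

import java.nio.charset.StandardCharsets;

final class EntryWeightSketch {
    static final int CACHE_OVERHEAD = 128; // hypothetical constant; the real value lives in ValueStoreImpl

    // Weight = fixed overhead + table name bytes + row bytes + column bytes + value bytes.
    static int weigh(String tableName, byte[] row, byte[] column, byte[] value) {
        return CACHE_OVERHEAD
                + tableName.getBytes(StandardCharsets.UTF_8).length
                + row.length
                + column.length
                + value.length;
    }
}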