Use of com.palantir.atlasdb.keyvalue.api.watch.StartTimestamp in project atlasdb by palantir.
The class LockWatchValueScopingCacheImpl, method ensureStateRemoved.
@Override
public synchronized void ensureStateRemoved(long startTimestamp) {
    StartTimestamp startTs = StartTimestamp.of(startTimestamp);
    snapshotStore.removeTimestamp(startTs);
    cacheStore.removeCache(startTs);
}
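For context, callers are expected to invoke this cleanup whenever a transaction ends, whether it commits or aborts, so the snapshot and cache stores do not retain entries for dead timestamps. A minimal caller sketch, assuming only a LockWatchValueScopingCache handle; the wrapper method and its name are invented for illustration, not AtlasDB source:

// Hypothetical wrapper (not AtlasDB source): guarantees cache state keyed on the
// start timestamp is dropped even when the transaction task throws.
static <T> T runWithCacheCleanup(LockWatchValueScopingCache cache, long startTimestamp, Supplier<T> task) {
    try {
        return task.get();
    } finally {
        cache.ensureStateRemoved(startTimestamp);
    }
}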
Use of com.palantir.atlasdb.keyvalue.api.watch.StartTimestamp in project atlasdb by palantir.
The class LockWatchValueScopingCacheImpl, method onSuccessfulCommit.
@Override
public synchronized void onSuccessfulCommit(long startTimestamp) {
    StartTimestamp startTs = StartTimestamp.of(startTimestamp);
    TransactionScopedCache cache = cacheStore.getCache(startTs);
    cache.finalise();
    Map<CellReference, CacheValue> cachedValues = cache.getValueDigest().loadedValues();
    if (!cachedValues.isEmpty()) {
        CommitUpdate commitUpdate = eventCache.getEventUpdate(startTimestamp);
        commitUpdate.accept(new CommitUpdate.Visitor<Void>() {
            @Override
            public Void invalidateAll() {
                // Deliberately publish nothing to the central store here; throwing
                // because of read-write conflicts is handled in the PreCommitCondition.
                return null;
            }

            @Override
            public Void invalidateSome(Set<LockDescriptor> invalidatedLocks) {
                Set<CellReference> invalidatedCells = invalidatedLocks.stream()
                        .map(AtlasLockDescriptorUtils::candidateCells)
                        .flatMap(List::stream)
                        .collect(Collectors.toSet());
                KeyedStream.stream(cachedValues)
                        .filterKeys(cellReference -> !invalidatedCells.contains(cellReference))
                        .forEach(valueStore::putValue);
                return null;
            }
        });
    }
    ensureStateRemoved(startTimestamp);
}
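The invalidateSome branch keeps only those cached values whose cells were not covered by locks taken out during the transaction's lifetime, and publishes the survivors to the central value store. The same filtering step expressed with plain java.util.stream collectors instead of KeyedStream, as an equivalent reformulation rather than the AtlasDB source:

// Retain entries whose cell was not invalidated, then publish them centrally.
Map<CellReference, CacheValue> stillValid = cachedValues.entrySet().stream()
        .filter(entry -> !invalidatedCells.contains(entry.getKey()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
stillValid.forEach(valueStore::putValue);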
Use of com.palantir.atlasdb.keyvalue.api.watch.StartTimestamp in project atlasdb by palantir.
The class CacheStoreImplTest, method cachesExceedingMaximumCountThrows.
@Test
public void cachesExceedingMaximumCountThrows() {
    SnapshotStore snapshotStore = SnapshotStoreImpl.create(metrics);
    CacheStore cacheStore = new CacheStoreImpl(snapshotStore, VALIDATION_PROBABILITY, () -> {}, metrics, 1);
    StartTimestamp timestamp = StartTimestamp.of(22222L);
    snapshotStore.storeSnapshot(
            Sequence.of(5L),
            ImmutableSet.of(TIMESTAMP_1, TIMESTAMP_2, timestamp),
            ValueCacheSnapshotImpl.of(HashMap.empty(), HashSet.empty(), ImmutableSet.of()));
    cacheStore.createCache(TIMESTAMP_1);
    cacheStore.createCache(timestamp);
    assertThatThrownBy(() -> cacheStore.createCache(TIMESTAMP_2))
            .isExactlyInstanceOf(SafeIllegalStateException.class)
            .hasMessage("Exceeded maximum concurrent caches; transaction can be retried, but with caching disabled");
    assertThat(getTransactionCacheInstanceCount()).isEqualTo(2);
}
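The test pins down the semantics of the cache-count limit (set to 1 via the constructor's final argument): the limit is evidently checked before a new cache is inserted and with a strict inequality, since two caches are admitted and only the third attempt throws. A sketch of the kind of guard createCache presumably runs first; the method and field names here are assumptions, not the CacheStoreImpl source:

// Assumed shape of the guard (hypothetical names). With the strict inequality,
// maxCacheCount = 1 admits two caches and rejects the third, matching the test.
private void validateStateSize() {
    if (cacheMap.size() > maxCacheCount) {
        throw new SafeIllegalStateException(
                "Exceeded maximum concurrent caches; transaction can be retried, but with caching disabled");
    }
}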
Use of com.palantir.atlasdb.keyvalue.api.watch.StartTimestamp in project atlasdb by palantir.
The class LockWatchValueScopingCacheImpl, method processCommitUpdate.
private synchronized void processCommitUpdate(long startTimestamp) {
    StartTimestamp startTs = StartTimestamp.of(startTimestamp);
    TransactionScopedCache cache = cacheStore.getCache(startTs);
    cache.finalise();
    CommitUpdate commitUpdate = eventCache.getCommitUpdate(startTimestamp);
    cacheStore.createReadOnlyCache(startTs, commitUpdate);
}
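Registering the read-only cache means commit-time validation can keep reading through a view that already reflects the commit update, while the finalised transaction-scoped cache stays untouched. A hypothetical follow-up call, with the getter name assumed rather than taken from the CacheStore interface:

// Hypothetical retrieval (getter name assumed): serve commit-time reads from
// the read-only view created above instead of the finalised transaction cache.
TransactionScopedCache readOnlyView = cacheStore.getReadOnlyCache(startTs);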
Use of com.palantir.atlasdb.keyvalue.api.watch.StartTimestamp in project atlasdb by palantir.
The class LockWatchValueScopingCacheImpl, method updateStores.
/**
 * In order to maintain the necessary invariants, we need to do the following:
 *
 * 1. For each new event, we apply it to the cache. The effects of this application are described in
 * {@link LockWatchValueScopingCache}.
 * 2. For each transaction, we must ensure that we store a snapshot of the cache at the sequence corresponding
 * to the transaction's start timestamp. Note that not every sequence will have a corresponding timestamp, so we
 * don't bother storing a snapshot for those sequences. Also note that we know that each call here will only
 * ever have new events, and that consecutive calls to this method will *always* have increasing sequences
 * (without this last guarantee, we'd need to store snapshots for all sequences).
 * 3. For each transaction, we must create a transaction-scoped cache. We do this now because it gives us tighter
 * guarantees around when the cache is created, and thus when it is deleted.
 */
private synchronized void updateStores(TransactionsLockWatchUpdate updateForTransactions) {
    Multimap<Sequence, StartTimestamp> reversedMap = createSequenceTimestampMultimap(updateForTransactions);

    // Without this block, updates with no events would not store a snapshot.
    currentVersion
            .map(LockWatchVersion::version)
            .map(Sequence::of)
            .ifPresent(sequence ->
                    snapshotStore.storeSnapshot(sequence, reversedMap.get(sequence), valueStore.getSnapshot()));

    updateForTransactions.events().stream().filter(this::isNewEvent).forEach(event -> {
        valueStore.applyEvent(event);
        Sequence sequence = Sequence.of(event.sequence());
        snapshotStore.storeSnapshot(sequence, reversedMap.get(sequence), valueStore.getSnapshot());
    });

    updateForTransactions
            .startTsToSequence()
            .keySet()
            .forEach(timestamp -> cacheStore.createCache(StartTimestamp.of(timestamp)));

    if (valueStore.getSnapshot().hasAnyTablesWatched()) {
        assertNoSnapshotsMissing(reversedMap);
    }
}
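The reversedMap groups start timestamps under the lock watch sequence each transaction observed, which is what lets both snapshot-storing steps above look up every timestamp for a given sequence. A minimal sketch of that inversion using Guava multimaps, offered as an assumed reconstruction of createSequenceTimestampMultimap rather than its actual source:

// Invert Map<startTs, LockWatchVersion> into Multimap<Sequence, StartTimestamp>
// so every sequence maps to the start timestamps that must snapshot at it.
private static Multimap<Sequence, StartTimestamp> invertToSequenceMultimap(
        TransactionsLockWatchUpdate updateForTransactions) {
    Multimap<Sequence, StartTimestamp> reversed = HashMultimap.create();
    updateForTransactions.startTsToSequence().forEach((startTs, lockWatchVersion) ->
            reversed.put(Sequence.of(lockWatchVersion.version()), StartTimestamp.of(startTs)));
    return reversed;
}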