Use of com.palantir.lock.watch.LockWatchCache in the atlasdb project by Palantir.
From the create method of the TimeLockHelperServices class.
/**
 * Builds the TimeLock helper services for a single namespace: an internal lock-watch
 * manager (backed by the given schemas and caching config) and a request-batchers
 * factory wired to that manager's cache.
 *
 * @param namespace the TimeLock namespace these services operate on
 * @param metricsManager metrics registry used by the lock-watch manager
 * @param schemas table schemas whose lock-watch references should be registered
 * @param lockWatchStarter endpoint used to start lock-watch event streams
 * @param lockWatchCachingConfig tuning for the lock-watch value/event caches
 * @param requestBatcherProvider optionally supplies multi-client request batchers
 * @return an immutable bundle of the lock-watch manager and batchers factory
 */
static TimeLockHelperServices create(String namespace, MetricsManager metricsManager, Set<Schema> schemas, LockWatchStarter lockWatchStarter, LockWatchCachingConfig lockWatchCachingConfig, Supplier<Optional<RequestBatchersFactory.MultiClientRequestBatchers>> requestBatcherProvider) {
    LockWatchManagerInternal manager =
            LockWatchManagerImpl.create(metricsManager, schemas, lockWatchStarter, lockWatchCachingConfig);
    // The batchers factory shares the manager's cache so batched requests observe
    // the same lock-watch state as the rest of the stack.
    RequestBatchersFactory batchersFactory = RequestBatchersFactory.create(
            manager.getCache(), Namespace.of(namespace), requestBatcherProvider.get());
    return ImmutableTimeLockHelperServices.builder()
            .lockWatchManager(manager)
            .requestBatchersFactory(batchersFactory)
            .build();
}
Use of com.palantir.lock.watch.LockWatchCache in the atlasdb project by Palantir.
From the updatesCacheWhileProcessingResponse method of the MultiClientCommitTimestampGetterTest class.
/**
 * Issues twice the per-request commit-timestamp limit for one client and verifies the
 * client's lock-watch cache receives one update per server round-trip (two in total).
 */
@Test
public void updatesCacheWhileProcessingResponse() {
    Namespace client = Namespace.of("Kitty");
    // Enough elements to force the batcher to split the work into exactly two requests.
    List<BatchElement<NamespacedRequest, Long>> requests = IntStream.range(0, COMMIT_TS_LIMIT_PER_REQUEST * 2)
            .mapToObj(index -> batchElementForNamespace(client))
            .collect(toList());
    setupServiceAndAssertSanityOfResponse(requests);
    // Each round-trip must push its commit timestamps into the cache.
    verify(lockWatchCacheMap.get(client), times(2)).processCommitTimestampsUpdate(any(), any());
}
Use of com.palantir.lock.watch.LockWatchCache in the atlasdb project by Palantir.
From the doesNotUpdateCacheIfClientNotServed method of the MultiClientCommitTimestampGetterTest class.
/**
 * When the batched call only returns results for one of two clients and the retry throws,
 * the served client's cache is updated exactly once while the unserved client's cache is
 * never touched and its request remains incomplete.
 */
@Test
public void doesNotUpdateCacheIfClientNotServed() {
    Namespace servedClient = Namespace.of("alpha" + UUID.randomUUID());
    Namespace unservedClient = Namespace.of("beta" + UUID.randomUUID());
    BatchElement<NamespacedRequest, Long> servedRequest = batchElementForNamespace(servedClient);
    BatchElement<NamespacedRequest, Long> unservedRequest = batchElementForNamespace(unservedClient);
    List<BatchElement<NamespacedRequest, Long>> allRequests = ImmutableList.of(servedRequest, unservedRequest);
    List<BatchElement<NamespacedRequest, Long>> servedRequests = ImmutableList.of(servedRequest);
    // First call serves only the alpha client; the follow-up call for beta fails.
    Map<Namespace, GetCommitTimestampsResponse> responseMap = getCommitTimestamps(servedRequests);
    when(timelockService.getCommitTimestamps(any())).thenReturn(responseMap).thenThrow(EXCEPTION);
    assertThatThrownBy(() -> consumer.accept(allRequests)).isEqualTo(EXCEPTION);
    // The alpha requests completed and pushed exactly one cache update.
    assertSanityOfResponse(
            servedRequests, ImmutableMap.of(servedClient, ImmutableList.of(responseMap.get(servedClient))));
    verify(lockWatchCacheMap.get(servedClient)).processCommitTimestampsUpdate(any(), any());
    // The beta request never completed, so its cache must remain untouched.
    assertThat(unservedRequest.result().isDone())
            .as("No requests made by client - beta were successful")
            .isFalse();
    verify(lockWatchCacheMap.get(unservedClient), never()).processCommitTimestampsUpdate(any(), any());
}
Use of com.palantir.lock.watch.LockWatchCache in the atlasdb project by Palantir.
From the nearbyCommitsDoNotAffectResultsPresentInCache method of the LockWatchValueIntegrationTest class.
@Test
public void nearbyCommitsDoNotAffectResultsPresentInCache() {
// presumably 1.0 is a cache-validation probability/rate — TODO confirm against createTransactionManager
createTransactionManager(1.0);
// Seed three cells so subsequent reads can be served from the lock-watch value cache.
txnManager.runTaskThrowOnConflict(txn -> {
txn.put(TABLE_REF, ImmutableMap.of(CELL_1, DATA_1, CELL_2, DATA_2, CELL_3, DATA_3));
return null;
});
LockWatchIntegrationTestUtilities.awaitAllUnlocked(txnManager);
// Condition fires at commit stage: simulate another write transaction that overlaps
// [startTs, commitTs] of the transaction under test, touching the lock-watch cache.
LockWatchIntegrationTestUtilities.CommitStageCondition<Void> condition = new LockWatchIntegrationTestUtilities.CommitStageCondition<>((startTs, commitTs) -> {
LockWatchCache cache = LockWatchIntegrationTestUtilities.extractInternalLockWatchManager(txnManager).getCache();
simulateOverlappingWriteTransaction(cache, startTs, commitTs);
return null;
});
// The transaction must still commit: cached values read before the overlapping commit
// should not be invalidated by it.
assertThatCode(() -> txnManager.runTaskWithConditionThrowOnConflict(condition, (txn, _unused) -> {
condition.initialiseWithStartTimestamp(txn.getTimestamp());
// Jump the timestamp service far ahead so the overlapping commit lands between
// this transaction's start and commit timestamps — TODO confirm intent.
txnManager.getTimestampManagementService().fastForwardTimestamp(txn.getTimestamp() + 1_000_000);
txn.get(TABLE_REF, ImmutableSet.of(CELL_1));
// A write forces this to go through serializable conflict checking
txn.put(TABLE_REF, ImmutableMap.of(CELL_2, DATA_1));
return null;
})).doesNotThrowAnyException();
}
Use of com.palantir.lock.watch.LockWatchCache in the atlasdb project by Palantir.
From the getStartTransactionResponses method of the BatchingIdentifiedAtlasDbTransactionStarter class.
/**
 * Requests start-transaction responses from TimeLock until the desired count is reached,
 * feeding each batch's lock-watch update into the cache. The server may return fewer
 * transactions than requested, hence the loop.
 *
 * <p>On any failure, cache state for the already-started transactions is rolled back and
 * their immutable-timestamp locks are released before the error is rethrown unchecked.
 *
 * @param lockLeaseService client used to start transactions with lock watches
 * @param cache lock-watch cache to update with each batch response
 * @param numberOfTransactions total number of transaction responses to accumulate
 * @return exactly {@code numberOfTransactions} started-transaction responses
 */
private static List<StartIdentifiedAtlasDbTransactionResponse> getStartTransactionResponses(LockLeaseService lockLeaseService, LockWatchCache cache, int numberOfTransactions) {
    List<StartIdentifiedAtlasDbTransactionResponse> accumulated = new ArrayList<>();
    while (accumulated.size() < numberOfTransactions) {
        try {
            int remaining = numberOfTransactions - accumulated.size();
            Optional<LockWatchVersion> knownVersion = cache.getEventCache().lastKnownVersion();
            ConjureStartTransactionsResponse batch =
                    lockLeaseService.startTransactionsWithWatches(knownVersion, remaining);
            TransactionStarterHelper.updateCacheWithStartTransactionResponse(cache, batch);
            accumulated.addAll(TransactionStarterHelper.split(batch));
        } catch (Throwable t) {
            // Best-effort rollback: purge cache entries and unlock everything started so far.
            TransactionStarterHelper.cleanUpCaches(cache, accumulated);
            TransactionStarterHelper.unlock(
                    accumulated.stream()
                            .map(started -> started.immutableTimestamp().getLock())
                            .collect(Collectors.toSet()),
                    lockLeaseService);
            throw Throwables.throwUncheckedException(t);
        }
    }
    return accumulated;
}
Aggregations