Use of com.palantir.atlasdb.timelock.api.MultiClientConjureTimelockService in project atlasdb by palantir.
From the class MultiNodePaxosTimeLockServerIntegrationTest, method assertSanityAndGetLeaderTimes.
private LeaderTimes assertSanityAndGetLeaderTimes(
        TestableTimelockServer leader, Set<Namespace> expectedNamespaces) {
    MultiClientConjureTimelockService multiClientConjureTimelockService = leader.multiClientService();

    LeaderTimes leaderTimes = multiClientConjureTimelockService.leaderTimes(AUTH_HEADER, expectedNamespaces);
    Set<Namespace> namespaces = leaderTimes.getLeaderTimes().keySet();
    assertThat(namespaces).hasSameElementsAs(expectedNamespaces);

    return leaderTimes;
}
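A minimal usage sketch (not taken from the project; the namespace values, and the cluster and client fields referenced here, are assumptions based on the other snippets, and Namespace.of is assumed to be the Conjure alias factory) showing how a test in this class might call the helper:
// Hypothetical caller: ask the current leader for leader times across two namespaces,
// relying on the helper's sanity assertion before inspecting the result.
Set<Namespace> namespaces = ImmutableSet.of(Namespace.of("client-1"), Namespace.of("client-2"));
TestableTimelockServer leader = cluster.currentLeaderFor(client.namespace());
LeaderTimes leaderTimes = assertSanityAndGetLeaderTimes(leader, namespaces);
assertThat(leaderTimes.getLeaderTimes()).hasSize(namespaces.size());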
Use of com.palantir.atlasdb.timelock.api.MultiClientConjureTimelockService in project atlasdb by palantir.
From the class MultiNodePaxosTimeLockServerIntegrationTest, method sanityCheckMultiClientGetCommitTimestampsAgainstConjureTimelockService.
@Test
public void sanityCheckMultiClientGetCommitTimestampsAgainstConjureTimelockService() {
    TestableTimelockServer leader = cluster.currentLeaderFor(client.namespace());
    // Multi-client batched TimeLock endpoints do not support multi-leader mode on TimeLock.
    Assume.assumeFalse(leader.isMultiLeader());

    MultiClientConjureTimelockService multiClientService = leader.multiClientService();
    Set<String> expectedNamespaces = ImmutableSet.of("alta", "mp");
    Map<Namespace, GetCommitTimestampsResponse> multiClientResponses =
            multiClientService.getCommitTimestampsForClients(
                    AUTH_HEADER, defaultGetCommitTimestampsRequests(expectedNamespaces));
    assertSanityOfNamespacesServed(expectedNamespaces, multiClientResponses);

    // Whether we hit the multi-client endpoint or the conjureTimelockService endpoint (which serves
    // one client per call), the same underlying service processes the request for a given namespace.
    multiClientResponses.forEach((namespace, responseFromBatchedEndpoint) -> {
        GetCommitTimestampsResponse conjureGetCommitTimestampResponse = leader.client(namespace.get())
                .namespacedConjureTimelockService()
                .getCommitTimestamps(defaultCommitTimestampRequest());
        assertThat(conjureGetCommitTimestampResponse.getLockWatchUpdate().logId())
                .isEqualTo(responseFromBatchedEndpoint.getLockWatchUpdate().logId());
        assertThat(conjureGetCommitTimestampResponse.getInclusiveLower())
                .as("timestamps should contiguously increase per namespace if there are no elections.")
                .isEqualTo(responseFromBatchedEndpoint.getInclusiveUpper() + 1);
    });
}
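The helper assertSanityOfNamespacesServed is not shown in this excerpt. A plausible reconstruction (an assumption, not the project's actual implementation) only needs to check that the batched response covers exactly the expected namespaces:
// Hypothetical reconstruction of the omitted helper; mirrors the assertions used elsewhere in this class.
private void assertSanityOfNamespacesServed(
        Set<String> expectedNamespaces, Map<Namespace, GetCommitTimestampsResponse> responses) {
    Set<String> namespaces =
            responses.keySet().stream().map(Namespace::get).collect(Collectors.toSet());
    assertThat(namespaces).hasSameElementsAs(expectedNamespaces);
}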
Use of com.palantir.atlasdb.timelock.api.MultiClientConjureTimelockService in project atlasdb by palantir.
From the class MultiNodePaxosTimeLockServerIntegrationTest, method assertSanityAndStartTransactions.
private Map<Namespace, ConjureStartTransactionsResponse> assertSanityAndStartTransactions(
        TestableTimelockServer leader, List<String> expectedNamespaces) {
    MultiClientConjureTimelockService multiClientConjureTimelockService = leader.multiClientService();
    int numTransactions = 5;
    Map<Namespace, ConjureStartTransactionsRequest> namespaceToRequestMap =
            defaultStartTransactionsRequests(expectedNamespaces, numTransactions);
    Map<Namespace, ConjureStartTransactionsResponse> startedTransactions =
            multiClientConjureTimelockService.startTransactionsForClients(AUTH_HEADER, namespaceToRequestMap);

    Set<String> namespaces = startedTransactions.keySet().stream()
            .map(Namespace::get)
            .collect(Collectors.toSet());
    assertThat(namespaces).hasSameElementsAs(expectedNamespaces);
    assertThat(startedTransactions.values().stream()
                    .map(ConjureStartTransactionsResponse::getTimestamps)
                    .mapToLong(partitionedTimestamps -> partitionedTimestamps.stream().count())
                    .sum())
            .isEqualTo(namespaces.size() * numTransactions);

    return startedTransactions;
}
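A hypothetical caller of the helper above (the namespace names are illustrative; the expected count of 5 per namespace matches the numTransactions constant inside the helper):
// Hypothetical usage: start transactions for two namespaces on the current leader and
// verify each response carries the expected number of partitioned timestamps.
TestableTimelockServer leader = cluster.currentLeaderFor(client.namespace());
Map<Namespace, ConjureStartTransactionsResponse> startedTransactions =
        assertSanityAndStartTransactions(leader, ImmutableList.of("client-a", "client-b"));
startedTransactions.values().forEach(response ->
        assertThat(response.getTimestamps().stream().count()).isEqualTo(5));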
Use of com.palantir.atlasdb.timelock.api.MultiClientConjureTimelockService in project atlasdb by palantir.
From the class MultiNodePaxosTimeLockServerIntegrationTest, method sanityCheckMultiClientGetCommitTimestamps.
@Test
public void sanityCheckMultiClientGetCommitTimestamps() {
    TestableTimelockServer leader = cluster.currentLeaderFor(client.namespace());
    // Multi-client batched TimeLock endpoints do not support multi-leader mode on TimeLock.
    Assume.assumeFalse(leader.isMultiLeader());

    MultiClientConjureTimelockService service = leader.multiClientService();
    Set<String> expectedNamespaces = ImmutableSet.of("cli-1", "cli-2");
    Map<Namespace, GetCommitTimestampsResponse> multiClientResponses =
            service.getCommitTimestampsForClients(AUTH_HEADER, defaultGetCommitTimestampsRequests(expectedNamespaces));
    assertSanityOfNamespacesServed(expectedNamespaces, multiClientResponses);

    Set<UUID> leadershipIds = multiClientResponses.values().stream()
            .map(GetCommitTimestampsResponse::getLockWatchUpdate)
            .map(LockWatchStateUpdate::logId)
            .collect(Collectors.toSet());
    assertThat(leadershipIds).hasSameSizeAs(expectedNamespaces);
}
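The request-building helper defaultGetCommitTimestampsRequests is also omitted from the excerpt. A sketch of a plausible shape follows; the GetCommitTimestampsRequest builder and its numTimestamps field are assumptions about the Conjure-generated API, not verified against the project:
// Sketch only: one commit-timestamp request per namespace; the builder field names are assumed.
private static Map<Namespace, GetCommitTimestampsRequest> defaultGetCommitTimestampsRequests(Set<String> namespaces) {
    return namespaces.stream().collect(Collectors.toMap(
            Namespace::of,
            unused -> GetCommitTimestampsRequest.builder()
                    .numTimestamps(4)
                    .build()));
}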
Use of com.palantir.atlasdb.timelock.api.MultiClientConjureTimelockService in project atlasdb by palantir.
From the class MultiNodePaxosTimeLockServerIntegrationTest, method sanityCheckMultiClientStartTransactionsAgainstConjureTimelockService.
@Test
public void sanityCheckMultiClientStartTransactionsAgainstConjureTimelockService() {
    TestableTimelockServer leader = cluster.currentLeaderFor(client.namespace());
    // Multi-client batched TimeLock endpoints do not support multi-leader mode on TimeLock.
    Assume.assumeFalse(leader.isMultiLeader());

    MultiClientConjureTimelockService multiClientConjureTimelockService = leader.multiClientService();
    List<String> expectedNamespaces = ImmutableList.of("alpha", "beta");
    int numTransactions = 7;
    Map<Namespace, ConjureStartTransactionsRequest> namespaceToRequestMap =
            defaultStartTransactionsRequests(expectedNamespaces, numTransactions);
    Map<Namespace, ConjureStartTransactionsResponse> startedTransactions =
            multiClientConjureTimelockService.startTransactionsForClients(AUTH_HEADER, namespaceToRequestMap);

    // Whether we hit the multi-client endpoint or the conjureTimelockService endpoint, the same
    // underlying service processes the request for a given namespace.
    startedTransactions.forEach((namespace, responseFromBatchedEndpoint) -> {
        ConjureStartTransactionsResponse responseFromLegacyEndpoint = leader.client(namespace.get())
                .namespacedConjureTimelockService()
                .startTransactions(namespaceToRequestMap.get(namespace));
        assertThat(responseFromLegacyEndpoint.getLockWatchUpdate().logId())
                .isEqualTo(responseFromBatchedEndpoint.getLockWatchUpdate().logId());

        PartitionedTimestamps batchedEndpointTimestamps = responseFromBatchedEndpoint.getTimestamps();
        long lastTimestamp = batchedEndpointTimestamps.stream().max().orElseThrow(SafeIllegalStateException::new);
        assertThat(responseFromLegacyEndpoint.getTimestamps().start()).isGreaterThan(lastTimestamp);
    });
}
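Similarly, defaultStartTransactionsRequests is not part of the excerpt. The sketch below is an assumption about its shape; the ConjureStartTransactionsRequest builder fields (requestId, requestorId, numTransactions) are not verified against the project:
// Sketch only: one start-transactions request per namespace, each asking for numTransactions
// timestamps; the builder field names are assumed.
private static Map<Namespace, ConjureStartTransactionsRequest> defaultStartTransactionsRequests(
        List<String> namespaces, int numTransactions) {
    return namespaces.stream().collect(Collectors.toMap(
            Namespace::of,
            unused -> ConjureStartTransactionsRequest.builder()
                    .requestId(UUID.randomUUID())
                    .requestorId(UUID.randomUUID())
                    .numTransactions(numTransactions)
                    .build()));
}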