Use of com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout in project java-driver by datastax.
The class ContinuousGraphRequestHandlerSpeculativeExecutionTest, method should_stop_retrying_other_executions_if_result_complete.
@Test
@UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
public void should_stop_retrying_other_executions_if_result_complete(
    boolean defaultIdempotence, GraphStatement<?> statement) throws Exception {
  GraphRequestHandlerTestHarness.Builder harnessBuilder =
      GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence);
  PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1);
  PoolBehavior node2Behavior = harnessBuilder.customBehavior(node2);
  PoolBehavior node3Behavior = harnessBuilder.customBehavior(node3);

  try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
    SpeculativeExecutionPolicy speculativeExecutionPolicy =
        harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
    long firstExecutionDelay = 100L;
    when(speculativeExecutionPolicy.nextExecution(any(Node.class), eq(null), eq(statement), eq(1)))
        .thenReturn(firstExecutionDelay);

    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());

    CompletionStage<AsyncGraphResultSet> resultSetFuture =
        new ContinuousGraphRequestHandler(
                statement,
                harness.getSession(),
                harness.getContext(),
                "test",
                module,
                graphSupportChecker)
            .handle();

    node1Behavior.verifyWrite();
    node1Behavior.setWriteSuccess();

    // next scheduled timeout should be the first speculative execution. Get it and run it.
    CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout();
    assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS))
        .isEqualTo(firstExecutionDelay);
    speculativeExecution1.task().run(speculativeExecution1);

    node2Behavior.verifyWrite();
    node2Behavior.setWriteSuccess();

    // Complete the request from the initial execution
    node1Behavior.setResponseSuccess(
        defaultDseFrameOf(singleGraphRow(GraphProtocol.GRAPH_BINARY_1_0, module)));
    assertThatStage(resultSetFuture).isSuccess();

    // node2 replies with a response that would trigger a RETRY_NEXT if the request was still
    // running
    node2Behavior.setResponseSuccess(
        defaultDseFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));

    // The speculative execution should not move to node3 because it is stopped
    node3Behavior.verifyNoWrite();
  }
}
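Every test on this page drives timeouts through the same sequence: the harness's timer captures scheduled timeouts instead of firing them, the test pulls the next CapturedTimeout, asserts its delay, and then runs the captured task by hand. The helper below is a hypothetical consolidation of that sequence, not part of the driver's test code; the method name is invented, but every call on the harness and the timeout appears verbatim in the tests on this page.

// Hypothetical helper summarizing the pattern repeated in the tests on this page.
private static void fireNextScheduledTimeout(
    RequestHandlerTestHarness harness, long expectedDelay, TimeUnit unit) throws Exception {
  // The harness records each timeout the handler schedules, in scheduling order.
  CapturedTimeout timeout = harness.nextScheduledTimeout();
  // The delay is whatever the handler requested: a speculative-execution delay, a page
  // timeout, or the global graph timeout, depending on the test.
  assertThat(timeout.getDelay(unit)).isEqualTo(expectedDelay);
  // The captured timeout is passed back to its own task, so the timeout logic runs
  // synchronously on the test thread.
  timeout.task().run(timeout);
}

In the test above, fireNextScheduledTimeout(harness, firstExecutionDelay, TimeUnit.MILLISECONDS) would stand in for the three speculativeExecution1 lines; the opposite case, where the handler answers before the timeout fires and the test only checks isCancelled(), appears in the paging test further down.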
Use of com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout in project java-driver by datastax.
The class ContinuousCqlRequestHandlerTest, method should_time_out_if_other_page_takes_too_long.
@Test
@UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
public void should_time_out_if_other_page_takes_too_long(DseProtocolVersion version)
    throws Exception {
  RequestHandlerTestHarness.Builder builder =
      continuousHarnessBuilder().withProtocolVersion(version);
  PoolBehavior node1Behavior = builder.customBehavior(node1);

  try (RequestHandlerTestHarness harness = builder.build()) {
    CompletionStage<ContinuousAsyncResultSet> page1Future =
        new ContinuousCqlRequestHandler(
                UNDEFINED_IDEMPOTENCE_STATEMENT,
                harness.getSession(),
                harness.getContext(),
                "test")
            .handle();

    // mark the initial request as successful, which should schedule a timeout for the first page
    node1Behavior.setWriteSuccess();
    CapturedTimeout page1Timeout = harness.nextScheduledTimeout();
    assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS))
        .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos());

    // the server replies with page 1, the corresponding timeout should be cancelled
    node1Behavior.setResponseSuccess(defaultFrameOf(DseTestFixtures.tenDseRows(1, false)));
    assertThat(page1Timeout.isCancelled()).isTrue();

    // request page 2, the queue is empty so this should request more pages and schedule another
    // timeout
    ContinuousAsyncResultSet page1 = CompletableFutures.getUninterruptibly(page1Future);
    CompletionStage<ContinuousAsyncResultSet> page2Future = page1.fetchNextPage();
    CapturedTimeout page2Timeout = harness.nextScheduledTimeout();
    assertThat(page2Timeout.getDelay(TimeUnit.NANOSECONDS))
        .isEqualTo(TIMEOUT_OTHER_PAGES.toNanos());

    page2Timeout.task().run(page2Timeout);
    assertThatStage(page2Future)
        .isFailed(
            t ->
                assertThat(t)
                    .isInstanceOf(DriverTimeoutException.class)
                    .hasMessageContaining("Timed out waiting for page 2"));
  }
}
Use of com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout in project java-driver by datastax.
The class ContinuousCqlRequestHandlerTest, method should_time_out_if_first_page_takes_too_long.
@Test
@UseDataProvider(value = "allDseProtocolVersions", location = DseTestDataProviders.class)
public void should_time_out_if_first_page_takes_too_long(DseProtocolVersion version)
    throws Exception {
  RequestHandlerTestHarness.Builder builder =
      continuousHarnessBuilder().withProtocolVersion(version);
  PoolBehavior node1Behavior = builder.customBehavior(node1);

  try (RequestHandlerTestHarness harness = builder.build()) {
    CompletionStage<ContinuousAsyncResultSet> resultSetFuture =
        new ContinuousCqlRequestHandler(
                UNDEFINED_IDEMPOTENCE_STATEMENT,
                harness.getSession(),
                harness.getContext(),
                "test")
            .handle();

    // mark the initial request as successful, which should schedule a timeout for the first page
    node1Behavior.setWriteSuccess();
    CapturedTimeout page1Timeout = harness.nextScheduledTimeout();
    assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS))
        .isEqualTo(TIMEOUT_FIRST_PAGE.toNanos());

    page1Timeout.task().run(page1Timeout);
    assertThatStage(resultSetFuture)
        .isFailed(
            t ->
                assertThat(t)
                    .isInstanceOf(DriverTimeoutException.class)
                    .hasMessageContaining("Timed out waiting for page 1"));
  }
}
Use of com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout in project java-driver by datastax.
The class ContinuousGraphRequestHandlerTest, method should_honor_default_timeout.
@Test
public void should_honor_default_timeout() throws Exception {
  // given
  GraphBinaryModule binaryModule = createGraphBinaryModule(mockContext);
  Duration defaultTimeout = Duration.ofSeconds(1);
  RequestHandlerTestHarness.Builder builder =
      GraphRequestHandlerTestHarness.builder().withGraphTimeout(defaultTimeout);
  PoolBehavior node1Behavior = builder.customBehavior(node);

  try (RequestHandlerTestHarness harness = builder.build()) {
    DriverExecutionProfile profile = harness.getContext().getConfig().getDefaultProfile();
    when(profile.isDefined(DseDriverOption.GRAPH_SUB_PROTOCOL)).thenReturn(true);
    when(profile.getString(DseDriverOption.GRAPH_SUB_PROTOCOL))
        .thenReturn(GraphProtocol.GRAPH_BINARY_1_0.toInternalCode());
    GraphStatement<?> graphStatement = ScriptGraphStatement.newInstance("mockQuery");

    // when
    ContinuousGraphRequestHandler handler =
        new ContinuousGraphRequestHandler(
            graphStatement,
            harness.getSession(),
            harness.getContext(),
            "test",
            binaryModule,
            new GraphSupportChecker());

    // send the initial request
    CompletionStage<AsyncGraphResultSet> page1Future = handler.handle();

    // acknowledge the write, will set the global timeout
    node1Behavior.verifyWrite();
    node1Behavior.setWriteSuccess();
    CapturedTimeout globalTimeout = harness.nextScheduledTimeout();
    assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(defaultTimeout.toNanos());

    // will trigger the global timeout and complete it exceptionally
    globalTimeout.task().run(globalTimeout);
    assertThat(page1Future.toCompletableFuture()).isCompletedExceptionally();
    assertThatThrownBy(() -> page1Future.toCompletableFuture().get())
        .hasRootCauseExactlyInstanceOf(DriverTimeoutException.class)
        .hasMessageContaining("Query timed out after " + defaultTimeout);
  }
}
Use of com.datastax.oss.driver.internal.core.util.concurrent.CapturingTimer.CapturedTimeout in project java-driver by datastax.
The class ContinuousGraphRequestHandlerSpeculativeExecutionTest, method should_fail_if_no_more_nodes_and_initial_execution_is_last.
@Test
@UseDataProvider(location = DseTestDataProviders.class, value = "idempotentGraphConfig")
public void should_fail_if_no_more_nodes_and_initial_execution_is_last(
    boolean defaultIdempotence, GraphStatement<?> statement) throws Exception {
  GraphRequestHandlerTestHarness.Builder harnessBuilder =
      GraphRequestHandlerTestHarness.builder().withDefaultIdempotence(defaultIdempotence);
  PoolBehavior node1Behavior = harnessBuilder.customBehavior(node1);
  harnessBuilder.withResponse(
      node2,
      defaultDseFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));

  try (GraphRequestHandlerTestHarness harness = harnessBuilder.build()) {
    SpeculativeExecutionPolicy speculativeExecutionPolicy =
        harness.getContext().getSpeculativeExecutionPolicy(DriverExecutionProfile.DEFAULT_NAME);
    long firstExecutionDelay = 100L;
    when(speculativeExecutionPolicy.nextExecution(any(Node.class), eq(null), eq(statement), eq(1)))
        .thenReturn(firstExecutionDelay);

    GraphBinaryModule module = createGraphBinaryModule(harness.getContext());

    CompletionStage<AsyncGraphResultSet> resultSetFuture =
        new ContinuousGraphRequestHandler(
                statement,
                harness.getSession(),
                harness.getContext(),
                "test",
                module,
                graphSupportChecker)
            .handle();

    node1Behavior.verifyWrite();
    node1Behavior.setWriteSuccess();
    // do not simulate a response from node1 yet

    // Run the next scheduled task to start the speculative execution. node2 will reply with a
    // BOOTSTRAPPING error, causing a RETRY_NEXT; but the query plan is now empty so the
    // speculative execution stops.
    // next scheduled timeout should be the first speculative execution. Get it and run it.
    CapturedTimeout speculativeExecution1 = harness.nextScheduledTimeout();
    assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS))
        .isEqualTo(firstExecutionDelay);
    speculativeExecution1.task().run(speculativeExecution1);

    // node1 now replies with the same response, that triggers a RETRY_NEXT
    node1Behavior.setResponseSuccess(
        defaultDseFrameOf(new Error(ProtocolConstants.ErrorCode.IS_BOOTSTRAPPING, "mock message")));

    // But again the query plan is empty so that should fail the request
    assertThatStage(resultSetFuture)
        .isFailed(
            error -> {
              assertThat(error).isInstanceOf(AllNodesFailedException.class);
              Map<Node, List<Throwable>> nodeErrors =
                  ((AllNodesFailedException) error).getAllErrors();
              assertThat(nodeErrors).containsOnlyKeys(node1, node2);
              assertThat(nodeErrors.get(node1).get(0)).isInstanceOf(BootstrappingException.class);
              assertThat(nodeErrors.get(node2).get(0)).isInstanceOf(BootstrappingException.class);
            });
  }
}
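One detail worth noting across these tests is how the expected delay is expressed. Speculative-execution delays come from SpeculativeExecutionPolicy.nextExecution as a plain long in milliseconds, so those tests compare the captured delay with TimeUnit.MILLISECONDS, while page and global timeouts are configured as Durations and compared in nanoseconds. The three assertion styles, copied from the tests above, side by side:

// Speculative-execution delay: a long in milliseconds, stubbed on the policy mock.
assertThat(speculativeExecution1.getDelay(TimeUnit.MILLISECONDS)).isEqualTo(firstExecutionDelay);
// Per-page timeout: a Duration from configuration, compared in nanoseconds.
assertThat(page1Timeout.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(TIMEOUT_FIRST_PAGE.toNanos());
// Global graph timeout: also a Duration, set on the harness via withGraphTimeout.
assertThat(globalTimeout.getDelay(TimeUnit.NANOSECONDS)).isEqualTo(defaultTimeout.toNanos());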