Use of io.crate.execution.jobs.TasksService in project crate by crate.
Class DocLevelCollectTest, method collect:
private Bucket collect(RoutedCollectPhase collectNode) throws Throwable {
    JobSetup jobSetup = internalCluster().getDataNodeInstance(JobSetup.class);
    TasksService tasksService = internalCluster().getDataNodeInstance(TasksService.class);
    SharedShardContexts sharedShardContexts = new SharedShardContexts(
        internalCluster().getDataNodeInstance(IndicesService.class),
        UnaryOperator.identity());
    RootTask.Builder builder = tasksService.newBuilder(collectNode.jobId());
    // Wrap the collect phase in a node operation that streams its result
    // directly back instead of feeding a downstream execution phase.
    NodeOperation nodeOperation = NodeOperation.withDirectResponse(
        collectNode, mock(ExecutionPhase.class), (byte) 0, "remoteNode");
    List<CompletableFuture<StreamBucket>> results = jobSetup.prepareOnRemote(
        DUMMY_SESSION_INFO, List.of(nodeOperation), builder, sharedShardContexts);
    // Register and start the task; the future completes with the collected bucket.
    RootTask rootTask = tasksService.createTask(builder);
    rootTask.start();
    return results.get(0).get(2, TimeUnit.SECONDS);
}
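A call site for this helper might look like the sketch below; the RoutedCollectPhase (collectPhase) and the expected printed row are hypothetical, and printedTable is assumed to be the usual crate test helper for rendering a Bucket.

// Hypothetical usage: `collectPhase` is assumed to be a RoutedCollectPhase
// built for the table under test; the expected output is illustrative.
Bucket result = collect(collectPhase);
assertThat(TestingHelpers.printedTable(result), is("1| Arthur\n"));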
Use of io.crate.execution.jobs.TasksService in project crate by crate.
Class TasksServiceIntegrationTest, method testAllTasksAreClosed:
@Test
public void testAllTasksAreClosed() throws Exception {
    // Let's create some contexts which must be closed after statement execution.
    // Group-by query (a job collect context with sub-contexts plus a DistResultRXTask is created)
    setup.groupBySetup();
    execute("select age, name from characters group by 1, 2");
    // System table query (a job collect context without sub-contexts is created)
    execute("select random(), random() from sys.cluster limit 1");
    // information_schema table query (a job collect context without sub-contexts is created)
    execute("select table_name from information_schema.tables");
    // Multiple upserts (a SymbolBasedBulkShardProcessorContext is created)
    execute("create table upserts (id int primary key, d long)");
    ensureYellow();
    execute("insert into upserts (id, d) values (?, ?)",
        new Object[][] { new Object[] { 1, -7L }, new Object[] { 2, 3L } });
    refresh();
    // Upsert by id (an UpsertByIdContext is created)
    execute("update upserts set d = 5 where id = 1");
    // Get by id (an ESJobContext is created)
    execute("select * from upserts where id = 1");
    // Count (a CountTask is created)
    execute("select count(*) from upserts");
    // Now check that all tasks are gone.
    final Field activeTasks = TasksService.class.getDeclaredField("activeTasks");
    activeTasks.setAccessible(true);
    assertBusy(() -> {
        for (TasksService tasksService : internalCluster().getInstances(TasksService.class)) {
            Map<UUID, RootTask> tasksByJobId;
            try {
                tasksByJobId = (Map<UUID, RootTask>) activeTasks.get(tasksService);
                assertThat(tasksByJobId.size(), is(0));
            } catch (Exception e) {
                fail(e.getMessage());
            }
        }
    }, 1, TimeUnit.SECONDS);
}
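The reflection trick used to peek at the private activeTasks map generalizes to any private field a test needs to assert on; a minimal standalone sketch using only java.lang.reflect (the helper name is illustrative):

// Illustrative helper: read a private Map field from a service instance
// for test assertions, without widening the field's visibility.
static Map<UUID, ?> readPrivateTaskMap(Object service, String fieldName) throws Exception {
    Field field = service.getClass().getDeclaredField(fieldName);
    field.setAccessible(true); // bypass `private` for the test only
    return (Map<UUID, ?>) field.get(service);
}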
Use of io.crate.execution.jobs.TasksService in project crate by crate.
Class KillPlanTest, method testKillTaskCallsBroadcastOnTransportKillAllNodeAction:
@Test
public void testKillTaskCallsBroadcastOnTransportKillAllNodeAction() {
    AtomicInteger broadcastCalls = new AtomicInteger(0);
    AtomicInteger nodeOperationCalls = new AtomicInteger(0);
    TransportKillAllNodeAction killAllNodeAction = new TransportKillAllNodeAction(
        new TasksService(clusterService, new JobsLogs(() -> false)),
        clusterService,
        mock(TransportService.class)) {

        @Override
        public void broadcast(KillAllRequest request, ActionListener<Long> listener) {
            broadcastCalls.incrementAndGet();
        }

        @Override
        public CompletableFuture<KillResponse> nodeOperation(KillAllRequest request) {
            nodeOperationCalls.incrementAndGet();
            return super.nodeOperation(request);
        }
    };
    KillPlan killPlan = new KillPlan(null);
    killPlan.execute(null, "dummy-user", killAllNodeAction, mock(TransportKillJobsNodeAction.class), new TestingRowConsumer());
    assertThat(broadcastCalls.get(), is(1));
    assertThat(nodeOperationCalls.get(), is(0));
}
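Assuming KillPlan's constructor accepts a nullable job UUID where null means "kill everything", the complementary case of killing one specific job might be asserted as sketched below; the killJobsAction stub and its expected routing are hypothetical, mirroring the instrumentation above.

// Hypothetical mirror case: a KillPlan built with a concrete job UUID is
// assumed to route through TransportKillJobsNodeAction, not the kill-all path.
KillPlan killJobPlan = new KillPlan(UUID.randomUUID());
killJobPlan.execute(null, "dummy-user", killAllNodeAction, killJobsAction, new TestingRowConsumer());
assertThat(broadcastCalls.get(), is(0)); // the kill-all broadcast was not taken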
Use of io.crate.execution.jobs.TasksService in project crate by crate.
Class TransportKillAllNodeActionTest, method testKillIsCalledOnTasks:
@Test
public void testKillIsCalledOnTasks() throws Exception {
    TasksService tasksService = mock(TasksService.class, Answers.RETURNS_MOCKS);
    TransportKillAllNodeAction transportKillAllNodeAction = new TransportKillAllNodeAction(
        tasksService,
        clusterService,
        MockTransportService.createNewService(
            Settings.EMPTY, Version.CURRENT, THREAD_POOL, clusterService.getClusterSettings()));
    transportKillAllNodeAction.nodeOperation(new KillAllRequest("dummy-user")).get(5, TimeUnit.SECONDS);
    verify(tasksService, times(1)).killAll("dummy-user");
}
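If the assertion needed to inspect the argument rather than match it literally, a standard Mockito ArgumentCaptor would do; a minimal sketch using only stock Mockito APIs:

// Capture the user name passed to killAll instead of matching it literally.
ArgumentCaptor<String> userCaptor = ArgumentCaptor.forClass(String.class);
verify(tasksService).killAll(userCaptor.capture());
assertThat(userCaptor.getValue(), is("dummy-user"));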
Use of io.crate.execution.jobs.TasksService in project crate by crate.
Class NodeDisconnectJobMonitorServiceTest, method testOnParticipatingNodeDisconnectedKillsJob:
@Test
public void testOnParticipatingNodeDisconnectedKillsJob() throws Exception {
    TasksService tasksService = tasksInstance();
    DiscoveryNode coordinator = newNode("coordinator");
    DiscoveryNode dataNode = newNode("dataNode");
    RootTask.Builder builder = tasksService.newBuilder(
        UUID.randomUUID(),
        "dummy-user",
        coordinator.getId(),
        Arrays.asList(coordinator.getId(), dataNode.getId()));
    builder.addTask(new DummyTask());
    tasksService.createTask(builder);
    // Add a second job coordinated by the other node to make sure the broadcast logic
    // runs even though there are jobs coordinated by the disconnected node.
    builder = tasksService.newBuilder(UUID.randomUUID(), "dummy-user", dataNode.getId(), Collections.emptySet());
    builder.addTask(new DummyTask());
    tasksService.createTask(builder);
    AtomicInteger broadcasts = new AtomicInteger(0);
    TransportKillJobsNodeAction killAction = new TransportKillJobsNodeAction(
        tasksService,
        clusterService,
        mock(TransportService.class)) {

        @Override
        public void broadcast(KillJobsRequest request, ActionListener<Long> listener, Collection<String> excludedNodeIds) {
            broadcasts.incrementAndGet();
        }
    };
    NodeDisconnectJobMonitorService monitorService = new NodeDisconnectJobMonitorService(
        tasksService,
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        mock(TransportService.class),
        killAction);
    monitorService.onNodeDisconnected(dataNode, mock(Transport.Connection.class));
    assertThat(broadcasts.get(), is(1));
    monitorService.close();
}
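The DummyTask used above is not shown in this snippet. A minimal stand-in might look like the sketch below; the members of crate's Task interface are assumptions here, not confirmed against io.crate.execution.jobs.Task.

// Hypothetical no-op Task; the interface methods shown are assumed,
// not verified against the real io.crate.execution.jobs.Task contract.
class DummyTask implements Task {

    @Override
    public int id() {
        return 0;
    }

    @Override
    public String name() {
        return "dummy";
    }

    @Override
    public void start() {
        // nothing to start
    }

    @Override
    public void kill(Throwable reason) {
        // nothing to cancel
    }
}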