Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.
From class HttpRemoteTaskRunnerTest, method testWorkerDisapperAndReappearBeforeItsCleanup.
@Test(timeout = 60_000L)
public void testWorkerDisapperAndReappearBeforeItsCleanup() throws Exception {
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
EasyMock.replay(druidNodeDiscoveryProvider);
ConcurrentMap<String, CustomFunction> workerHolders = new ConcurrentHashMap<>();
HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
    TestHelper.makeJsonMapper(),
    new HttpRemoteTaskRunnerConfig() {
      @Override
      public int getPendingTasksRunnerNumThreads() {
        return 3;
      }
    },
    EasyMock.createNiceMock(HttpClient.class),
    DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
    new NoopProvisioningStrategy<>(),
    druidNodeDiscoveryProvider,
    EasyMock.createNiceMock(TaskStorage.class),
    EasyMock.createNiceMock(CuratorFramework.class),
    new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
) {
  @Override
  protected WorkerHolder createWorkerHolder(ObjectMapper smileMapper, HttpClient httpClient, HttpRemoteTaskRunnerConfig config, ScheduledExecutorService workersSyncExec, WorkerHolder.Listener listener, Worker worker, List<TaskAnnouncement> knownAnnouncements) {
    if (workerHolders.containsKey(worker.getHost())) {
      return workerHolders.get(worker.getHost()).apply(smileMapper, httpClient, config, workersSyncExec, listener, worker, knownAnnouncements);
    } else {
      throw new ISE("No WorkerHolder for [%s].", worker.getHost());
    }
  }
};
taskRunner.start();
Task task1 = NoopTask.create("task-id-1", 0);
Task task2 = NoopTask.create("task-id-2", 0);
DiscoveryDruidNode druidNode = new DiscoveryDruidNode(
    new DruidNode("service", "host", false, 1234, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(
        WorkerNodeService.DISCOVERY_SERVICE_KEY,
        new WorkerNodeService("ip1", 2, "0", WorkerConfig.DEFAULT_CATEGORY)
    )
);
workerHolders.put(
    "host:1234",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(
            task1,
            ImmutableList.of(
                TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.unknown()),
                TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.create("host", 1234, 1235)),
                TaskAnnouncement.create(task1, TaskStatus.success(task1.getId()), TaskLocation.create("host", 1234, 1235))
            ),
            task2,
            ImmutableList.of(
                TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.unknown()),
                TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.create("host", 1234, 1235))
            )
        ),
        new AtomicInteger(),
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode));
Future<TaskStatus> future1 = taskRunner.run(task1);
Future<TaskStatus> future2 = taskRunner.run(task2);
while (taskRunner.getPendingTasks().size() > 0) {
Thread.sleep(100);
}
druidNodeDiscovery.getListeners().get(0).nodesRemoved(ImmutableList.of(druidNode));
workerHolders.put("host:1234", (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(mapper, httpClient, config, exec, listener, worker, knownAnnouncements, ImmutableList.of(TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.create("host", 1234, 1235)), TaskAnnouncement.create(task2, TaskStatus.success(task2.getId()), TaskLocation.create("host", 1234, 1235))), ImmutableMap.of(), new AtomicInteger(), ImmutableSet.of()));
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode));
Assert.assertTrue(future1.get().isSuccess());
Assert.assertTrue(future2.get().isSuccess());
}
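These tests drive node discovery by hand through a small test double, TestDruidNodeDiscovery, whose definition is not included in this section: it records the listeners registered by the task runner so the test can invoke nodesAdded() and nodesRemoved() on them directly. A minimal sketch of such a stub, assuming DruidNodeDiscovery exposes getAllNodes() and registerListener() (this is not the exact class from the Druid test suite):

static class TestDruidNodeDiscovery implements DruidNodeDiscovery {
  private final List<Listener> listeners = new ArrayList<>();

  @Override
  public Collection<DiscoveryDruidNode> getAllNodes() {
    // The tests above never read the node set from the stub; they push updates through the listeners instead.
    return Collections.emptyList();
  }

  @Override
  public void registerListener(Listener listener) {
    // Just remember the listener so the test can call nodesAdded()/nodesRemoved() on it later.
    listeners.add(listener);
  }

  public List<Listener> getListeners() {
    return listeners;
  }
}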
Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.
From class HttpRemoteTaskRunnerTest, method testWorkerDisapperAndReappearAfterItsCleanup.
@Test(timeout = 60_000L)
public void testWorkerDisapperAndReappearAfterItsCleanup() throws Exception {
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
EasyMock.replay(druidNodeDiscoveryProvider);
ConcurrentMap<String, CustomFunction> workerHolders = new ConcurrentHashMap<>();
HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
    TestHelper.makeJsonMapper(),
    new HttpRemoteTaskRunnerConfig() {
      @Override
      public Period getTaskCleanupTimeout() {
        return Period.millis(1);
      }
    },
    EasyMock.createNiceMock(HttpClient.class),
    DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
    new NoopProvisioningStrategy<>(),
    druidNodeDiscoveryProvider,
    EasyMock.createNiceMock(TaskStorage.class),
    EasyMock.createNiceMock(CuratorFramework.class),
    new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
) {
  @Override
  protected WorkerHolder createWorkerHolder(ObjectMapper smileMapper, HttpClient httpClient, HttpRemoteTaskRunnerConfig config, ScheduledExecutorService workersSyncExec, WorkerHolder.Listener listener, Worker worker, List<TaskAnnouncement> knownAnnouncements) {
    if (workerHolders.containsKey(worker.getHost())) {
      return workerHolders.get(worker.getHost()).apply(smileMapper, httpClient, config, workersSyncExec, listener, worker, knownAnnouncements);
    } else {
      throw new ISE("No WorkerHolder for [%s].", worker.getHost());
    }
  }
};
taskRunner.start();
Task task1 = NoopTask.create("task-id-1", 0);
Task task2 = NoopTask.create("task-id-2", 0);
DiscoveryDruidNode druidNode = new DiscoveryDruidNode(
    new DruidNode("service", "host", false, 1234, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(
        WorkerNodeService.DISCOVERY_SERVICE_KEY,
        new WorkerNodeService("ip1", 2, "0", WorkerConfig.DEFAULT_CATEGORY)
    )
);
workerHolders.put(
    "host:1234",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(
            task1,
            ImmutableList.of(
                TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.unknown()),
                TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.create("host", 1234, 1235))
            ),
            task2,
            ImmutableList.of(
                TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.unknown()),
                TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.create("host", 1234, 1235))
            )
        ),
        new AtomicInteger(),
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode));
Future<TaskStatus> future1 = taskRunner.run(task1);
Future<TaskStatus> future2 = taskRunner.run(task2);
while (taskRunner.getPendingTasks().size() > 0) {
Thread.sleep(100);
}
druidNodeDiscovery.getListeners().get(0).nodesRemoved(ImmutableList.of(druidNode));
Assert.assertTrue(future1.get().isFailure());
Assert.assertTrue(future2.get().isFailure());
Assert.assertNotNull(future1.get().getErrorMsg());
Assert.assertNotNull(future2.get().getErrorMsg());
Assert.assertTrue(future1.get().getErrorMsg().startsWith("The worker that this task was assigned disappeared and did not report cleanup within timeout"));
Assert.assertTrue(future2.get().getErrorMsg().startsWith("The worker that this task was assigned disappeared and did not report cleanup within timeout"));
AtomicInteger ticks = new AtomicInteger();
Set<String> actualShutdowns = new ConcurrentHashSet<>();
workerHolders.put("host:1234", (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(mapper, httpClient, config, exec, listener, worker, knownAnnouncements, ImmutableList.of(TaskAnnouncement.create(task1, TaskStatus.success(task1.getId()), TaskLocation.create("host", 1234, 1235)), TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.create("host", 1234, 1235))), ImmutableMap.of(), ticks, actualShutdowns));
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode));
while (ticks.get() < 1) {
Thread.sleep(100);
}
Assert.assertEquals(ImmutableSet.of(task2.getId()), actualShutdowns);
Assert.assertTrue(taskRunner.run(task1).get().isFailure());
Assert.assertTrue(taskRunner.run(task2).get().isFailure());
}
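In both tests, workerHolders maps a worker host to a CustomFunction, a test-local factory that builds the WorkerHolder returned by the overridden createWorkerHolder(). Its shape can be read straight off the apply() call sites above; a sketch of the interface (the declaration in the actual test class may differ in minor details):

@FunctionalInterface
interface CustomFunction {
  // One parameter per argument passed at the apply() call sites in createWorkerHolder() above.
  WorkerHolder apply(
      ObjectMapper smileMapper,
      HttpClient httpClient,
      HttpRemoteTaskRunnerConfig config,
      ScheduledExecutorService workersSyncExec,
      WorkerHolder.Listener listener,
      Worker worker,
      List<TaskAnnouncement> knownAnnouncements
  );
}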
Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.
From class HttpRemoteTaskRunnerTest, method testFreshStartAndStop.
/*
Simulates startup of the Overlord. The Overlord is then stopped and is expected to shut down its provisioning service, deregister its discovery listeners, and stop the per-worker syncers.
*/
@Test(timeout = 60_000L)
public void testFreshStartAndStop() {
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery).times(2);
ProvisioningStrategy provisioningStrategy = EasyMock.createMock(ProvisioningStrategy.class);
ProvisioningService provisioningService = EasyMock.createNiceMock(ProvisioningService.class);
EasyMock.expect(provisioningStrategy.makeProvisioningService(isA(HttpRemoteTaskRunner.class))).andReturn(provisioningService);
provisioningService.close();
EasyMock.expectLastCall();
EasyMock.replay(druidNodeDiscoveryProvider, provisioningStrategy, provisioningService);
DiscoveryDruidNode druidNode1 = new DiscoveryDruidNode(
    new DruidNode("service", "host1", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(
        WorkerNodeService.DISCOVERY_SERVICE_KEY,
        new WorkerNodeService("ip1", 2, "0", WorkerConfig.DEFAULT_CATEGORY)
    )
);
DiscoveryDruidNode druidNode2 = new DiscoveryDruidNode(
    new DruidNode("service", "host2", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(
        WorkerNodeService.DISCOVERY_SERVICE_KEY,
        new WorkerNodeService("ip2", 2, "0", WorkerConfig.DEFAULT_CATEGORY)
    )
);
HttpRemoteTaskRunner taskRunner = newHttpTaskRunnerInstance(druidNodeDiscoveryProvider, provisioningStrategy);
taskRunner.start();
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode1, druidNode2));
ConcurrentMap<String, WorkerHolder> workers = taskRunner.getWorkersForTestingReadOnly();
Assert.assertEquals(2, workers.size());
Assert.assertTrue(workers.values().stream().noneMatch(w -> w.getUnderlyingSyncer().isExecutorShutdown()));
workers.values().iterator().next().stop();
taskRunner.stop();
Assert.assertTrue(druidNodeDiscovery.getListeners().isEmpty());
Assert.assertEquals(2, workers.size());
Assert.assertTrue(workers.values().stream().allMatch(w -> w.getUnderlyingSyncer().isExecutorShutdown()));
EasyMock.verify(druidNodeDiscoveryProvider, provisioningStrategy, provisioningService);
}
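Unlike the earlier tests, testFreshStartAndStop constructs its runner through a newHttpTaskRunnerInstance() helper whose body is not shown in this section. Judging from the inline constructor calls in the other tests, it presumably wraps the same HttpRemoteTaskRunner constructor with the given discovery provider and provisioning strategy; a hedged sketch (the config override and modifiers are assumptions, not the actual helper):

// Hypothetical helper, mirroring the inline constructor calls above; the real method in HttpRemoteTaskRunnerTest may differ.
private static HttpRemoteTaskRunner newHttpTaskRunnerInstance(
    DruidNodeDiscoveryProvider druidNodeDiscoveryProvider,
    ProvisioningStrategy provisioningStrategy
) {
  return new HttpRemoteTaskRunner(
      TestHelper.makeJsonMapper(),
      new HttpRemoteTaskRunnerConfig() {
        @Override
        public int getPendingTasksRunnerNumThreads() {
          return 3;
        }
      },
      EasyMock.createNiceMock(HttpClient.class),
      DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
      provisioningStrategy,
      druidNodeDiscoveryProvider,
      EasyMock.createNiceMock(TaskStorage.class),
      EasyMock.createNiceMock(CuratorFramework.class),
      new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
  );
}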
Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.
From class HttpRemoteTaskRunnerTest, method testMarkWorkersLazy.
@Test(timeout = 60_000L)
public void testMarkWorkersLazy() throws Exception {
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
EasyMock.replay(druidNodeDiscoveryProvider);
Task task1 = NoopTask.create("task-id-1", 0);
Task task2 = NoopTask.create("task-id-2", 0);
String additionalWorkerCategory = "category2";
ConcurrentMap<String, CustomFunction> workerHolders = new ConcurrentHashMap<>();
HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
    TestHelper.makeJsonMapper(),
    new HttpRemoteTaskRunnerConfig() {
      @Override
      public int getPendingTasksRunnerNumThreads() {
        return 3;
      }
    },
    EasyMock.createNiceMock(HttpClient.class),
    DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
    new NoopProvisioningStrategy<>(),
    druidNodeDiscoveryProvider,
    EasyMock.createNiceMock(TaskStorage.class),
    EasyMock.createNiceMock(CuratorFramework.class),
    new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
) {
  @Override
  protected WorkerHolder createWorkerHolder(ObjectMapper smileMapper, HttpClient httpClient, HttpRemoteTaskRunnerConfig config, ScheduledExecutorService workersSyncExec, WorkerHolder.Listener listener, Worker worker, List<TaskAnnouncement> knownAnnouncements) {
    if (workerHolders.containsKey(worker.getHost())) {
      return workerHolders.get(worker.getHost()).apply(smileMapper, httpClient, config, workersSyncExec, listener, worker, knownAnnouncements);
    } else {
      throw new ISE("No WorkerHolder for [%s].", worker.getHost());
    }
  }
};
taskRunner.start();
Assert.assertTrue(taskRunner.getTotalTaskSlotCount().isEmpty());
Assert.assertTrue(taskRunner.getIdleTaskSlotCount().isEmpty());
Assert.assertTrue(taskRunner.getUsedTaskSlotCount().isEmpty());
AtomicInteger ticks = new AtomicInteger();
DiscoveryDruidNode druidNode1 = new DiscoveryDruidNode(
    new DruidNode("service", "host1", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(
        WorkerNodeService.DISCOVERY_SERVICE_KEY,
        new WorkerNodeService("ip1", 1, "0", WorkerConfig.DEFAULT_CATEGORY)
    )
);
workerHolders.put(
    "host1:8080",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(
            task1,
            ImmutableList.of(
                TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.unknown()),
                TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.create("host1", 8080, -1))
            )
        ),
        ticks,
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode1));
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
taskRunner.run(task1);
while (ticks.get() < 1) {
Thread.sleep(100);
}
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
DiscoveryDruidNode druidNode2 = new DiscoveryDruidNode(
    new DruidNode("service", "host2", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(
        WorkerNodeService.DISCOVERY_SERVICE_KEY,
        new WorkerNodeService("ip2", 1, "0", additionalWorkerCategory)
    )
);
workerHolders.put(
    "host2:8080",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(task2, ImmutableList.of()),
        ticks,
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode2));
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
taskRunner.run(task2);
while (ticks.get() < 2) {
Thread.sleep(100);
}
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
DiscoveryDruidNode druidNode3 = new DiscoveryDruidNode(
    new DruidNode("service", "host3", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(
        WorkerNodeService.DISCOVERY_SERVICE_KEY,
        new WorkerNodeService("ip2", 1, "0", WorkerConfig.DEFAULT_CATEGORY)
    )
);
workerHolders.put(
    "host3:8080",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(),
        new AtomicInteger(),
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode3));
Assert.assertEquals(2, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(WorkerConfig.DEFAULT_CATEGORY));
Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(task1.getId(), Iterables.getOnlyElement(taskRunner.getRunningTasks()).getTaskId());
Assert.assertEquals(task2.getId(), Iterables.getOnlyElement(taskRunner.getPendingTasks()).getTaskId());
Assert.assertEquals("host3:8080", Iterables.getOnlyElement(taskRunner.markWorkersLazy(Predicates.alwaysTrue(), Integer.MAX_VALUE)).getHost());
Assert.assertEquals(2, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(1, taskRunner.getLazyTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(additionalWorkerCategory));
}
Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.
From class CommonCacheNotifier, method sendUpdate.
private List<ListenableFuture<StatusResponseHolder>> sendUpdate(String updatedAuthenticatorPrefix, byte[] serializedEntity) {
List<ListenableFuture<StatusResponseHolder>> futures = new ArrayList<>();
for (NodeRole nodeRole : NODE_TYPES) {
DruidNodeDiscovery nodeDiscovery = discoveryProvider.getForNodeRole(nodeRole);
Collection<DiscoveryDruidNode> nodes = nodeDiscovery.getAllNodes();
for (DiscoveryDruidNode node : nodes) {
URL listenerURL = getListenerURL(node.getDruidNode(), StringUtils.format(baseUrl, StringUtils.urlEncode(updatedAuthenticatorPrefix)));
// best effort, if this fails, remote node will poll and pick up the update eventually
Request req = new Request(HttpMethod.POST, listenerURL);
req.setContent(MediaType.APPLICATION_JSON, serializedEntity);
BasicAuthDBConfig itemConfig = itemConfigMap.get(updatedAuthenticatorPrefix);
ListenableFuture<StatusResponseHolder> future = httpClient.go(req, new ResponseHandler(), Duration.millis(itemConfig.getCacheNotificationTimeout()));
futures.add(future);
}
}
return futures;
}
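sendUpdate() resolves each discovered node to a listener URL via a private getListenerURL() helper that is not reproduced here. A minimal sketch, assuming it simply combines the node's scheme, host, and port with the already-encoded path (the real CommonCacheNotifier method may differ):

// Hedged sketch of a listener-URL helper; the actual implementation may wrap the exception differently.
private URL getListenerURL(DruidNode druidNode, String baseUrl) {
  try {
    // Assumption: DruidNode exposes the service scheme, host, and active port for building callback URLs.
    return new URL(druidNode.getServiceScheme(), druidNode.getHost(), druidNode.getPortToUse(), baseUrl);
  } catch (MalformedURLException mue) {
    throw new RuntimeException(mue);
  }
}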