Example 16 with ZkPathsConfig

Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.

From the class HttpRemoteTaskRunnerTest, method testWorkerDisapperAndReappearAfterItsCleanup.

@Test(timeout = 60_000L)
public void testWorkerDisapperAndReappearAfterItsCleanup() throws Exception {
    TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
    DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
    EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
    EasyMock.replay(druidNodeDiscoveryProvider);
    ConcurrentMap<String, CustomFunction> workerHolders = new ConcurrentHashMap<>();
    HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(TestHelper.makeJsonMapper(), new HttpRemoteTaskRunnerConfig() {

        @Override
        public Period getTaskCleanupTimeout() {
            return Period.millis(1);
        }
    }, EasyMock.createNiceMock(HttpClient.class), DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())), new NoopProvisioningStrategy<>(), druidNodeDiscoveryProvider, EasyMock.createNiceMock(TaskStorage.class), EasyMock.createNiceMock(CuratorFramework.class), new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)) {

        @Override
        protected WorkerHolder createWorkerHolder(ObjectMapper smileMapper, HttpClient httpClient, HttpRemoteTaskRunnerConfig config, ScheduledExecutorService workersSyncExec, WorkerHolder.Listener listener, Worker worker, List<TaskAnnouncement> knownAnnouncements) {
            if (workerHolders.containsKey(worker.getHost())) {
                return workerHolders.get(worker.getHost()).apply(smileMapper, httpClient, config, workersSyncExec, listener, worker, knownAnnouncements);
            } else {
                throw new ISE("No WorkerHolder for [%s].", worker.getHost());
            }
        }
    };
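    // Start the runner, then register a simulated worker at host:1234 whose holder reports both tasks as running once they are assigned.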
    taskRunner.start();
    Task task1 = NoopTask.create("task-id-1", 0);
    Task task2 = NoopTask.create("task-id-2", 0);
    DiscoveryDruidNode druidNode = new DiscoveryDruidNode(new DruidNode("service", "host", false, 1234, null, true, false), NodeRole.MIDDLE_MANAGER, ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip1", 2, "0", WorkerConfig.DEFAULT_CATEGORY)));
    workerHolders.put("host:1234", (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(mapper, httpClient, config, exec, listener, worker, knownAnnouncements, ImmutableList.of(), ImmutableMap.of(task1, ImmutableList.of(TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.unknown()), TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.create("host", 1234, 1235))), task2, ImmutableList.of(TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.unknown()), TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.create("host", 1234, 1235)))), new AtomicInteger(), ImmutableSet.of()));
    druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode));
    Future<TaskStatus> future1 = taskRunner.run(task1);
    Future<TaskStatus> future2 = taskRunner.run(task2);
    while (taskRunner.getPendingTasks().size() > 0) {
        Thread.sleep(100);
    }
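    // The worker disappears; with the 1 ms cleanup timeout, both tasks should fail with a cleanup-timeout error.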
    druidNodeDiscovery.getListeners().get(0).nodesRemoved(ImmutableList.of(druidNode));
    Assert.assertTrue(future1.get().isFailure());
    Assert.assertTrue(future2.get().isFailure());
    Assert.assertNotNull(future1.get().getErrorMsg());
    Assert.assertNotNull(future2.get().getErrorMsg());
    Assert.assertTrue(future1.get().getErrorMsg().startsWith("The worker that this task was assigned disappeared and did not report cleanup within timeout"));
    Assert.assertTrue(future2.get().getErrorMsg().startsWith("The worker that this task was assigned disappeared and did not report cleanup within timeout"));
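    // The worker reappears, now reporting task1 as succeeded and task2 as still running; the runner is expected to shut down only task2.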
    AtomicInteger ticks = new AtomicInteger();
    Set<String> actualShutdowns = new ConcurrentHashSet<>();
    workerHolders.put("host:1234", (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(mapper, httpClient, config, exec, listener, worker, knownAnnouncements, ImmutableList.of(TaskAnnouncement.create(task1, TaskStatus.success(task1.getId()), TaskLocation.create("host", 1234, 1235)), TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.create("host", 1234, 1235))), ImmutableMap.of(), ticks, actualShutdowns));
    druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode));
    while (ticks.get() < 1) {
        Thread.sleep(100);
    }
    Assert.assertEquals(ImmutableSet.of(task2.getId()), actualShutdowns);
    Assert.assertTrue(taskRunner.run(task1).get().isFailure());
    Assert.assertTrue(taskRunner.run(task2).get().isFailure());
}
Also used : IndexerZkConfig(org.apache.druid.server.initialization.IndexerZkConfig) Task(org.apache.druid.indexing.common.task.Task) NoopTask(org.apache.druid.indexing.common.task.NoopTask) TaskRunnerListener(org.apache.druid.indexing.overlord.TaskRunnerListener) WorkerNodeService(org.apache.druid.discovery.WorkerNodeService) CuratorFramework(org.apache.curator.framework.CuratorFramework) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) ConcurrentHashSet(org.eclipse.jetty.util.ConcurrentHashSet) Worker(org.apache.druid.indexing.worker.Worker) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) ISE(org.apache.druid.java.util.common.ISE) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Period(org.joda.time.Period) AtomicReference(java.util.concurrent.atomic.AtomicReference) TaskStatus(org.apache.druid.indexer.TaskStatus) HttpRemoteTaskRunnerConfig(org.apache.druid.indexing.overlord.config.HttpRemoteTaskRunnerConfig) TaskStorage(org.apache.druid.indexing.overlord.TaskStorage) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider) HttpClient(org.apache.druid.java.util.http.client.HttpClient) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) DruidNode(org.apache.druid.server.DruidNode) Test(org.junit.Test)

Example 17 with ZkPathsConfig

Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.

From the class HttpRemoteTaskRunnerTest, method createTaskRunnerForTestTaskAddedOrUpdated.

private HttpRemoteTaskRunner createTaskRunnerForTestTaskAddedOrUpdated(TaskStorage taskStorage, List<Object> listenerNotificationsAccumulator) {
    TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
    DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
    EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
    EasyMock.replay(druidNodeDiscoveryProvider);
    HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(TestHelper.makeJsonMapper(), new HttpRemoteTaskRunnerConfig() {

        @Override
        public int getPendingTasksRunnerNumThreads() {
            return 3;
        }
    }, EasyMock.createNiceMock(HttpClient.class), DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())), new NoopProvisioningStrategy<>(), druidNodeDiscoveryProvider, taskStorage, EasyMock.createNiceMock(CuratorFramework.class), new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null));
    taskRunner.start();
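    // When an accumulator is supplied, register a listener that records task location and status changes for later verification.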
    if (listenerNotificationsAccumulator != null) {
        taskRunner.registerListener(new TaskRunnerListener() {

            @Override
            public String getListenerId() {
                return "test-listener";
            }

            @Override
            public void locationChanged(String taskId, TaskLocation newLocation) {
                listenerNotificationsAccumulator.add(ImmutableList.of(taskId, newLocation));
            }

            @Override
            public void statusChanged(String taskId, TaskStatus status) {
                listenerNotificationsAccumulator.add(ImmutableList.of(taskId, status));
            }
        }, Execs.directExecutor());
    }
    return taskRunner;
}
Also used : IndexerZkConfig(org.apache.druid.server.initialization.IndexerZkConfig) TaskRunnerListener(org.apache.druid.indexing.overlord.TaskRunnerListener) AtomicReference(java.util.concurrent.atomic.AtomicReference) TaskStatus(org.apache.druid.indexer.TaskStatus) TaskLocation(org.apache.druid.indexer.TaskLocation) HttpRemoteTaskRunnerConfig(org.apache.druid.indexing.overlord.config.HttpRemoteTaskRunnerConfig) CuratorFramework(org.apache.curator.framework.CuratorFramework) DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) HttpClient(org.apache.druid.java.util.http.client.HttpClient)

Example 18 with ZkPathsConfig

Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.

From the class HttpRemoteTaskRunnerTest, method testMarkWorkersLazy.

@Test(timeout = 60_000L)
public void testMarkWorkersLazy() throws Exception {
    TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
    DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
    EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
    EasyMock.replay(druidNodeDiscoveryProvider);
    Task task1 = NoopTask.create("task-id-1", 0);
    Task task2 = NoopTask.create("task-id-2", 0);
    String additionalWorkerCategory = "category2";
    ConcurrentMap<String, CustomFunction> workerHolders = new ConcurrentHashMap<>();
    HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(TestHelper.makeJsonMapper(), new HttpRemoteTaskRunnerConfig() {

        @Override
        public int getPendingTasksRunnerNumThreads() {
            return 3;
        }
    }, EasyMock.createNiceMock(HttpClient.class), DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())), new NoopProvisioningStrategy<>(), druidNodeDiscoveryProvider, EasyMock.createNiceMock(TaskStorage.class), EasyMock.createNiceMock(CuratorFramework.class), new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)) {

        @Override
        protected WorkerHolder createWorkerHolder(ObjectMapper smileMapper, HttpClient httpClient, HttpRemoteTaskRunnerConfig config, ScheduledExecutorService workersSyncExec, WorkerHolder.Listener listener, Worker worker, List<TaskAnnouncement> knownAnnouncements) {
            if (workerHolders.containsKey(worker.getHost())) {
                return workerHolders.get(worker.getHost()).apply(smileMapper, httpClient, config, workersSyncExec, listener, worker, knownAnnouncements);
            } else {
                throw new ISE("No WorkerHolder for [%s].", worker.getHost());
            }
        }
    };
    taskRunner.start();
    Assert.assertTrue(taskRunner.getTotalTaskSlotCount().isEmpty());
    Assert.assertTrue(taskRunner.getIdleTaskSlotCount().isEmpty());
    Assert.assertTrue(taskRunner.getUsedTaskSlotCount().isEmpty());
    AtomicInteger ticks = new AtomicInteger();
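    // Worker 1: default worker category with a single task slot; task1 is assigned here.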
    DiscoveryDruidNode druidNode1 = new DiscoveryDruidNode(new DruidNode("service", "host1", false, 8080, null, true, false), NodeRole.MIDDLE_MANAGER, ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip1", 1, "0", WorkerConfig.DEFAULT_CATEGORY)));
    workerHolders.put("host1:8080", (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(mapper, httpClient, config, exec, listener, worker, knownAnnouncements, ImmutableList.of(), ImmutableMap.of(task1, ImmutableList.of(TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.unknown()), TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.create("host1", 8080, -1)))), ticks, ImmutableSet.of()));
    druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode1));
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    taskRunner.run(task1);
    while (ticks.get() < 1) {
        Thread.sleep(100);
    }
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
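    // Worker 2: a single slot in a separate category ("category2"); task2 is submitted to it below but never reports as running, so it stays pending.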
    DiscoveryDruidNode druidNode2 = new DiscoveryDruidNode(new DruidNode("service", "host2", false, 8080, null, true, false), NodeRole.MIDDLE_MANAGER, ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip2", 1, "0", additionalWorkerCategory)));
    workerHolders.put("host2:8080", (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(mapper, httpClient, config, exec, listener, worker, knownAnnouncements, ImmutableList.of(), ImmutableMap.of(task2, ImmutableList.of()), ticks, ImmutableSet.of()));
    druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode2));
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
    Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(additionalWorkerCategory).longValue());
    Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
    taskRunner.run(task2);
    while (ticks.get() < 2) {
        Thread.sleep(100);
    }
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
    Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
    Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
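    // Worker 3: default category with no tasks assigned, so it is the only worker that can be marked lazy below.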
    DiscoveryDruidNode druidNode3 = new DiscoveryDruidNode(new DruidNode("service", "host3", false, 8080, null, true, false), NodeRole.MIDDLE_MANAGER, ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip2", 1, "0", WorkerConfig.DEFAULT_CATEGORY)));
    workerHolders.put("host3:8080", (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(mapper, httpClient, config, exec, listener, worker, knownAnnouncements, ImmutableList.of(), ImmutableMap.of(), new AtomicInteger(), ImmutableSet.of()));
    druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode3));
    Assert.assertEquals(2, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
    Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
    Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
    Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(WorkerConfig.DEFAULT_CATEGORY));
    Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(additionalWorkerCategory));
    Assert.assertEquals(task1.getId(), Iterables.getOnlyElement(taskRunner.getRunningTasks()).getTaskId());
    Assert.assertEquals(task2.getId(), Iterables.getOnlyElement(taskRunner.getPendingTasks()).getTaskId());
    Assert.assertEquals("host3:8080", Iterables.getOnlyElement(taskRunner.markWorkersLazy(Predicates.alwaysTrue(), Integer.MAX_VALUE)).getHost());
    Assert.assertEquals(2, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
    Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
    Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
    Assert.assertEquals(1, taskRunner.getLazyTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
    Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(additionalWorkerCategory));
}
Also used : IndexerZkConfig(org.apache.druid.server.initialization.IndexerZkConfig) Task(org.apache.druid.indexing.common.task.Task) NoopTask(org.apache.druid.indexing.common.task.NoopTask) TaskRunnerListener(org.apache.druid.indexing.overlord.TaskRunnerListener) WorkerNodeService(org.apache.druid.discovery.WorkerNodeService) CuratorFramework(org.apache.curator.framework.CuratorFramework) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) Worker(org.apache.druid.indexing.worker.Worker) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) ISE(org.apache.druid.java.util.common.ISE) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) AtomicReference(java.util.concurrent.atomic.AtomicReference) HttpRemoteTaskRunnerConfig(org.apache.druid.indexing.overlord.config.HttpRemoteTaskRunnerConfig) TaskStorage(org.apache.druid.indexing.overlord.TaskStorage) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider) HttpClient(org.apache.druid.java.util.http.client.HttpClient) DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode) DruidNode(org.apache.druid.server.DruidNode) Test(org.junit.Test)

Example 19 with ZkPathsConfig

Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.

From the class WorkerTaskMonitorTest, method setUp.

@Before
public void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder().connectString(testingCluster.getConnectString()).retryPolicy(new ExponentialBackoffRetry(1, 10)).compressionProvider(new PotentiallyGzippedCompressionProvider(false)).build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);
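    // Create a test worker and announce it in ZooKeeper under BASE_PATH via the curator coordinator.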
    worker = new Worker("http", "worker", "localhost", 3, "0", WorkerConfig.DEFAULT_CATEGORY);
    workerCuratorCoordinator = new WorkerCuratorCoordinator(jsonMapper, new IndexerZkConfig(new ZkPathsConfig() {

        @Override
        public String getBase() {
            return BASE_PATH;
        }
    }, null, null, null, null), new TestRemoteTaskRunnerConfig(new Period("PT1S")), cf, worker);
    workerCuratorCoordinator.start();
    // Start a task monitor
    workerTaskMonitor = createTaskMonitor();
    TestTasks.registerSubtypes(jsonMapper);
    jsonMapper.registerSubtypes(new NamedType(TestRealtimeTask.class, "test_realtime"));
    workerTaskMonitor.start();
    task = TestTasks.immediateSuccess("test");
}
Also used : IndexerZkConfig(org.apache.druid.server.initialization.IndexerZkConfig) TestRemoteTaskRunnerConfig(org.apache.druid.indexing.overlord.TestRemoteTaskRunnerConfig) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) NamedType(com.fasterxml.jackson.databind.jsontype.NamedType) Period(org.joda.time.Period) PotentiallyGzippedCompressionProvider(org.apache.druid.curator.PotentiallyGzippedCompressionProvider) TestRealtimeTask(org.apache.druid.indexing.common.TestRealtimeTask) TestingCluster(org.apache.curator.test.TestingCluster) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) Before(org.junit.Before)

Example 20 with ZkPathsConfig

Use of org.apache.druid.server.initialization.ZkPathsConfig in project druid by druid-io.

From the class BatchServerInventoryViewTest, method testSameTimeZnode.

@Test
public void testSameTimeZnode() throws Exception {
    final int numThreads = INITIAL_SEGMENTS / 10;
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
    segmentAnnouncer.announceSegments(testSegments);
    waitForSync(batchServerInventoryView, testSegments);
    DruidServer server = Iterables.get(batchServerInventoryView.getInventory(), 0);
    final Set<DataSegment> segments = Sets.newHashSet(server.iterateAllSegments());
    Assert.assertEquals(testSegments, segments);
    final CountDownLatch latch = new CountDownLatch(numThreads);
    final List<ListenableFuture<BatchDataSegmentAnnouncer>> futures = new ArrayList<>();
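    // Each thread announces its own batch of segments through a separate BatchDataSegmentAnnouncer; the latch lines the threads up so the announcements land at effectively the same time.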
    for (int i = 0; i < numThreads; ++i) {
        final int ii = i;
        futures.add(executor.submit(new Callable<BatchDataSegmentAnnouncer>() {

            @Override
            public BatchDataSegmentAnnouncer call() {
                BatchDataSegmentAnnouncer segmentAnnouncer = new BatchDataSegmentAnnouncer(new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0), new BatchDataSegmentAnnouncerConfig() {

                    @Override
                    public int getSegmentsPerNode() {
                        return 50;
                    }
                }, new ZkPathsConfig() {

                    @Override
                    public String getBase() {
                        return TEST_BASE_PATH;
                    }
                }, announcer, jsonMapper);
                List<DataSegment> segments = new ArrayList<DataSegment>();
                try {
                    for (int j = 0; j < INITIAL_SEGMENTS / numThreads; ++j) {
                        segments.add(makeSegment(INITIAL_SEGMENTS + ii + numThreads * j));
                    }
                    latch.countDown();
                    latch.await();
                    segmentAnnouncer.announceSegments(segments);
                    testSegments.addAll(segments);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                return segmentAnnouncer;
            }
        }));
    }
    final List<BatchDataSegmentAnnouncer> announcers = Futures.allAsList(futures).get();
    Assert.assertEquals(INITIAL_SEGMENTS * 2, testSegments.size());
    waitForSync(batchServerInventoryView, testSegments);
    Assert.assertEquals(testSegments, Sets.newHashSet(server.iterateAllSegments()));
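    // Unannounce a batch of segments and verify the inventory view syncs down to the remaining set.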
    for (int i = 0; i < INITIAL_SEGMENTS; ++i) {
        final DataSegment segment = makeSegment(100 + i);
        segmentAnnouncer.unannounceSegment(segment);
        testSegments.remove(segment);
    }
    waitForSync(batchServerInventoryView, testSegments);
    Assert.assertEquals(testSegments, Sets.newHashSet(server.iterateAllSegments()));
}
Also used : ArrayList(java.util.ArrayList) BatchDataSegmentAnnouncerConfig(org.apache.druid.server.initialization.BatchDataSegmentAnnouncerConfig) DruidServer(org.apache.druid.client.DruidServer) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) Callable(java.util.concurrent.Callable) ExpectedException(org.junit.rules.ExpectedException) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) BatchDataSegmentAnnouncer(org.apache.druid.server.coordination.BatchDataSegmentAnnouncer) Test(org.junit.Test)

Aggregations

ZkPathsConfig (org.apache.druid.server.initialization.ZkPathsConfig): 26 uses
AtomicReference (java.util.concurrent.atomic.AtomicReference): 16 uses
DruidNode (org.apache.druid.server.DruidNode): 15 uses
Test (org.junit.Test): 15 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 14 uses
CuratorFramework (org.apache.curator.framework.CuratorFramework): 13 uses
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 12 uses
IndexerZkConfig (org.apache.druid.server.initialization.IndexerZkConfig): 12 uses
ImmutableList (com.google.common.collect.ImmutableList): 11 uses
List (java.util.List): 11 uses
ArrayList (java.util.ArrayList): 10 uses
HttpClient (org.apache.druid.java.util.http.client.HttpClient): 10 uses
Before (org.junit.Before): 10 uses
DruidNodeDiscoveryProvider (org.apache.druid.discovery.DruidNodeDiscoveryProvider): 9 uses
TaskRunnerListener (org.apache.druid.indexing.overlord.TaskRunnerListener): 9 uses
HttpRemoteTaskRunnerConfig (org.apache.druid.indexing.overlord.config.HttpRemoteTaskRunnerConfig): 9 uses
Worker (org.apache.druid.indexing.worker.Worker): 9 uses
DiscoveryDruidNode (org.apache.druid.discovery.DiscoveryDruidNode): 8 uses
TaskStorage (org.apache.druid.indexing.overlord.TaskStorage): 8 uses
CoordinatorCustomDutyGroups (org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups): 8 uses
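
The examples above share one construction pattern: a ZkPathsConfig, often with getBase() overridden to point at a test ZooKeeper path, wrapped in an IndexerZkConfig whose remaining path arguments are left null so that defaults derived from the base are used. The following is a minimal sketch of that pattern; the class name ZkPathsConfigSketch and the BASE_PATH value are illustrative and not taken from the Druid sources above.

import org.apache.druid.server.initialization.IndexerZkConfig;
import org.apache.druid.server.initialization.ZkPathsConfig;

public class ZkPathsConfigSketch {

    // Illustrative base path; each test above supplies its own constant.
    private static final String BASE_PATH = "/test/druid";

    static IndexerZkConfig testIndexerZkConfig() {
        ZkPathsConfig zkPaths = new ZkPathsConfig() {

            @Override
            public String getBase() {
                // Point all derived ZK paths at the test base znode.
                return BASE_PATH;
            }
        };
        // The remaining arguments are null so the paths default to values derived
        // from the ZkPathsConfig base, matching the calls in the examples above.
        return new IndexerZkConfig(zkPaths, null, null, null, null);
    }
}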