Search in sources:

Example 1 with DiscoveryDruidNode

Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.

The class TieredBrokerHostSelectorTest, method setUp.

@Before
public void setUp() {
    druidNodeDiscoveryProvider = EasyMock.createStrictMock(DruidNodeDiscoveryProvider.class);
    node1 = new DiscoveryDruidNode(new DruidNode("hotBroker", "hotHost", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
    node2 = new DiscoveryDruidNode(new DruidNode("coldBroker", "coldHost1", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
    node3 = new DiscoveryDruidNode(new DruidNode("coldBroker", "coldHost2", false, 8080, null, true, false), NodeRole.BROKER, ImmutableMap.of());
    // Stub discovery that reports all three brokers and immediately initializes the node view.
    druidNodeDiscovery = new DruidNodeDiscovery() {

        @Override
        public Collection<DiscoveryDruidNode> getAllNodes() {
            return ImmutableSet.of(node1, node2, node3);
        }

        @Override
        public void registerListener(Listener listener) {
            listener.nodesAdded(ImmutableList.of(node1, node2, node3));
            listener.nodeViewInitialized();
        }
    };
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER)).andReturn(druidNodeDiscovery);
    EasyMock.replay(druidNodeDiscoveryProvider);
    brokerSelector = new TieredBrokerHostSelector(new TestRuleManager(null, null), new TieredBrokerConfig() {

        @Override
        public LinkedHashMap<String, String> getTierToBrokerMap() {
            return new LinkedHashMap<String, String>(ImmutableMap.of("hot", "hotBroker", "medium", "mediumBroker", DruidServer.DEFAULT_TIER, "coldBroker"));
        }

        @Override
        public String getDefaultBrokerServiceName() {
            return "hotBroker";
        }
    }, druidNodeDiscoveryProvider, Arrays.asList(new ManualTieredBrokerSelectorStrategy(null), new TimeBoundaryTieredBrokerSelectorStrategy(), new PriorityTieredBrokerSelectorStrategy(0, 1)));
    brokerSelector.start();
}
Also used: DruidNodeDiscovery(org.apache.druid.discovery.DruidNodeDiscovery), LinkedHashMap(java.util.LinkedHashMap), DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode), DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider), Collection(java.util.Collection), DruidNode(org.apache.druid.server.DruidNode), Before(org.junit.Before)
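
For orientation, the Listener contract exercised above can also be implemented on its own. The sketch below is not from the Druid sources; it uses only the three callbacks visible in these examples (nodesAdded, nodesRemoved, nodeViewInitialized) and tracks the currently known nodes in a concurrent set.

// Minimal sketch of a DruidNodeDiscovery.Listener; illustrative only, not Druid source.
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.druid.discovery.DiscoveryDruidNode;
import org.apache.druid.discovery.DruidNodeDiscovery;

public class TrackingListener implements DruidNodeDiscovery.Listener {

    // Concurrent set because discovery callbacks may arrive on another thread.
    private final Set<DiscoveryDruidNode> knownNodes = ConcurrentHashMap.newKeySet();

    @Override
    public void nodesAdded(Collection<DiscoveryDruidNode> nodes) {
        knownNodes.addAll(nodes);
    }

    @Override
    public void nodesRemoved(Collection<DiscoveryDruidNode> nodes) {
        knownNodes.removeAll(nodes);
    }

    @Override
    public void nodeViewInitialized() {
        // Invoked once the initial snapshot of nodes has been delivered (see Example 1 above).
    }

    public Set<DiscoveryDruidNode> getKnownNodes() {
        return knownNodes;
    }
}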

Example 2 with DiscoveryDruidNode

Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.

The class TieredBrokerHostSelector, method start.

@LifecycleStart
public void start() {
    synchronized (lock) {
        if (started) {
            return;
        }
        for (Map.Entry<String, String> entry : tierConfig.getTierToBrokerMap().entrySet()) {
            servers.put(entry.getValue(), new NodesHolder());
        }
        DruidNodeDiscovery druidNodeDiscovery = druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER);
        // Keep each broker service's NodesHolder in sync with discovery events.
        druidNodeDiscovery.registerListener(new DruidNodeDiscovery.Listener() {

            @Override
            public void nodesAdded(Collection<DiscoveryDruidNode> nodes) {
                nodes.forEach((node) -> {
                    NodesHolder nodesHolder = servers.get(node.getDruidNode().getServiceName());
                    if (nodesHolder != null) {
                        nodesHolder.add(node.getDruidNode().getHostAndPortToUse(), TO_SERVER.apply(node));
                    }
                });
            }

            @Override
            public void nodesRemoved(Collection<DiscoveryDruidNode> nodes) {
                nodes.forEach((node) -> {
                    NodesHolder nodesHolder = servers.get(node.getDruidNode().getServiceName());
                    if (nodesHolder != null) {
                        nodesHolder.remove(node.getDruidNode().getHostAndPortToUse());
                    }
                });
            }
        });
        started = true;
    }
}
Also used: DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode), Iterables(com.google.common.collect.Iterables), DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider), Inject(com.google.inject.Inject), HashMap(java.util.HashMap), LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart), Pair(org.apache.druid.java.util.common.Pair), Interval(org.joda.time.Interval), DruidNodeDiscovery(org.apache.druid.discovery.DruidNodeDiscovery), ImmutableList(com.google.common.collect.ImmutableList), Query(org.apache.druid.query.Query), LifecycleStop(org.apache.druid.java.util.common.lifecycle.LifecycleStop), AtomicInteger(java.util.concurrent.atomic.AtomicInteger), Optional(com.google.common.base.Optional), Map(java.util.Map), DateTimes(org.apache.druid.java.util.common.DateTimes), SqlQuery(org.apache.druid.sql.http.SqlQuery), Function(com.google.common.base.Function), EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger), Collection(java.util.Collection), DateTime(org.joda.time.DateTime), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), Rule(org.apache.druid.server.coordinator.rules.Rule), Maps(com.google.common.collect.Maps), QueryContexts(org.apache.druid.query.QueryContexts), List(java.util.List), NodeRole(org.apache.druid.discovery.NodeRole), LoadRule(org.apache.druid.server.coordinator.rules.LoadRule), Server(org.apache.druid.client.selector.Server)
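
NodesHolder itself is not shown in this snippet. Below is a plausible minimal version, assuming round-robin selection over the registered servers; the class name, the pick() method, and the internals are assumptions for illustration, not the actual Druid implementation.

// Hypothetical sketch of a NodesHolder-like structure; the real Druid class may differ.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.druid.client.selector.Server;

class NodesHolderSketch {

    // Keyed by host:port so remove() needs only the host and port, as in the listener above.
    private final ConcurrentMap<String, Server> servers = new ConcurrentHashMap<>();
    private final AtomicInteger roundRobinIndex = new AtomicInteger();

    void add(String hostAndPort, Server server) {
        servers.put(hostAndPort, server);
    }

    void remove(String hostAndPort) {
        servers.remove(hostAndPort);
    }

    // Round-robin pick; floorMod keeps the index valid even if the counter overflows.
    Server pick() {
        List<Server> snapshot = new ArrayList<>(servers.values());
        if (snapshot.isEmpty()) {
            return null;
        }
        return snapshot.get(Math.floorMod(roundRobinIndex.getAndIncrement(), snapshot.size()));
    }
}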

Example 3 with DiscoveryDruidNode

Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.

The class HttpServerInventoryView, method start.

@LifecycleStart
public void start() {
    synchronized (lifecycleLock) {
        if (!lifecycleLock.canStart()) {
            throw new ISE("can't start.");
        }
        log.info("Starting %s.", execNamePrefix);
        try {
            executor = ScheduledExecutors.fixed(config.getNumThreads(), execNamePrefix + "-%s");
            DruidNodeDiscovery druidNodeDiscovery = druidNodeDiscoveryProvider.getForService(DataNodeService.DISCOVERY_SERVICE_KEY);
            druidNodeDiscovery.registerListener(new DruidNodeDiscovery.Listener() {

                private final AtomicBoolean initialized = new AtomicBoolean(false);

                @Override
                public void nodesAdded(Collection<DiscoveryDruidNode> nodes) {
                    nodes.forEach(node -> serverAdded(toDruidServer(node)));
                }

                @Override
                public void nodesRemoved(Collection<DiscoveryDruidNode> nodes) {
                    nodes.forEach(node -> serverRemoved(toDruidServer(node)));
                }

                @Override
                public void nodeViewInitialized() {
                    if (!initialized.getAndSet(true)) {
                        executor.execute(HttpServerInventoryView.this::serverInventoryInitialized);
                    }
                }

                private DruidServer toDruidServer(DiscoveryDruidNode node) {
                    // Extract the DataNodeService once instead of repeating the lookup and cast.
                    final DataNodeService dataNodeService =
                            (DataNodeService) node.getServices().get(DataNodeService.DISCOVERY_SERVICE_KEY);
                    return new DruidServer(
                            node.getDruidNode().getHostAndPortToUse(),
                            node.getDruidNode().getHostAndPort(),
                            node.getDruidNode().getHostAndTlsPort(),
                            dataNodeService.getMaxSize(),
                            dataNodeService.getServerType(),
                            dataNodeService.getTier(),
                            dataNodeService.getPriority()
                    );
                }
            });
            scheduleSyncMonitoring();
            lifecycleLock.started();
        } finally {
            lifecycleLock.exitStart();
        }
        log.info("Started %s.", execNamePrefix);
    }
}
Also used: DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode), DruidNodeDiscoveryProvider(org.apache.druid.discovery.DruidNodeDiscoveryProvider), ScheduledExecutors(org.apache.druid.java.util.common.concurrent.ScheduledExecutors), HttpClient(org.apache.druid.java.util.http.client.HttpClient), ChangeRequestHttpSyncer(org.apache.druid.server.coordination.ChangeRequestHttpSyncer), URL(java.net.URL), ChangeRequestsSnapshot(org.apache.druid.server.coordination.ChangeRequestsSnapshot), AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean), Collections2(com.google.common.collect.Collections2), LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart), SegmentChangeRequestLoad(org.apache.druid.server.coordination.SegmentChangeRequestLoad), Pair(org.apache.druid.java.util.common.Pair), ArrayList(java.util.ArrayList), ConcurrentMap(java.util.concurrent.ConcurrentMap), DruidNodeDiscovery(org.apache.druid.discovery.DruidNodeDiscovery), LifecycleStop(org.apache.druid.java.util.common.lifecycle.LifecycleStop), DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata), Map(java.util.Map), ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService), Predicates(com.google.common.base.Predicates), TypeReference(com.fasterxml.jackson.core.type.TypeReference), IAE(org.apache.druid.java.util.common.IAE), Function(com.google.common.base.Function), EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger), Iterator(java.util.Iterator), RE(org.apache.druid.java.util.common.RE), MalformedURLException(java.net.MalformedURLException), Executor(java.util.concurrent.Executor), Collection(java.util.Collection), ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), ISE(org.apache.druid.java.util.common.ISE), SegmentChangeRequestDrop(org.apache.druid.server.coordination.SegmentChangeRequestDrop), Maps(com.google.common.collect.Maps), HostAndPort(com.google.common.net.HostAndPort), TimeUnit(java.util.concurrent.TimeUnit), DataNodeService(org.apache.druid.discovery.DataNodeService), List(java.util.List), Predicate(com.google.common.base.Predicate), LifecycleLock(org.apache.druid.concurrent.LifecycleLock), DataSegment(org.apache.druid.timeline.DataSegment), Preconditions(com.google.common.base.Preconditions), SegmentId(org.apache.druid.timeline.SegmentId), DataSegmentChangeRequest(org.apache.druid.server.coordination.DataSegmentChangeRequest)
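
The lifecycleLock sequence in this example (canStart, started inside try, exitStart in finally) is a recurring Druid idiom. It is distilled below using only the LifecycleLock calls visible here; the class name and the commented body are placeholders.

// The LifecycleLock start idiom distilled from the example above; placeholder body.
import org.apache.druid.concurrent.LifecycleLock;
import org.apache.druid.java.util.common.ISE;

class LifecycleStartIdiom {

    private final LifecycleLock lifecycleLock = new LifecycleLock();

    public void start() {
        if (!lifecycleLock.canStart()) {
            throw new ISE("can't start.");
        }
        try {
            // ... allocate executors, register discovery listeners ...
            lifecycleLock.started(); // mark the start as successful
        } finally {
            lifecycleLock.exitStart(); // always leave the start phase, on success or failure
        }
    }
}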

Example 4 with DiscoveryDruidNode

Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.

The class CuratorDruidNodeAnnouncerAndDiscoveryTest, method testAnnouncementAndDiscovery.

@Test(timeout = 60_000L)
public void testAnnouncementAndDiscovery() throws Exception {
    ObjectMapper objectMapper = new DefaultObjectMapper();
    // additional setup to serde DruidNode
    objectMapper.setInjectableValues(
            new InjectableValues.Std()
                    .addValue(ServerConfig.class, new ServerConfig())
                    .addValue("java.lang.String", "dummy")
                    .addValue("java.lang.Integer", 1234)
                    .addValue(ObjectMapper.class, objectMapper)
    );
    curator.start();
    curator.blockUntilConnected();
    Announcer announcer = new Announcer(curator, Execs.directExecutor());
    announcer.start();
    CuratorDruidNodeAnnouncer druidNodeAnnouncer = new CuratorDruidNodeAnnouncer(announcer, new ZkPathsConfig(), objectMapper);
    DiscoveryDruidNode coordinatorNode1 = new DiscoveryDruidNode(new DruidNode("s1", "h1", false, 8080, null, true, false), NodeRole.COORDINATOR, ImmutableMap.of());
    DiscoveryDruidNode coordinatorNode2 = new DiscoveryDruidNode(new DruidNode("s2", "h2", false, 8080, null, true, false), NodeRole.COORDINATOR, ImmutableMap.of());
    DiscoveryDruidNode overlordNode1 = new DiscoveryDruidNode(new DruidNode("s3", "h3", false, 8080, null, true, false), NodeRole.OVERLORD, ImmutableMap.of());
    DiscoveryDruidNode overlordNode2 = new DiscoveryDruidNode(new DruidNode("s4", "h4", false, 8080, null, true, false), NodeRole.OVERLORD, ImmutableMap.of());
    druidNodeAnnouncer.announce(coordinatorNode1);
    druidNodeAnnouncer.announce(overlordNode1);
    CuratorDruidNodeDiscoveryProvider druidNodeDiscoveryProvider = new CuratorDruidNodeDiscoveryProvider(curator, new ZkPathsConfig(), objectMapper);
    druidNodeDiscoveryProvider.start();
    DruidNodeDiscovery coordDiscovery = druidNodeDiscoveryProvider.getForNodeRole(NodeRole.COORDINATOR);
    BooleanSupplier coord1NodeDiscovery = druidNodeDiscoveryProvider.getForNode(coordinatorNode1.getDruidNode(), NodeRole.COORDINATOR);
    DruidNodeDiscovery overlordDiscovery = druidNodeDiscoveryProvider.getForNodeRole(NodeRole.OVERLORD);
    BooleanSupplier overlord1NodeDiscovery = druidNodeDiscoveryProvider.getForNode(overlordNode1.getDruidNode(), NodeRole.OVERLORD);
    while (!checkNodes(ImmutableSet.of(coordinatorNode1), coordDiscovery.getAllNodes()) && !coord1NodeDiscovery.getAsBoolean()) {
        Thread.sleep(100);
    }
    while (!checkNodes(ImmutableSet.of(overlordNode1), overlordDiscovery.getAllNodes()) && !overlord1NodeDiscovery.getAsBoolean()) {
        Thread.sleep(100);
    }
    HashSet<DiscoveryDruidNode> coordNodes = new HashSet<>();
    coordDiscovery.registerListener(createSetAggregatingListener(coordNodes));
    HashSet<DiscoveryDruidNode> overlordNodes = new HashSet<>();
    overlordDiscovery.registerListener(createSetAggregatingListener(overlordNodes));
    while (!checkNodes(ImmutableSet.of(coordinatorNode1), coordNodes)) {
        Thread.sleep(100);
    }
    while (!checkNodes(ImmutableSet.of(overlordNode1), overlordNodes)) {
        Thread.sleep(100);
    }
    druidNodeAnnouncer.announce(coordinatorNode2);
    druidNodeAnnouncer.announce(overlordNode2);
    while (!checkNodes(ImmutableSet.of(coordinatorNode1, coordinatorNode2), coordDiscovery.getAllNodes())) {
        Thread.sleep(100);
    }
    while (!checkNodes(ImmutableSet.of(overlordNode1, overlordNode2), overlordDiscovery.getAllNodes())) {
        Thread.sleep(100);
    }
    while (!checkNodes(ImmutableSet.of(coordinatorNode1, coordinatorNode2), coordNodes)) {
        Thread.sleep(100);
    }
    while (!checkNodes(ImmutableSet.of(overlordNode1, overlordNode2), overlordNodes)) {
        Thread.sleep(100);
    }
    druidNodeAnnouncer.unannounce(coordinatorNode1);
    druidNodeAnnouncer.unannounce(coordinatorNode2);
    druidNodeAnnouncer.unannounce(overlordNode1);
    druidNodeAnnouncer.unannounce(overlordNode2);
    while (!checkNodes(ImmutableSet.of(), coordDiscovery.getAllNodes())) {
        Thread.sleep(100);
    }
    while (!checkNodes(ImmutableSet.of(), overlordDiscovery.getAllNodes())) {
        Thread.sleep(100);
    }
    while (!coordNodes.isEmpty()) {
        Thread.sleep(100);
    }
    while (!overlordNodes.isEmpty()) {
        Thread.sleep(100);
    }
    druidNodeDiscoveryProvider.stop();
    announcer.stop();
}
Also used: DruidNodeDiscovery(org.apache.druid.discovery.DruidNodeDiscovery), InjectableValues(com.fasterxml.jackson.databind.InjectableValues), ServerConfig(org.apache.druid.server.initialization.ServerConfig), Announcer(org.apache.druid.curator.announcement.Announcer), DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode), ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig), DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper), DruidNode(org.apache.druid.server.DruidNode), BooleanSupplier(java.util.function.BooleanSupplier), ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper), HashSet(java.util.HashSet), Test(org.junit.Test)
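
The createSetAggregatingListener helper is referenced but not shown in this snippet. A plausible implementation, consistent with the Listener callbacks used elsewhere on this page, might look like the following; the body is an assumption, not the actual test source.

// Hypothetical sketch of the createSetAggregatingListener helper; not shown in the snippet above.
private static DruidNodeDiscovery.Listener createSetAggregatingListener(Set<DiscoveryDruidNode> nodes) {
    return new DruidNodeDiscovery.Listener() {
        @Override
        public void nodesAdded(Collection<DiscoveryDruidNode> added) {
            nodes.addAll(added);
        }

        @Override
        public void nodesRemoved(Collection<DiscoveryDruidNode> removed) {
            nodes.removeAll(removed);
        }

        @Override
        public void nodeViewInitialized() {
            // No-op: the test only polls the aggregated set.
        }
    };
}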

Example 5 with DiscoveryDruidNode

Use of org.apache.druid.discovery.DiscoveryDruidNode in project druid by druid-io.

The class RealtimeIndexTask, method run.

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    runThread = Thread.currentThread();
    if (this.plumber != null) {
        throw new IllegalStateException("Plumber must be null");
    }
    setupTimeoutAlert();
    boolean normalExit = true;
    // It would be nice to get the PlumberSchool in the constructor.  Although that will need jackson injectables for
    // stuff like the ServerView, which seems kind of odd?  Perhaps revisit this when Guice has been introduced.
    final SegmentPublisher segmentPublisher = new TaskActionSegmentPublisher(toolbox);
    // NOTE: We talk to the coordinator in various places in the plumber and we could be more robust to issues
    // with the coordinator.  Right now, we'll block/throw in whatever thread triggered the coordinator behavior,
    // which will typically be either the main data processing loop or the persist thread.
    // Wrap default DataSegmentAnnouncer such that we unlock intervals as we unannounce segments
    final long lockTimeoutMs = getContextValue(Tasks.LOCK_TIMEOUT_KEY, Tasks.DEFAULT_LOCK_TIMEOUT_MILLIS);
    // Note: if lockTimeoutMs is larger than ServerConfig.maxIdleTime, http timeout error can occur while waiting for a
    // lock to be acquired.
    final DataSegmentAnnouncer lockingSegmentAnnouncer = new DataSegmentAnnouncer() {

        @Override
        public void announceSegment(final DataSegment segment) throws IOException {
            // Side effect: Calling announceSegment causes a lock to be acquired
            final TaskLock lock = Preconditions.checkNotNull(toolbox.getTaskActionClient().submit(new TimeChunkLockAcquireAction(TaskLockType.EXCLUSIVE, segment.getInterval(), lockTimeoutMs)), "Cannot acquire a lock for interval[%s]", segment.getInterval());
            if (lock.isRevoked()) {
                throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", segment.getInterval()));
            }
            toolbox.getSegmentAnnouncer().announceSegment(segment);
        }

        @Override
        public void unannounceSegment(final DataSegment segment) throws IOException {
            try {
                toolbox.getSegmentAnnouncer().unannounceSegment(segment);
            } finally {
                toolbox.getTaskActionClient().submit(new LockReleaseAction(segment.getInterval()));
            }
        }

        @Override
        public void announceSegments(Iterable<DataSegment> segments) throws IOException {
            // Side effect: Calling announceSegments causes locks to be acquired
            for (DataSegment segment : segments) {
                final TaskLock lock = Preconditions.checkNotNull(toolbox.getTaskActionClient().submit(new TimeChunkLockAcquireAction(TaskLockType.EXCLUSIVE, segment.getInterval(), lockTimeoutMs)), "Cannot acquire a lock for interval[%s]", segment.getInterval());
                if (lock.isRevoked()) {
                    throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", segment.getInterval()));
                }
            }
            toolbox.getSegmentAnnouncer().announceSegments(segments);
        }

        @Override
        public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {
            try {
                toolbox.getSegmentAnnouncer().unannounceSegments(segments);
            } finally {
                for (DataSegment segment : segments) {
                    toolbox.getTaskActionClient().submit(new LockReleaseAction(segment.getInterval()));
                }
            }
        }
    };
    // NOTE: getVersion will block if there is lock contention, which will block plumber.getSink
    // NOTE: (and thus the firehose)
    // Shouldn't usually happen, since we don't expect people to submit tasks that intersect with the
    // realtime window, but if they do it can be problematic. If we decide to care, we can use more threads in
    // the plumber such that waiting for the coordinator doesn't block data processing.
    final VersioningPolicy versioningPolicy = new VersioningPolicy() {

        @Override
        public String getVersion(final Interval interval) {
            try {
                // Side effect: Calling getVersion causes a lock to be acquired
                final TimeChunkLockAcquireAction action = new TimeChunkLockAcquireAction(TaskLockType.EXCLUSIVE, interval, lockTimeoutMs);
                final TaskLock lock = Preconditions.checkNotNull(toolbox.getTaskActionClient().submit(action), "Cannot acquire a lock for interval[%s]", interval);
                if (lock.isRevoked()) {
                    throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", interval));
                }
                return lock.getVersion();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
    DataSchema dataSchema = spec.getDataSchema();
    RealtimeIOConfig realtimeIOConfig = spec.getIOConfig();
    RealtimeTuningConfig tuningConfig = spec.getTuningConfig().withBasePersistDirectory(toolbox.getPersistDir()).withVersioningPolicy(versioningPolicy);
    final FireDepartment fireDepartment = new FireDepartment(dataSchema, realtimeIOConfig, tuningConfig);
    this.metrics = fireDepartment.getMetrics();
    final RealtimeMetricsMonitor metricsMonitor = TaskRealtimeMetricsMonitorBuilder.build(this, fireDepartment);
    this.queryRunnerFactoryConglomerate = toolbox.getQueryRunnerFactoryConglomerate();
    // NOTE: This pusher selects path based purely on global configuration and the DataSegment, which means
    // NOTE: that redundant realtime tasks will upload to the same location. This can cause index.zip
    // NOTE: (partitionNum_index.zip for HDFS data storage) to mismatch, or it can cause historical nodes to load
    // NOTE: different instances of the "same" segment.
    final PlumberSchool plumberSchool = new RealtimePlumberSchool(
            toolbox.getEmitter(),
            toolbox.getQueryRunnerFactoryConglomerate(),
            toolbox.getSegmentPusher(),
            lockingSegmentAnnouncer,
            segmentPublisher,
            toolbox.getSegmentHandoffNotifierFactory(),
            toolbox.getQueryProcessingPool(),
            toolbox.getJoinableFactory(),
            toolbox.getIndexMergerV9(),
            toolbox.getIndexIO(),
            toolbox.getCache(),
            toolbox.getCacheConfig(),
            toolbox.getCachePopulatorStats(),
            toolbox.getJsonMapper()
    );
    this.plumber = plumberSchool.findPlumber(dataSchema, tuningConfig, metrics);
    final Supplier<Committer> committerSupplier = Committers.nilSupplier();
    LookupNodeService lookupNodeService = getContextValue(CTX_KEY_LOOKUP_TIER) == null
            ? toolbox.getLookupNodeService()
            : new LookupNodeService((String) getContextValue(CTX_KEY_LOOKUP_TIER));
    DiscoveryDruidNode discoveryDruidNode = new DiscoveryDruidNode(
            toolbox.getDruidNode(),
            NodeRole.PEON,
            ImmutableMap.of(
                    toolbox.getDataNodeService().getName(), toolbox.getDataNodeService(),
                    lookupNodeService.getName(), lookupNodeService
            )
    );
    try {
        toolbox.getDataSegmentServerAnnouncer().announce();
        toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);
        plumber.startJob();
        // Set up metrics emission
        toolbox.addMonitor(metricsMonitor);
        // Delay firehose connection to avoid claiming input resources while the plumber is starting up.
        final FirehoseFactory firehoseFactory = spec.getIOConfig().getFirehoseFactory();
        final boolean firehoseDrainableByClosing = isFirehoseDrainableByClosing(firehoseFactory);
        // Skip connecting firehose if we've been stopped before we got started.
        synchronized (this) {
            if (!gracefullyStopped) {
                firehose = firehoseFactory.connect(Preconditions.checkNotNull(spec.getDataSchema().getParser(), "inputRowParser"), toolbox.getIndexingTmpDir());
            }
        }
        // Time to read data!
        while (firehose != null && (!gracefullyStopped || firehoseDrainableByClosing) && firehose.hasMore()) {
            Plumbers.addNextRow(committerSupplier, firehose, plumber, tuningConfig.isReportParseExceptions(), metrics);
        }
    } catch (Throwable e) {
        normalExit = false;
        log.makeAlert(e, "Exception aborted realtime processing[%s]", dataSchema.getDataSource()).emit();
        throw e;
    } finally {
        if (normalExit) {
            try {
                // Persist if we had actually started.
                if (firehose != null) {
                    log.info("Persisting remaining data.");
                    final Committer committer = committerSupplier.get();
                    final CountDownLatch persistLatch = new CountDownLatch(1);
                    plumber.persist(new Committer() {

                        @Override
                        public Object getMetadata() {
                            return committer.getMetadata();
                        }

                        @Override
                        public void run() {
                            try {
                                committer.run();
                            } finally {
                                persistLatch.countDown();
                            }
                        }
                    });
                    persistLatch.await();
                }
                if (gracefullyStopped) {
                    log.info("Gracefully stopping.");
                } else {
                    log.info("Finishing the job.");
                    synchronized (this) {
                        if (gracefullyStopped) {
                            // Someone called stopGracefully after we checked the flag. That's okay, just stop now.
                            log.info("Gracefully stopping.");
                        } else {
                            finishingJob = true;
                        }
                    }
                    if (finishingJob) {
                        plumber.finishJob();
                    }
                }
            } catch (InterruptedException e) {
                log.debug(e, "Interrupted while finishing the job");
            } catch (Exception e) {
                log.makeAlert(e, "Failed to finish realtime task").emit();
                throw e;
            } finally {
                if (firehose != null) {
                    CloseableUtils.closeAndSuppressExceptions(firehose, e -> log.warn("Failed to close Firehose"));
                }
                toolbox.removeMonitor(metricsMonitor);
            }
        }
        toolbox.getDataSegmentServerAnnouncer().unannounce();
        toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
    }
    log.info("Job done!");
    return TaskStatus.success(getId());
}
Also used: RealtimeIOConfig(org.apache.druid.segment.indexing.RealtimeIOConfig), DataSegmentAnnouncer(org.apache.druid.server.coordination.DataSegmentAnnouncer), EventReceiverFirehoseFactory(org.apache.druid.segment.realtime.firehose.EventReceiverFirehoseFactory), ClippedFirehoseFactory(org.apache.druid.segment.realtime.firehose.ClippedFirehoseFactory), TimedShutoffFirehoseFactory(org.apache.druid.segment.realtime.firehose.TimedShutoffFirehoseFactory), FirehoseFactory(org.apache.druid.data.input.FirehoseFactory), DataSegment(org.apache.druid.timeline.DataSegment), FireDepartment(org.apache.druid.segment.realtime.FireDepartment), SegmentPublisher(org.apache.druid.segment.realtime.SegmentPublisher), TaskLock(org.apache.druid.indexing.common.TaskLock), ISE(org.apache.druid.java.util.common.ISE), IOException(java.io.IOException), PlumberSchool(org.apache.druid.segment.realtime.plumber.PlumberSchool), RealtimePlumberSchool(org.apache.druid.segment.realtime.plumber.RealtimePlumberSchool), LookupNodeService(org.apache.druid.discovery.LookupNodeService), RealtimeTuningConfig(org.apache.druid.segment.indexing.RealtimeTuningConfig), CountDownLatch(java.util.concurrent.CountDownLatch), LockReleaseAction(org.apache.druid.indexing.common.actions.LockReleaseAction), DataSchema(org.apache.druid.segment.indexing.DataSchema), VersioningPolicy(org.apache.druid.segment.realtime.plumber.VersioningPolicy), DiscoveryDruidNode(org.apache.druid.discovery.DiscoveryDruidNode), TimeChunkLockAcquireAction(org.apache.druid.indexing.common.actions.TimeChunkLockAcquireAction), RealtimeMetricsMonitor(org.apache.druid.segment.realtime.RealtimeMetricsMonitor), Committer(org.apache.druid.data.input.Committer), Interval(org.joda.time.Interval)
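
Buried in this long method, the discovery-relevant part is the announce/unannounce bracket around the task's work. It is distilled below using only calls visible in the example; runIngestion() is a placeholder for the firehose and plumber work.

// Distilled announce/unannounce bracket from RealtimeIndexTask.run above.
// toolbox and discoveryDruidNode are as constructed in the example; runIngestion() is a placeholder.
void runWithAnnouncement(TaskToolbox toolbox, DiscoveryDruidNode discoveryDruidNode) throws Exception {
    try {
        toolbox.getDataSegmentServerAnnouncer().announce();
        toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);
        runIngestion(); // connect firehose, process rows, persist, hand off segments
    } finally {
        // Withdraw the node from discovery even if ingestion failed.
        toolbox.getDataSegmentServerAnnouncer().unannounce();
        toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
    }
}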

Aggregations

DiscoveryDruidNode (org.apache.druid.discovery.DiscoveryDruidNode): 25 usages
DruidNodeDiscoveryProvider (org.apache.druid.discovery.DruidNodeDiscoveryProvider): 15 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 13 usages
ArrayList (java.util.ArrayList): 13 usages
List (java.util.List): 13 usages
DruidNode (org.apache.druid.server.DruidNode): 13 usages
Test (org.junit.Test): 12 usages
ImmutableList (com.google.common.collect.ImmutableList): 10 usages
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 10 usages
DruidNodeDiscovery (org.apache.druid.discovery.DruidNodeDiscovery): 10 usages
WorkerNodeService (org.apache.druid.discovery.WorkerNodeService): 10 usages
TaskStatus (org.apache.druid.indexer.TaskStatus): 10 usages
ISE (org.apache.druid.java.util.common.ISE): 10 usages
HttpClient (org.apache.druid.java.util.http.client.HttpClient): 10 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 9 usages
CuratorFramework (org.apache.curator.framework.CuratorFramework): 9 usages
Task (org.apache.druid.indexing.common.task.Task): 9 usages
TaskRunnerListener (org.apache.druid.indexing.overlord.TaskRunnerListener): 9 usages
TaskStorage (org.apache.druid.indexing.overlord.TaskStorage): 9 usages
HttpRemoteTaskRunnerConfig (org.apache.druid.indexing.overlord.config.HttpRemoteTaskRunnerConfig): 9 usages