Search in sources:

Example 6 with Logger

Use of org.apache.logging.log4j.Logger in the elasticsearch project by elastic.

From the class UnicastZenPing, the method resolveHostsLists:

/**
     * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of addresses
     * if the number of ports is greater than one) and the transport addresses are used to create discovery nodes. Host lookups are done
     * in parallel using the specified executor service, up to the specified resolve timeout.
     *
     * @param executorService  the executor service used to parallelize hostname lookups
     * @param logger           logger used for logging messages regarding hostname lookups
     * @param hosts            the hosts to resolve
     * @param limitPortCounts  the number of ports to resolve (should be 1 for non-local transport)
     * @param transportService the transport service
     * @param nodeId_prefix    a prefix to use for node ids
     * @param resolveTimeout   the timeout before returning from hostname lookups
     * @return a list of discovery nodes with resolved transport addresses
     */
public static List<DiscoveryNode> resolveHostsLists(final ExecutorService executorService, final Logger logger, final List<String> hosts, final int limitPortCounts, final TransportService transportService, final String nodeId_prefix, final TimeValue resolveTimeout) throws InterruptedException {
    Objects.requireNonNull(executorService);
    Objects.requireNonNull(logger);
    Objects.requireNonNull(hosts);
    Objects.requireNonNull(transportService);
    Objects.requireNonNull(nodeId_prefix);
    Objects.requireNonNull(resolveTimeout);
    if (resolveTimeout.nanos() < 0) {
        throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
    }
    // create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
    final List<Callable<TransportAddress[]>> callables = hosts.stream().map(hn -> (Callable<TransportAddress[]>) () -> transportService.addressesFromString(hn, limitPortCounts)).collect(Collectors.toList());
    final List<Future<TransportAddress[]>> futures = executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);
    final List<DiscoveryNode> discoveryNodes = new ArrayList<>();
    final Set<TransportAddress> localAddresses = new HashSet<>();
    localAddresses.add(transportService.boundAddress().publishAddress());
    localAddresses.addAll(Arrays.asList(transportService.boundAddress().boundAddresses()));
    // ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can associate the
    // hostname with the corresponding task by iterating together
    final Iterator<String> it = hosts.iterator();
    for (final Future<TransportAddress[]> future : futures) {
        final String hostname = it.next();
        if (!future.isCancelled()) {
            assert future.isDone();
            try {
                final TransportAddress[] addresses = future.get();
                logger.trace("resolved host [{}] to {}", hostname, addresses);
                for (int addressId = 0; addressId < addresses.length; addressId++) {
                    final TransportAddress address = addresses[addressId];
                    // no point in pinging ourselves
                    if (localAddresses.contains(address) == false) {
                        discoveryNodes.add(new DiscoveryNode(nodeId_prefix + hostname + "_" + addressId + "#", address, emptyMap(), emptySet(), Version.CURRENT.minimumCompatibilityVersion()));
                    }
                }
            } catch (final ExecutionException e) {
                assert e.getCause() != null;
                final String message = "failed to resolve host [" + hostname + "]";
                logger.warn(message, e.getCause());
            }
        } else {
            logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
        }
    }
    return discoveryNodes;
}
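
A hedged sketch of how this helper might be invoked, written as if inside a method that declares throws InterruptedException. The transportService is assumed to be an already-constructed TransportService from the surrounding scope; the executor, host list, and node-id prefix are illustrative values, not the ones UnicastZenPing itself passes:

final ExecutorService executorService = Executors.newFixedThreadPool(2);
final Logger logger = LogManager.getLogger(UnicastZenPing.class);
final List<String> hosts = Arrays.asList("10.0.0.1:9300", "seed.example.com");
try {
    // limitPortCounts is 1 for non-local transport, as the Javadoc above notes;
    // "example_prefix_" is an illustrative node-id prefix
    final List<DiscoveryNode> nodes = UnicastZenPing.resolveHostsLists(executorService, logger, hosts, 1, transportService, "example_prefix_", TimeValue.timeValueSeconds(5));
    logger.debug("resolved {} discovery node(s)", nodes.size());
} finally {
    executorService.shutdown();
}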

Example 7 with Logger

Use of org.apache.logging.log4j.Logger in the elasticsearch project by elastic.

From the class ZenDiscovery, the method findMaster:

private DiscoveryNode findMaster() {
    logger.trace("starting to ping");
    List<ZenPing.PingResponse> fullPingResponses = pingAndWait(pingTimeout).toList();
    if (fullPingResponses == null) {
        logger.trace("No full ping responses");
        return null;
    }
    if (logger.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder();
        if (fullPingResponses.size() == 0) {
            sb.append(" {none}");
        } else {
            for (ZenPing.PingResponse pingResponse : fullPingResponses) {
                sb.append("\n\t--> ").append(pingResponse);
            }
        }
        logger.trace("full ping responses:{}", sb);
    }
    final DiscoveryNode localNode = clusterService.localNode();
    // add ourselves
    assert fullPingResponses.stream().map(ZenPing.PingResponse::node).filter(n -> n.equals(localNode)).findAny().isPresent() == false;
    fullPingResponses.add(new ZenPing.PingResponse(localNode, null, clusterService.state()));
    // filter responses
    final List<ZenPing.PingResponse> pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
    List<DiscoveryNode> activeMasters = new ArrayList<>();
    for (ZenPing.PingResponse pingResponse : pingResponses) {
        // exclude the local node from the active masters, otherwise we may end up electing ourselves without any check / verification from other nodes in ZenDiscovery#innerJoinCluster()
        if (pingResponse.master() != null && !localNode.equals(pingResponse.master())) {
            activeMasters.add(pingResponse.master());
        }
    }
    // nodes discovered during pinging
    List<ElectMasterService.MasterCandidate> masterCandidates = new ArrayList<>();
    for (ZenPing.PingResponse pingResponse : pingResponses) {
        if (pingResponse.node().isMasterNode()) {
            masterCandidates.add(new ElectMasterService.MasterCandidate(pingResponse.node(), pingResponse.getClusterStateVersion()));
        }
    }
    if (activeMasters.isEmpty()) {
        if (electMaster.hasEnoughCandidates(masterCandidates)) {
            final ElectMasterService.MasterCandidate winner = electMaster.electMaster(masterCandidates);
            logger.trace("candidate {} won election", winner);
            return winner.getNode();
        } else {
            // if we don't have enough master nodes, we bail, because there are not enough masters to elect from
            logger.trace("not enough master nodes [{}]", masterCandidates);
            return null;
        }
    } else {
        assert !activeMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
        // let's tie break between discovered nodes
        return electMaster.tieBreakActiveMasters(activeMasters);
    }
}
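
The isTraceEnabled() guard above exists only because building the StringBuilder is wasteful when TRACE is off; Log4j's Supplier overloads express the same deferral without an explicit guard. A minimal sketch of that idiom, written as it could appear inside findMaster (this is a common Elasticsearch pattern, not how ZenDiscovery itself writes this particular message):

// Supplier is org.apache.logging.log4j.util.Supplier, ParameterizedMessage is
// org.apache.logging.log4j.message.ParameterizedMessage; the lambda runs only if TRACE is enabled
logger.trace((Supplier<?>) () -> new ParameterizedMessage("full ping responses:{}",
        fullPingResponses.stream().map(r -> "\n\t--> " + r).collect(Collectors.joining())));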

Example 8 with Logger

Use of org.apache.logging.log4j.Logger in the elasticsearch project by elastic.

From the class AnalysisRegistry, the method build:

public IndexAnalyzers build(IndexSettings indexSettings, Map<String, AnalyzerProvider<?>> analyzerProviders, Map<String, AnalyzerProvider<?>> normalizerProviders, Map<String, TokenizerFactory> tokenizerFactoryFactories, Map<String, CharFilterFactory> charFilterFactoryFactories, Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
    Index index = indexSettings.getIndex();
    analyzerProviders = new HashMap<>(analyzerProviders);
    Logger logger = Loggers.getLogger(getClass(), indexSettings.getSettings());
    DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
    Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();
    Map<String, NamedAnalyzer> analyzers = new HashMap<>();
    Map<String, NamedAnalyzer> normalizers = new HashMap<>();
    for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
        processAnalyzerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), analyzerAliases, analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
    }
    for (Map.Entry<String, AnalyzerProvider<?>> entry : normalizerProviders.entrySet()) {
        processNormalizerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), normalizers, tokenFilterFactoryFactories, charFilterFactoryFactories);
    }
    for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {
        String key = entry.getKey();
        if (analyzers.containsKey(key) && ("default".equals(key) || "default_search".equals(key) || "default_search_quoted".equals(key)) == false) {
            throw new IllegalStateException("already registered analyzer with name: " + key);
        } else {
            NamedAnalyzer configured = entry.getValue();
            analyzers.put(key, configured);
        }
    }
    if (!analyzers.containsKey("default")) {
        processAnalyzerFactory(deprecationLogger, indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS), analyzerAliases, analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
    }
    if (!analyzers.containsKey("default_search")) {
        analyzers.put("default_search", analyzers.get("default"));
    }
    if (!analyzers.containsKey("default_search_quoted")) {
        analyzers.put("default_search_quoted", analyzers.get("default_search"));
    }
    NamedAnalyzer defaultAnalyzer = analyzers.get("default");
    if (defaultAnalyzer == null) {
        throw new IllegalArgumentException("no default analyzer configured");
    }
    if (analyzers.containsKey("default_index")) {
        final Version createdVersion = indexSettings.getIndexVersionCreated();
        if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
            throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]");
        } else {
            deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index.getName());
        }
    }
    NamedAnalyzer defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer;
    NamedAnalyzer defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer;
    NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;
    for (Map.Entry<String, NamedAnalyzer> analyzer : analyzers.entrySet()) {
        if (analyzer.getKey().startsWith("_")) {
            throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\"");
        }
    }
    return new IndexAnalyzers(indexSettings, defaultIndexAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer, unmodifiableMap(analyzers), unmodifiableMap(normalizers));
}
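
The Logger here is obtained through Elasticsearch's Loggers helper so that the index settings can contribute a logging prefix, and deprecation messages are routed through DeprecationLogger. A minimal standalone sketch of that pairing, using Settings.EMPTY as a stand-in for the real index settings; the message and setting names are just the ones this method deprecates:

// Settings.EMPTY stands in for indexSettings.getSettings() in the method above
Logger logger = Loggers.getLogger(AnalysisRegistry.class, Settings.EMPTY);
DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
deprecationLogger.deprecated("setting [{}] is deprecated, use [{}] instead", "index.analysis.analyzer.default_index", "index.analysis.analyzer.default");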

Example 9 with Logger

Use of org.apache.logging.log4j.Logger in the elasticsearch project by elastic.

From the class MaxMapCountCheckTests, the method testGetMaxMapCount:

public void testGetMaxMapCount() throws IOException, IllegalAccessException {
    final long procSysVmMaxMapCount = randomIntBetween(1, Integer.MAX_VALUE);
    final BufferedReader reader = mock(BufferedReader.class);
    when(reader.readLine()).thenReturn(Long.toString(procSysVmMaxMapCount));
    final Path procSysVmMaxMapCountPath = PathUtils.get("/proc/sys/vm/max_map_count");
    BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck() {

        @Override
        BufferedReader getBufferedReader(Path path) throws IOException {
            assertEquals(path, procSysVmMaxMapCountPath);
            return reader;
        }
    };
    assertThat(check.getMaxMapCount(), equalTo(procSysVmMaxMapCount));
    verify(reader).close();
    {
        reset(reader);
        final IOException ioException = new IOException("fatal");
        when(reader.readLine()).thenThrow(ioException);
        final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountIOException");
        final MockLogAppender appender = new MockLogAppender();
        appender.start();
        appender.addExpectation(new ParameterizedMessageLoggingExpectation("expected logged I/O exception", "testGetMaxMapCountIOException", Level.WARN, "I/O exception while trying to read [{}]", new Object[] { procSysVmMaxMapCountPath }, e -> ioException == e));
        Loggers.addAppender(logger, appender);
        assertThat(check.getMaxMapCount(logger), equalTo(-1L));
        appender.assertAllExpectationsMatched();
        verify(reader).close();
        Loggers.removeAppender(logger, appender);
        appender.stop();
    }
    {
        reset(reader);
        when(reader.readLine()).thenReturn("eof");
        final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountNumberFormatException");
        final MockLogAppender appender = new MockLogAppender();
        appender.start();
        appender.addExpectation(new ParameterizedMessageLoggingExpectation("expected logged number format exception", "testGetMaxMapCountNumberFormatException", Level.WARN, "unable to parse vm.max_map_count [{}]", new Object[] { "eof" }, e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"")));
        Loggers.addAppender(logger, appender);
        assertThat(check.getMaxMapCount(logger), equalTo(-1L));
        appender.assertAllExpectationsMatched();
        verify(reader).close();
        Loggers.removeAppender(logger, appender);
        appender.stop();
    }
}
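
The ParameterizedMessageLoggingExpectation used here is a custom expectation that inspects the message parameters and the attached throwable; for plain message matching, the test framework's stock MockLogAppender.SeenEventExpectation is usually enough. A hedged sketch of that simpler pattern (logger name and message are illustrative, and the surrounding test method is assumed to declare the same checked exceptions as the test above):

final Logger logger = ESLoggerFactory.getLogger("my.test.logger");
final MockLogAppender appender = new MockLogAppender();
appender.start();
// expect one WARN event with exactly this message on this logger
appender.addExpectation(new MockLogAppender.SeenEventExpectation("warning seen", "my.test.logger", Level.WARN, "something went wrong"));
Loggers.addAppender(logger, appender);
try {
    logger.warn("something went wrong");
    appender.assertAllExpectationsMatched();
} finally {
    Loggers.removeAppender(logger, appender);
    appender.stop();
}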

Example 10 with Logger

Use of org.apache.logging.log4j.Logger in the elasticsearch project by elastic.

From the class BootstrapChecksTests, the method testLogMessageInProductionMode:

public void testLogMessageInProductionMode() throws NodeValidationException {
    final Logger logger = mock(Logger.class);
    BootstrapChecks.check(true, Collections.emptyList(), logger);
    verify(logger).info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
    verifyNoMoreInteractions(logger);
}
