Example 41 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

From class Translog, method recoverFromFiles.

/** recover all translog files found on disk */
private ArrayList<TranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException {
    boolean success = false;
    ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
    // a temp file to copy the checkpoint to - note it must be on the same FS, otherwise the atomic move won't work
    final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX);
    boolean tempFileRenamed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        logger.debug("open uncommitted translog checkpoint {}", checkpoint);
        final String checkpointTranslogFile = getFilename(checkpoint.generation);
        for (long i = translogGeneration.translogFileGeneration; i < checkpoint.generation; i++) {
            Path committedTranslogFile = location.resolve(getFilename(i));
            if (Files.exists(committedTranslogFile) == false) {
                throw new IllegalStateException("translog file doesn't exist with generation: " + i + " lastCommitted: " + lastCommittedTranslogFileGeneration + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
            }
            final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
            foundTranslogs.add(reader);
            logger.debug("recovered local translog from checkpoint {}", checkpoint);
        }
        foundTranslogs.add(openReader(location.resolve(checkpointTranslogFile), checkpoint));
        Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
        if (Files.exists(commitCheckpoint)) {
            Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
            if (checkpoint.equals(checkpointFromDisk) == false) {
                throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk);
            }
        } else {
            // we first copy this into the temp-file and then fsync it followed by an atomic move into the target file
            // that way if we hit a disk-full here we are still in a consistent state.
            Files.copy(location.resolve(CHECKPOINT_FILE_NAME), tempFile, StandardCopyOption.REPLACE_EXISTING);
            IOUtils.fsync(tempFile, false);
            Files.move(tempFile, commitCheckpoint, StandardCopyOption.ATOMIC_MOVE);
            tempFileRenamed = true;
            // we only fsync the directory; the tempFile was already fsynced
            IOUtils.fsync(commitCheckpoint.getParent(), true);
        }
        success = true;
    } finally {
        if (success == false) {
            IOUtils.closeWhileHandlingException(foundTranslogs);
        }
        if (tempFileRenamed == false) {
            try {
                Files.delete(tempFile);
            } catch (IOException ex) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
            }
        }
    }
    return foundTranslogs;
}
Also used : Path(java.nio.file.Path) ArrayList(java.util.ArrayList) IOException(java.io.IOException) ReleasableLock(org.elasticsearch.common.util.concurrent.ReleasableLock) LongSupplier(java.util.function.LongSupplier) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)
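
The else branch above is a general crash-safety recipe: copy into a temp file on the same filesystem, fsync it, atomically rename it over the target, then fsync the parent directory so the rename itself is durable. Below is a minimal standalone sketch of that sequence using plain NIO in place of Lucene's IOUtils; the class and method names are illustrative, and the directory fsync is platform-dependent (it works on Linux but fails on Windows, which is why IOUtils tolerates that failure).

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public class AtomicCopyExample {

    /** Copies source to target so a crash or disk-full mid-copy never leaves a half-written target. */
    static void atomicCopy(Path source, Path target) throws IOException {
        // The temp file must live in the target directory: atomic moves are only guaranteed within one filesystem.
        Path temp = Files.createTempFile(target.getParent(), "copy-", ".tmp");
        boolean moved = false;
        try {
            Files.copy(source, temp, StandardCopyOption.REPLACE_EXISTING);
            // fsync the temp file contents before the rename
            try (FileChannel channel = FileChannel.open(temp, StandardOpenOption.WRITE)) {
                channel.force(true);
            }
            Files.move(temp, target, StandardCopyOption.ATOMIC_MOVE);
            moved = true;
            // fsync the directory so the rename survives a crash (platform-dependent, see above)
            try (FileChannel dir = FileChannel.open(target.getParent(), StandardOpenOption.READ)) {
                dir.force(true);
            }
        } finally {
            if (moved == false) {
                Files.deleteIfExists(temp);
            }
        }
    }
}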

Example 42 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

From class ClusterServiceIT, method testAckedUpdateTaskSameClusterState.

public void testAckedUpdateTaskSameClusterState() throws Exception {
    internalCluster().startNode();
    ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
    final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
    final AtomicBoolean ackTimeout = new AtomicBoolean(false);
    final AtomicBoolean onFailure = new AtomicBoolean(false);
    final AtomicBoolean executed = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);
    final CountDownLatch processedLatch = new CountDownLatch(1);
    clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {

        @Override
        protected Void newResponse(boolean acknowledged) {
            return null;
        }

        @Override
        public void onAllNodesAcked(@Nullable Exception e) {
            allNodesAcked.set(true);
            latch.countDown();
        }

        @Override
        public void onAckTimeout() {
            ackTimeout.set(true);
            latch.countDown();
        }

        @Override
        public TimeValue ackTimeout() {
            return TimeValue.timeValueSeconds(10);
        }

        @Override
        public TimeValue timeout() {
            return TimeValue.timeValueSeconds(10);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            processedLatch.countDown();
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            executed.set(true);
            return currentState;
        }

        @Override
        public void onFailure(String source, Exception e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
            onFailure.set(true);
            latch.countDown();
        }
    });
    ensureGreen();
    assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
    assertThat(allNodesAcked.get(), equalTo(true));
    assertThat(ackTimeout.get(), equalTo(false));
    assertThat(executed.get(), equalTo(true));
    assertThat(onFailure.get(), equalTo(false));
    assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) TimeValue(org.elasticsearch.common.unit.TimeValue)
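
The test above relies on a common pattern for asserting on asynchronous callbacks: record each outcome in an AtomicBoolean, count down a latch from the callback, and await the latch with a timeout before asserting. A stripped-down sketch of that coordination, detached from the cluster-service API (the executor here is only a stand-in for the async machinery):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class LatchCoordinationExample {

    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean executed = new AtomicBoolean(false);
        CountDownLatch latch = new CountDownLatch(1);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // The callback runs on another thread: record the outcome, then release the waiter.
            executor.submit(() -> {
                executed.set(true);
                latch.countDown();
            });
            // Always await with a timeout so a callback that never fires fails fast instead of hanging.
            if (latch.await(1, TimeUnit.SECONDS) == false) {
                throw new AssertionError("callback was never invoked");
            }
            if (executed.get() == false) {
                throw new AssertionError("flag not set");
            }
        } finally {
            executor.shutdown();
        }
    }
}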

Example 43 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

From class RemoteClusterService, method processRemoteShards.

Function<String, Transport.Connection> processRemoteShards(Map<String, ClusterSearchShardsResponse> searchShardsResponses, List<ShardIterator> remoteShardIterators, Map<String, AliasFilter> aliasFilterMap) {
    Map<String, Supplier<Transport.Connection>> nodeToCluster = new HashMap<>();
    for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
        String clusterName = entry.getKey();
        ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
        for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) {
            nodeToCluster.put(remoteNode.getId(), () -> getConnection(remoteNode, clusterName));
        }
        Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
        for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
            //add the cluster name to the remote index names for indices disambiguation
            //this ends up in the hits returned with the search response
            ShardId shardId = clusterSearchShardsGroup.getShardId();
            Index remoteIndex = shardId.getIndex();
            Index index = new Index(clusterName + REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(), remoteIndex.getUUID());
            ShardIterator shardIterator = new PlainShardIterator(new ShardId(index, shardId.getId()), Arrays.asList(clusterSearchShardsGroup.getShards()));
            remoteShardIterators.add(shardIterator);
            AliasFilter aliasFilter;
            if (indicesAndFilters == null) {
                aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY);
            } else {
                aliasFilter = indicesAndFilters.get(shardId.getIndexName());
                assert aliasFilter != null;
            }
            // here we have to map the filters to the UUID since from now on we use the uuid for the lookup
            aliasFilterMap.put(remoteIndex.getUUID(), aliasFilter);
        }
    }
    return (nodeId) -> {
        Supplier<Transport.Connection> supplier = nodeToCluster.get(nodeId);
        if (supplier == null) {
            throw new IllegalArgumentException("unknown remote node: " + nodeId);
        }
        return supplier.get();
    };
}
Also used : ClusterSearchShardsResponse(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse) ShardId(org.elasticsearch.index.shard.ShardId) Arrays(java.util.Arrays) ShardIterator(org.elasticsearch.cluster.routing.ShardIterator) TimeoutException(java.util.concurrent.TimeoutException) PlainShardIterator(org.elasticsearch.cluster.routing.PlainShardIterator) HashMap(java.util.HashMap) Index(org.elasticsearch.index.Index) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) Strings(org.elasticsearch.common.Strings) ArrayList(java.util.ArrayList) InetAddress(java.net.InetAddress) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) Settings(org.elasticsearch.common.settings.Settings) TimeValue(org.elasticsearch.common.unit.TimeValue) Map(java.util.Map) CountDown(org.elasticsearch.common.util.concurrent.CountDown) TransportService(org.elasticsearch.transport.TransportService) ClusterSearchShardsGroup(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup) Transport(org.elasticsearch.transport.Transport) AbstractComponent(org.elasticsearch.common.component.AbstractComponent) Setting(org.elasticsearch.common.settings.Setting) Predicate(java.util.function.Predicate) PlainActionFuture(org.elasticsearch.action.support.PlainActionFuture) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) IOUtils(org.apache.lucene.util.IOUtils) IOException(java.io.IOException) InetSocketAddress(java.net.InetSocketAddress) UnknownHostException(java.net.UnknownHostException) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) AliasFilter(org.elasticsearch.search.internal.AliasFilter) List(java.util.List) Version(org.elasticsearch.Version) Stream(java.util.stream.Stream) TransportAddress(org.elasticsearch.common.transport.TransportAddress) Supplier(org.apache.logging.log4j.util.Supplier) Closeable(java.io.Closeable) TransportException(org.elasticsearch.transport.TransportException) Collections(java.util.Collections) ActionListener(org.elasticsearch.action.ActionListener)
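
Note that processRemoteShards never opens a connection itself: it stores a Supplier per node id and hands back a Function that resolves suppliers on demand, so connections are only established for nodes that are actually queried. A stripped-down sketch of the same deferred-lookup shape with plain java.util.function types (Connection here is a hypothetical stand-in, not the Elasticsearch Transport.Connection):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;

public class DeferredConnectionLookup {

    /** Hypothetical stand-in for Transport.Connection. */
    static class Connection {
        final String nodeId;
        final String clusterName;

        Connection(String nodeId, String clusterName) {
            this.nodeId = nodeId;
            this.clusterName = clusterName;
        }
    }

    static Function<String, Connection> buildResolver(Map<String, String> nodeToCluster) {
        Map<String, Supplier<Connection>> suppliers = new HashMap<>();
        // Nothing is connected here; each supplier defers the (possibly expensive)
        // connect until the first lookup for that node.
        nodeToCluster.forEach((nodeId, cluster) ->
            suppliers.put(nodeId, () -> new Connection(nodeId, cluster)));
        return nodeId -> {
            Supplier<Connection> supplier = suppliers.get(nodeId);
            if (supplier == null) {
                throw new IllegalArgumentException("unknown remote node: " + nodeId);
            }
            return supplier.get();
        };
    }
}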

Example 44 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

From class ClientScrollableHitSource, method clearScroll.

@Override
public void clearScroll(String scrollId, Runnable onCompletion) {
    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
    clearScrollRequest.addScrollId(scrollId);
    /*
     * Unwrap the client so we don't set our task as the parent. If we *did* set our task id, the clear scroll would be
     * cancelled as if this task were cancelled. But we want to clear the scroll regardless of whether the main request
     * was cancelled.
     */
    client.unwrap().clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {

        @Override
        public void onResponse(ClearScrollResponse response) {
            logger.debug("Freed [{}] contexts", response.getNumFreed());
            onCompletion.run();
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
            onCompletion.run();
        }
    });
}
Also used : Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ClearScrollRequest(org.elasticsearch.action.search.ClearScrollRequest) ClearScrollResponse(org.elasticsearch.action.search.ClearScrollResponse) EsRejectedExecutionException(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException)
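
Every example on this page uses the lazy-logging idiom seen in onFailure above: passing a Supplier lambda defers building the ParameterizedMessage until log4j has confirmed the level is enabled. A minimal self-contained sketch of that idiom (the scroll id value is made up):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyWarnExample {

    private static final Logger logger = LogManager.getLogger(LazyWarnExample.class);

    public static void main(String[] args) {
        String scrollId = "c2Nhbjsx"; // illustrative value
        Exception cause = new IllegalStateException("search context missing");
        // The lambda runs only if WARN is enabled, so the ParameterizedMessage is never
        // allocated for suppressed levels. The (Supplier<?>) cast picks the Supplier
        // overload of warn over the MessageSupplier one, which the lambda would
        // otherwise match ambiguously.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), cause);
    }
}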

Example 45 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

From class HunspellService, method loadDictionary.

/**
     * Loads the hunspell dictionary for the given locale.
     *
     * @param locale       The locale of the hunspell dictionary to be loaded.
     * @param nodeSettings The node level settings
     * @param env          The node environment (from which the conf path will be resolved)
     * @return The loaded Hunspell dictionary
     * @throws Exception when loading fails (due to IO errors or malformed dictionary files)
     */
private Dictionary loadDictionary(String locale, Settings nodeSettings, Environment env) throws Exception {
    if (logger.isDebugEnabled()) {
        logger.debug("Loading hunspell dictionary [{}]...", locale);
    }
    Path dicDir = hunspellDir.resolve(locale);
    if (FileSystemUtils.isAccessibleDirectory(dicDir, logger) == false) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
    }
    // merging node settings with hunspell dictionary specific settings
    Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
    nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale + "."));
    boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);
    Path[] affixFiles = FileSystemUtils.files(dicDir, "*.aff");
    if (affixFiles.length == 0) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.length != 1) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    InputStream affixStream = null;
    Path[] dicFiles = FileSystemUtils.files(dicDir, "*.dic");
    List<InputStream> dicStreams = new ArrayList<>(dicFiles.length);
    try {
        for (int i = 0; i < dicFiles.length; i++) {
            dicStreams.add(Files.newInputStream(dicFiles[i]));
        }
        affixStream = Files.newInputStream(affixFiles[0]);
        try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
            return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
        }
    } catch (Exception e) {
        logger.error((Supplier<?>) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e);
        throw e;
    } finally {
        IOUtils.close(affixStream);
        IOUtils.close(dicStreams);
    }
}
Also used : Path(java.nio.file.Path) Dictionary(org.apache.lucene.analysis.hunspell.Dictionary) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) ElasticsearchException(org.elasticsearch.ElasticsearchException) SimpleFSDirectory(org.apache.lucene.store.SimpleFSDirectory) ElasticsearchException(org.elasticsearch.ElasticsearchException) IOException(java.io.IOException) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) Settings(org.elasticsearch.common.settings.Settings) Directory(org.apache.lucene.store.Directory) SimpleFSDirectory(org.apache.lucene.store.SimpleFSDirectory)
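
The finally block above leans on Lucene's IOUtils.close to shut down the affix stream and every dictionary stream even when construction fails. A rough sketch of what such a helper does in plain Java: close everything, keep the first IOException, and attach later ones as suppressed (closeAll is a hypothetical name, not the Lucene API):

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

public class CloseAllExample {

    /** Closes every resource; the first failure wins and later ones are suppressed. */
    static void closeAll(List<? extends Closeable> resources) throws IOException {
        IOException first = null;
        for (Closeable resource : resources) {
            try {
                if (resource != null) {
                    resource.close();
                }
            } catch (IOException e) {
                if (first == null) {
                    first = e;
                } else {
                    first.addSuppressed(e);
                }
            }
        }
        if (first != null) {
            throw first;
        }
    }
}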

Aggregations

Supplier (org.apache.logging.log4j.util.Supplier): 94
ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage): 91
IOException (java.io.IOException): 55
ElasticsearchException (org.elasticsearch.ElasticsearchException): 27
ArrayList (java.util.ArrayList): 25
ClusterState (org.elasticsearch.cluster.ClusterState): 21
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 21
TimeValue (org.elasticsearch.common.unit.TimeValue): 14
HashMap (java.util.HashMap): 12
Map (java.util.Map): 11
Settings (org.elasticsearch.common.settings.Settings): 11
TransportException (org.elasticsearch.transport.TransportException): 11
List (java.util.List): 10
ExecutionException (java.util.concurrent.ExecutionException): 10
Index (org.elasticsearch.index.Index): 10
CountDownLatch (java.util.concurrent.CountDownLatch): 9
NotMasterException (org.elasticsearch.cluster.NotMasterException): 8
ClusterStateUpdateResponse (org.elasticsearch.cluster.ack.ClusterStateUpdateResponse): 8
ClusterBlockException (org.elasticsearch.cluster.block.ClusterBlockException): 8
NoSuchFileException (java.nio.file.NoSuchFileException): 7