Example 51 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In the class Store, the method renameTempFilesSafe:

/**
     * Renames all the given files from the key of the map to the
     * value of the map. All successfully renamed files are removed from the map in-place.
     */
public void renameTempFilesSafe(Map<String, String> tempFileMap) throws IOException {
    // this works just like a lucene commit - we rename all the temp files, and once we have successfully
    // renamed all the segment files we rename the commit file to ensure we don't leave half-baked commits behind.
    final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[tempFileMap.size()]);
    ArrayUtil.timSort(entries, new Comparator<Map.Entry<String, String>>() {

        @Override
        public int compare(Map.Entry<String, String> o1, Map.Entry<String, String> o2) {
            String left = o1.getValue();
            String right = o2.getValue();
            // sort so that any segments file comes last: the "commit" rename must happen
            // only after every other file has been renamed successfully
            if (left.startsWith(IndexFileNames.SEGMENTS) || right.startsWith(IndexFileNames.SEGMENTS)) {
                if (left.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return -1;
                } else if (right.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return 1;
                }
            }
            return left.compareTo(right);
        }
    });
    metadataLock.writeLock().lock();
    // the delete and rename operations below will throw if the files are still open.
    try (Lock writeLock = directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (Map.Entry<String, String> entry : entries) {
            String tempFile = entry.getKey();
            String origFile = entry.getValue();
            // first, go and delete the existing ones
            try {
                directory.deleteFile(origFile);
            } catch (FileNotFoundException | NoSuchFileException e) {
                // ignore: the destination file did not exist, so there is nothing to delete
            } catch (Exception ex) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
            }
            // now, rename the files... and fail if it won't work
            directory.rename(tempFile, origFile);
            final String remove = tempFileMap.remove(tempFile);
            assert remove != null;
        }
        directory.syncMetaData();
    } finally {
        metadataLock.writeLock().unlock();
    }
}
Also used: FileNotFoundException(java.io.FileNotFoundException), NoSuchFileException(java.nio.file.NoSuchFileException), IndexNotFoundException(org.apache.lucene.index.IndexNotFoundException), ElasticsearchException(org.elasticsearch.ElasticsearchException), IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException), IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException), AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException), CorruptIndexException(org.apache.lucene.index.CorruptIndexException), ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException), EOFException(java.io.EOFException), AccessDeniedException(java.nio.file.AccessDeniedException), IOException(java.io.IOException), ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock), Lock(org.apache.lucene.store.Lock), ShardLock(org.elasticsearch.env.ShardLock), Supplier(org.apache.logging.log4j.util.Supplier), ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage), Map(java.util.Map), HashMap(java.util.HashMap), Collections.emptyMap(java.util.Collections.emptyMap), Collections.unmodifiableMap(java.util.Collections.unmodifiableMap)
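The cast to (Supplier<?>) seen throughout these examples selects the lazy overload of Logger.debug (a bare lambda would be ambiguous between the Supplier<?> and MessageSupplier overloads), so the ParameterizedMessage is only constructed when the DEBUG level is enabled. A minimal, self-contained sketch of the pattern; class name and message values are illustrative, not from the Elasticsearch source:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {

    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        String origFile = "segments_3"; // illustrative file name
        Exception ex = new java.io.IOException("simulated failure");
        // The cast picks debug(Supplier<?>, Throwable); the lambda, and the
        // ParameterizedMessage it builds, run only if DEBUG is enabled, so
        // nothing is allocated on the hot path when the level is off.
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
    }
}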

Example 52 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In the class Store, the method cleanupAndVerify:

/**
     * This method deletes every file in this store that is not contained in the given source metadata or is a
     * legacy checksum file. After the delete it pulls the latest metadata snapshot from the store and compares it
     * to the given snapshot. If the snapshots are inconsistent, an IllegalStateException is thrown.
     *
     * @param reason         the reason for this cleanup operation, logged for each deleted file
     * @param sourceMetaData the metadata used for cleanup. All files in this metadata should be kept around.
     * @throws IOException           if an IOException occurs
     * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup
     */
public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
    metadataLock.writeLock().lock();
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        final StoreDirectory dir = directory;
        for (String existingFile : dir.listAll()) {
            if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
                // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
                continue;
            }
            try {
                dir.deleteFile(reason, existingFile);
            // FileNotFoundException should not happen since we hold the write lock
            } catch (IOException ex) {
                if (existingFile.startsWith(IndexFileNames.SEGMENTS) || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                    // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
                    throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
                }
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
            // ignore, we don't really care, will get deleted later on
            }
        }
        final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);
        verifyAfterCleanup(sourceMetaData, metadataOrEmpty);
    } finally {
        metadataLock.writeLock().unlock();
    }
}
Also used: Supplier(org.apache.logging.log4j.util.Supplier), ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage), IOException(java.io.IOException), ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock), Lock(org.apache.lucene.store.Lock), ShardLock(org.elasticsearch.env.ShardLock)
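Both Store methods take the metadata write lock and then hold Lucene's write lock for the duration of the mutation via try-with-resources. A hedged sketch of just that locking guard, with an illustrative directory path:

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public class WriteLockGuardSketch {

    public static void main(String[] args) throws IOException {
        // illustrative path; any Lucene Directory implementation works
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index-sketch"))) {
            // obtainLock throws LockObtainFailedException if another holder
            // (e.g. an open IndexWriter) currently owns "write.lock"
            try (Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                // it is now safe to delete or rename index files
            } catch (LockObtainFailedException e) {
                // an IndexWriter is still open; skip the cleanup rather than corrupt it
            }
        }
    }
}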

Example 53 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In the class RecoveryTarget, the method closeInternal:

@Override
protected void closeInternal() {
    try {
        // clean open index outputs
        Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, IndexOutput> entry = iterator.next();
            logger.trace("closing IndexOutput file [{}]", entry.getValue());
            try {
                entry.getValue().close();
            } catch (Exception e) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
            }
            iterator.remove();
        }
        // trash temporary files
        for (String file : tempFileNames.keySet()) {
            logger.trace("cleaning temporary file [{}]", file);
            store.deleteQuiet(file);
        }
    } finally {
        // release the store reference; the matching increment happens in the constructor
        store.decRef();
        indexShard.recoveryStats().decCurrentAsTarget();
        closedLatch.countDown();
    }
}
Also used: Entry(java.util.Map.Entry), IndexOutput(org.apache.lucene.store.IndexOutput), Supplier(org.apache.logging.log4j.util.Supplier), ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage), ConcurrentMap(java.util.concurrent.ConcurrentMap), Map(java.util.Map), ElasticsearchException(org.elasticsearch.ElasticsearchException), IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException), IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException), TimeoutException(java.util.concurrent.TimeoutException), CorruptIndexException(org.apache.lucene.index.CorruptIndexException), IOException(java.io.IOException)
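The close loop above removes each entry through the iterator only after attempting to close it, so one failing output neither stops the drain nor leaks the remaining ones. A generic sketch of the same drain-and-close pattern, with the map values simplified to Closeable:

import java.io.Closeable;
import java.io.StringWriter;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class DrainAndCloseSketch {

    public static void main(String[] args) {
        Map<String, Closeable> open = new HashMap<>();
        open.put("recovery.tmp", new StringWriter()); // stand-in for an IndexOutput

        Iterator<Map.Entry<String, Closeable>> iterator = open.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, Closeable> entry = iterator.next();
            try {
                entry.getValue().close();
            } catch (Exception e) {
                // log and keep draining; a failed close must not abort the loop
                System.err.println("error while closing [" + entry.getKey() + "]: " + e);
            }
            // remove via the iterator: removing through the map itself while
            // iterating would throw ConcurrentModificationException
            iterator.remove();
        }
    }
}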

Example 54 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In the class TransportShardMultiTermsVectorAction, the method shardOperation:

@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
    final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    final IndexShard indexShard = indexService.getShard(shardId.id());
    for (int i = 0; i < request.locations.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        try {
            TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
            response.add(request.locations.get(i), termVectorsResponse);
        } catch (Exception t) {
            if (TransportActions.isShardNotAvailableException(t)) {
                throw (ElasticsearchException) t;
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                response.add(request.locations.get(i), new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
            }
        }
    }
    return response;
}
Also used: IndexService(org.elasticsearch.index.IndexService), IndexShard(org.elasticsearch.index.shard.IndexShard), Supplier(org.apache.logging.log4j.util.Supplier), ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage), ElasticsearchException(org.elasticsearch.ElasticsearchException)
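ParameterizedMessage fills each {} placeholder positionally, which is how the lambda above packs the shard id, type, and document id into one pattern. A small sketch of the formatting on its own; all values are illustrative:

import org.apache.logging.log4j.message.ParameterizedMessage;

public class PlaceholderSketch {

    public static void main(String[] args) {
        // each {} is replaced by the corresponding argument, in order
        ParameterizedMessage message = new ParameterizedMessage(
                "{} failed to execute multi term vectors for [{}]/[{}]",
                "[index][0]", "doc", "42");
        // prints: [index][0] failed to execute multi term vectors for [doc]/[42]
        System.out.println(message.getFormattedMessage());
    }
}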

Example 55 with Supplier

Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.

In the class RepositoriesService, the method applyClusterState:

/**
     * Checks if new repositories appeared in or disappeared from the cluster metadata and updates the current list of
     * repositories accordingly.
     *
     * @param event cluster changed event
     */
@Override
public void applyClusterState(ClusterChangedEvent event) {
    try {
        RepositoriesMetaData oldMetaData = event.previousState().getMetaData().custom(RepositoriesMetaData.TYPE);
        RepositoriesMetaData newMetaData = event.state().getMetaData().custom(RepositoriesMetaData.TYPE);
        // Check if repositories got changed
        if ((oldMetaData == null && newMetaData == null) || (oldMetaData != null && oldMetaData.equals(newMetaData))) {
            return;
        }
        logger.trace("processing new index repositories for state version [{}]", event.state().version());
        Map<String, Repository> survivors = new HashMap<>();
        // First, remove repositories that are no longer there
        for (Map.Entry<String, Repository> entry : repositories.entrySet()) {
            if (newMetaData == null || newMetaData.repository(entry.getKey()) == null) {
                logger.debug("unregistering repository [{}]", entry.getKey());
                closeRepository(entry.getValue());
            } else {
                survivors.put(entry.getKey(), entry.getValue());
            }
        }
        Map<String, Repository> builder = new HashMap<>();
        if (newMetaData != null) {
            // Now go through all repositories and update existing or create missing
            for (RepositoryMetaData repositoryMetaData : newMetaData.repositories()) {
                Repository repository = survivors.get(repositoryMetaData.name());
                if (repository != null) {
                    // Found previous version of this repository
                    RepositoryMetaData previousMetadata = repository.getMetadata();
                    if (previousMetadata.type().equals(repositoryMetaData.type()) == false || previousMetadata.settings().equals(repositoryMetaData.settings()) == false) {
                        // Previous version is different from the version in settings
                        logger.debug("updating repository [{}]", repositoryMetaData.name());
                        closeRepository(repository);
                        repository = null;
                        try {
                            repository = createRepository(repositoryMetaData);
                        } catch (RepositoryException ex) {
                            // TODO: this catch is bogus, it means the old repo is already closed,
                            // but we have nothing to replace it
                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex);
                        }
                    }
                } else {
                    try {
                        repository = createRepository(repositoryMetaData);
                    } catch (RepositoryException ex) {
                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
                    }
                }
                if (repository != null) {
                    logger.debug("registering repository [{}]", repositoryMetaData.name());
                    builder.put(repositoryMetaData.name(), repository);
                }
            }
        }
        repositories = Collections.unmodifiableMap(builder);
    } catch (Exception ex) {
        logger.warn("failure updating cluster state ", ex);
    }
}
Also used: RepositoriesMetaData(org.elasticsearch.cluster.metadata.RepositoriesMetaData), RepositoryMetaData(org.elasticsearch.cluster.metadata.RepositoryMetaData), HashMap(java.util.HashMap), Map(java.util.Map), Supplier(org.apache.logging.log4j.util.Supplier), ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage), IOException(java.io.IOException)
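applyClusterState reconciles the live repositories map against the new metadata in two passes: first drop and close repositories missing from the new state, then rebuild the map, reusing a survivor only when its metadata is unchanged. A generic, hedged sketch of that diff-and-rebuild pattern, using plain strings in place of Repository instances:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class ReconcileSketch {

    public static void main(String[] args) {
        Map<String, String> current = new HashMap<>(Map.of("fs-backup", "v1", "old-repo", "v1"));
        Map<String, String> desired = Map.of("fs-backup", "v2", "s3-backup", "v1");

        // pass 1: keep only entries that still exist in the desired state
        Map<String, String> survivors = new HashMap<>();
        current.forEach((name, value) -> {
            if (desired.containsKey(name)) {
                survivors.put(name, value);
            } // else: close/unregister the dropped entry here
        });

        // pass 2: rebuild, reusing a survivor only if its value is unchanged,
        // otherwise "recreating" it from the desired state
        Map<String, String> builder = new HashMap<>();
        desired.forEach((name, wanted) -> {
            String existing = survivors.get(name);
            builder.put(name, wanted.equals(existing) ? existing : wanted);
        });

        // publish an immutable snapshot, as RepositoriesService does
        Map<String, String> repositories = Collections.unmodifiableMap(builder);
        // contains fs-backup=v2 and s3-backup=v1 (iteration order not guaranteed)
        System.out.println(repositories);
    }
}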

Aggregations

Supplier (org.apache.logging.log4j.util.Supplier): 94
ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage): 91
IOException (java.io.IOException): 55
ElasticsearchException (org.elasticsearch.ElasticsearchException): 27
ArrayList (java.util.ArrayList): 25
ClusterState (org.elasticsearch.cluster.ClusterState): 21
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 21
TimeValue (org.elasticsearch.common.unit.TimeValue): 14
HashMap (java.util.HashMap): 12
Map (java.util.Map): 11
Settings (org.elasticsearch.common.settings.Settings): 11
TransportException (org.elasticsearch.transport.TransportException): 11
List (java.util.List): 10
ExecutionException (java.util.concurrent.ExecutionException): 10
Index (org.elasticsearch.index.Index): 10
CountDownLatch (java.util.concurrent.CountDownLatch): 9
NotMasterException (org.elasticsearch.cluster.NotMasterException): 8
ClusterStateUpdateResponse (org.elasticsearch.cluster.ack.ClusterStateUpdateResponse): 8
ClusterBlockException (org.elasticsearch.cluster.block.ClusterBlockException): 8
NoSuchFileException (java.nio.file.NoSuchFileException): 7