
Example 11 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

From the class Engine, method getSegmentInfo:

protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
    ensureOpen();
    Map<String, Segment> segments = new HashMap<>();
    // first, go over and compute the search ones...
    Searcher searcher = acquireSearcher("segments");
    try {
        for (LeafReaderContext reader : searcher.reader().leaves()) {
            SegmentCommitInfo info = segmentReader(reader.reader()).getSegmentInfo();
            assert !segments.containsKey(info.info.name);
            Segment segment = new Segment(info.info.name);
            segment.search = true;
            segment.docCount = reader.reader().numDocs();
            segment.delDocCount = reader.reader().numDeletedDocs();
            segment.version = info.info.getVersion();
            segment.compound = info.info.getUseCompoundFile();
            try {
                segment.sizeInBytes = info.sizeInBytes();
            } catch (IOException e) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
            }
            final SegmentReader segmentReader = segmentReader(reader.reader());
            segment.memoryInBytes = segmentReader.ramBytesUsed();
            if (verbose) {
                segment.ramTree = Accountables.namedAccountable("root", segmentReader);
            }
            // TODO: add more fine grained mem stats values to per segment info here
            segments.put(info.info.name, segment);
        }
    } finally {
        searcher.close();
    }
    // now, correlate or add the committed ones...
    if (lastCommittedSegmentInfos != null) {
        SegmentInfos infos = lastCommittedSegmentInfos;
        for (SegmentCommitInfo info : infos) {
            Segment segment = segments.get(info.info.name);
            if (segment == null) {
                segment = new Segment(info.info.name);
                segment.search = false;
                segment.committed = true;
                segment.docCount = info.info.maxDoc();
                segment.delDocCount = info.getDelCount();
                segment.version = info.info.getVersion();
                segment.compound = info.info.getUseCompoundFile();
                try {
                    segment.sizeInBytes = info.sizeInBytes();
                } catch (IOException e) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
                }
                segments.put(info.info.name, segment);
            } else {
                segment.committed = true;
            }
        }
    }
    Segment[] segmentsArr = segments.values().toArray(new Segment[segments.size()]);
    // sort by generation; Long.compare avoids the int overflow that casting the
    // difference of two long generations to int can produce
    Arrays.sort(segmentsArr, (o1, o2) -> Long.compare(o1.getGeneration(), o2.getGeneration()));
    return segmentsArr;
}
Also used : SegmentInfos(org.apache.lucene.index.SegmentInfos) SegmentCommitInfo(org.apache.lucene.index.SegmentCommitInfo) HashMap(java.util.HashMap) IndexSearcher(org.apache.lucene.search.IndexSearcher) IOException(java.io.IOException) SegmentReader(org.apache.lucene.index.SegmentReader) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)
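
The idiom shared by these examples is worth spelling out: the cast to (Supplier<?>) selects the Logger.trace(Supplier<?>, Throwable) overload, so the ParameterizedMessage is built only when TRACE is enabled. A minimal, self-contained sketch of the same pattern (the class name and message text are illustrative, not from the Elasticsearch source):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyTraceSketch {

    private static final Logger logger = LogManager.getLogger(LazyTraceSketch.class);

    void logFailure(String segmentName, Exception e) {
        // the (Supplier<?>) cast disambiguates the lambda; the message is
        // only constructed when TRACE is enabled for this logger
        logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", segmentName), e);
    }
}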

Example 12 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

From the class PeerRecoveryTargetService, method doRecovery:

private void doRecovery(final long recoveryId) {
    final StartRecoveryRequest request;
    final CancellableThreads cancellableThreads;
    final RecoveryState.Timer timer;
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
        if (recoveryRef == null) {
            logger.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId);
            return;
        }
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        cancellableThreads = recoveryTarget.cancellableThreads();
        timer = recoveryTarget.state().getTimer();
        try {
            assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node";
            request = getStartRecoveryRequest(recoveryTarget);
            logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
            recoveryTarget.indexShard().prepareForIndexRecovery();
        } catch (final Exception e) {
            // this will be logged as warning later on...
            logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e);
            onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true);
            return;
        }
    }
    try {
        logger.trace("{} starting recovery from {}", request.shardId(), request.sourceNode());
        final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
        cancellableThreads.execute(() -> responseHolder.set(
            transportService.submitRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request,
                new FutureTransportResponseHandler<RecoveryResponse>() {

                    @Override
                    public RecoveryResponse newInstance() {
                        return new RecoveryResponse();
                    }
                }).txGet()));
        final RecoveryResponse recoveryResponse = responseHolder.get();
        final TimeValue recoveryTime = new TimeValue(timer.time());
        // do this through ongoing recoveries to remove it from the collection
        onGoingRecoveries.markRecoveryAsDone(recoveryId);
        if (logger.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            sb.append('[').append(request.shardId().getIndex().getName()).append("][").append(request.shardId().id()).append("] ");
            sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n");
            sb.append("   phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]")
                .append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
                .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("]")
                .append(", throttling_wait [").append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append("]\n");
            sb.append("         : reusing_files   [").append(recoveryResponse.phase1ExistingFileNames.size())
                .append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
            sb.append("   phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
            sb.append("         : recovered [").append(recoveryResponse.phase2Operations).append("] transaction log operations")
                .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]\n");
            logger.trace("{}", sb);
        } else {
            logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), request.sourceNode(), recoveryTime);
        }
    } catch (CancellableThreads.ExecutionCancelledException e) {
        logger.trace("recovery cancelled", e);
    } catch (Exception e) {
        if (logger.isTraceEnabled()) {
            logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] Got exception on recovery", request.shardId().getIndex().getName(), request.shardId().id()), e);
        }
        Throwable cause = ExceptionsHelper.unwrapCause(e);
        if (cause instanceof CancellableThreads.ExecutionCancelledException) {
            // this can also come from the source wrapped in a RemoteTransportException
            onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, "source has canceled the recovery", cause), false);
            return;
        }
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        // unwrap a second time, in case the cause is doubly wrapped (e.g. a transport exception around another wrapper)
        cause = ExceptionsHelper.unwrapCause(cause);
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException || cause instanceof ShardNotFoundException) {
            // if the target is not ready yet, retry
            retryRecovery(recoveryId, "remote shard not ready", recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof DelayRecoveryException) {
            retryRecovery(recoveryId, cause, recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof ConnectTransportException) {
            logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", request.shardId(), recoverySettings.retryDelayNetwork(), cause.getMessage());
            retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof AlreadyClosedException) {
            onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, "source shard is closed", cause), false);
            return;
        }
        onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true);
    }
}
Also used : ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) RecoveryEngineException(org.elasticsearch.index.engine.RecoveryEngineException) RecoveryRef(org.elasticsearch.indices.recovery.RecoveriesCollection.RecoveryRef) Supplier(org.apache.logging.log4j.util.Supplier) TimeValue(org.elasticsearch.common.unit.TimeValue) FutureTransportResponseHandler(org.elasticsearch.transport.FutureTransportResponseHandler) CancellableThreads(org.elasticsearch.common.util.CancellableThreads) AtomicReference(java.util.concurrent.atomic.AtomicReference) IllegalIndexShardStateException(org.elasticsearch.index.shard.IllegalIndexShardStateException) ElasticsearchException(org.elasticsearch.ElasticsearchException) NodeClosedException(org.elasticsearch.node.NodeClosedException) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) ElasticsearchTimeoutException(org.elasticsearch.ElasticsearchTimeoutException) ConnectTransportException(org.elasticsearch.transport.ConnectTransportException) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) IOException(java.io.IOException) MapperException(org.elasticsearch.index.mapper.MapperException) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)
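
The cause handling above unwraps at most two layers by repeating the same steps, per the "unwrap a second time" comment. A generic "unwrap until stable" loop would handle arbitrary nesting; the sketch below is a standalone illustration with a hypothetical wrapper predicate, not the ExceptionsHelper implementation:

public final class CauseUnwrapSketch {

    // follow getCause() through known wrapper types until the innermost
    // actionable exception is reached
    static Throwable deepUnwrap(Throwable t) {
        Throwable cause = t;
        while (cause.getCause() != null && isWrapper(cause)) {
            cause = cause.getCause();
        }
        return cause;
    }

    // hypothetical predicate standing in for the wrapper types the method
    // above handles explicitly (RemoteTransportException, RecoveryEngineException, ...)
    private static boolean isWrapper(Throwable t) {
        String name = t.getClass().getSimpleName();
        return name.equals("RemoteTransportException") || name.equals("RecoveryEngineException");
    }
}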

Example 13 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

From the class RecoverySourceHandler, method phase1:

/**
     * Performs phase1 of the recovery operations. Once this {@link IndexCommit}
     * snapshot has been taken, no commit operations (files being fsync'd)
     * are effectively allowed on this index until all recovery phases are done.
     * <p>
     * Phase1 examines the segment files on the target node and copies over the
     * segments that are missing. Only segments that have the same size and
     * checksum can be reused.
     */
public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
        StopWatch stopWatch = new StopWatch().start();
        final Store.MetadataSnapshot recoverySourceMetadata;
        try {
            recoverySourceMetadata = store.getMetadata(snapshot);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            shard.failShard("recovery", ex);
            throw ex;
        }
        for (String name : snapshot.getFileNames()) {
            final StoreFileMetaData md = recoverySourceMetadata.get(name);
            if (md == null) {
                logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed metadata has " + recoverySourceMetadata.asMap().size() + " files", name);
            }
        }
        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
        String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
        final boolean recoverWithSyncId = recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
        if (recoverWithSyncId) {
            final long numDocsTarget = request.metadataSnapshot().getNumDocs();
            final long numDocsSource = recoverySourceMetadata.getNumDocs();
            if (numDocsTarget != numDocsSource) {
                throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget + "(" + request.targetNode().getName() + ")");
            }
            // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
            // so we don't return here
            logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", recoverySourceSyncId);
        } else {
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("recovery [phase1]: not recovering [{}], exist in local store and has checksum [{}]," + " size [{}]", md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            List<StoreFileMetaData> phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size());
            phase1Files.addAll(diff.different);
            phase1Files.addAll(diff.missing);
            for (StoreFileMetaData md : phase1Files) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {
                    logger.trace("recovery [phase1]: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
                } else {
                    logger.trace("recovery [phase1]: recovering [{}], does not exist in remote", md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;
            logger.trace("recovery [phase1]: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
            cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, translogView.totalOperations()));
            // factory for per-file output streams: each file is sent through a buffered
            // RecoveryOutputStream in chunks of chunkSizeInBytes
            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
            sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
            // clean up the target so that files which are not part of the recovered commit are deleted
            try {
                cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata));
            } catch (RemoteTransportException | IOException targetException) {
                final IOException corruptIndexException;
                // if the failure wraps an index corruption, check the source's own files first:
                // the corruption may have originated locally, e.g. from old segments without
                // checksums or length-only checks
                if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) {
                    try {
                        final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
                        StoreFileMetaData[] metadata = StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]);
                        ArrayUtil.timSort(metadata, (o1, o2) -> {
                            // check small files first
                            return Long.compare(o1.length(), o2.length());
                        });
                        for (StoreFileMetaData md : metadata) {
                            cancellableThreads.checkForCancel();
                            logger.debug("checking integrity for file {} after remove corruption exception", md);
                            if (store.checkIntegrityNoException(md) == false) {
                                // we are corrupted on the primary -- fail!
                                shard.failShard("recovery", corruptIndexException);
                                logger.warn("Corrupted file detected {} checksum mismatch", md);
                                throw corruptIndexException;
                            }
                        }
                    } catch (IOException ex) {
                        targetException.addSuppressed(ex);
                        throw targetException;
                    }
                    // corruption has happened on the way to replica
                    RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null);
                    exception.addSuppressed(targetException);
                    logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", shard.shardId(), request.targetNode()), corruptIndexException);
                    throw exception;
                } else {
                    throw targetException;
                }
            }
        }
        logger.trace("recovery [phase1]: took [{}]", stopWatch.totalTime());
        response.phase1Time = stopWatch.totalTime().millis();
    } catch (Exception e) {
        throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
        store.decRef();
    }
}
Also used : IndexCommit(org.apache.lucene.index.IndexCommit) CancellableThreads(org.elasticsearch.common.util.CancellableThreads) IndexShardClosedException(org.elasticsearch.index.shard.IndexShardClosedException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) Nullable(org.elasticsearch.common.Nullable) IndexShardRelocatedException(org.elasticsearch.index.shard.IndexShardRelocatedException) RecoveryEngineException(org.elasticsearch.index.engine.RecoveryEngineException) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) Function(java.util.function.Function) Supplier(java.util.function.Supplier) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) BufferedOutputStream(java.io.BufferedOutputStream) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) StoreFileMetaData(org.elasticsearch.index.store.StoreFileMetaData) Settings(org.elasticsearch.common.settings.Settings) SequenceNumbersService(org.elasticsearch.index.seqno.SequenceNumbersService) Store(org.elasticsearch.index.store.Store) LocalCheckpointTracker(org.elasticsearch.index.seqno.LocalCheckpointTracker) Streams(org.elasticsearch.common.io.Streams) StreamSupport(java.util.stream.StreamSupport) IOContext(org.apache.lucene.store.IOContext) Releasable(org.elasticsearch.common.lease.Releasable) Loggers(org.elasticsearch.common.logging.Loggers) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) OutputStream(java.io.OutputStream) ArrayUtil(org.apache.lucene.util.ArrayUtil) IndexShardState(org.elasticsearch.index.shard.IndexShardState) IndexInput(org.apache.lucene.store.IndexInput) IndexShard(org.elasticsearch.index.shard.IndexShard) IOUtils(org.apache.lucene.util.IOUtils) IOException(java.io.IOException) StopWatch(org.elasticsearch.common.StopWatch) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) RemoteTransportException(org.elasticsearch.transport.RemoteTransportException) List(java.util.List) Logger(org.apache.logging.log4j.Logger) InputStreamIndexInput(org.elasticsearch.common.lucene.store.InputStreamIndexInput) Translog(org.elasticsearch.index.translog.Translog) RateLimiter(org.apache.lucene.store.RateLimiter)
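
Phase1's central decision is the file diff: a source file counts as identical (reusable) only when the target has a file with the same name, length, and checksum; anything else is different or missing and must be copied. The sketch below illustrates that classification with a plain record standing in for Lucene's StoreFileMetaData; it is not the Store.RecoveryDiff implementation:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public final class RecoveryDiffSketch {

    // illustrative stand-in for StoreFileMetaData: name, length, checksum
    record FileMeta(String name, long length, String checksum) {
    }

    record Diff(List<FileMeta> identical, List<FileMeta> different, List<FileMeta> missing) {
    }

    // classify source files against the target's metadata: only files with the
    // same length and checksum may be reused, everything else gets copied
    static Diff diff(Map<String, FileMeta> source, Map<String, FileMeta> target) {
        List<FileMeta> identical = new ArrayList<>();
        List<FileMeta> different = new ArrayList<>();
        List<FileMeta> missing = new ArrayList<>();
        for (FileMeta src : source.values()) {
            FileMeta tgt = target.get(src.name());
            if (tgt == null) {
                missing.add(src);
            } else if (tgt.length() == src.length() && tgt.checksum().equals(src.checksum())) {
                identical.add(src);
            } else {
                different.add(src);
            }
        }
        return new Diff(identical, different, missing);
    }
}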

Example 14 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

From the class RepositoriesService, method createRepository:

/**
     * Creates repository holder
     */
private Repository createRepository(RepositoryMetaData repositoryMetaData) {
    logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name());
    Repository.Factory factory = typesRegistry.get(repositoryMetaData.type());
    if (factory == null) {
        throw new RepositoryException(repositoryMetaData.name(), "repository type [" + repositoryMetaData.type() + "] does not exist");
    }
    try {
        Repository repository = factory.create(repositoryMetaData);
        repository.start();
        return repository;
    } catch (Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
        throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
    }
}
Also used : Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) IOException(java.io.IOException)
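
The method above is a type-keyed factory registry: resolve a factory by the repository type string, fail fast when the type is unknown, and log lazily if creation blows up. A stripped-down sketch with hypothetical Repository and Factory interfaces (the Elasticsearch originals carry more context than shown here):

import java.util.Map;

public final class FactoryRegistrySketch {

    interface Repository {
        void start();
    }

    interface Factory {
        Repository create(String name) throws Exception;
    }

    private final Map<String, Factory> typesRegistry;

    FactoryRegistrySketch(Map<String, Factory> typesRegistry) {
        this.typesRegistry = typesRegistry;
    }

    Repository createRepository(String type, String name) {
        Factory factory = typesRegistry.get(type);
        if (factory == null) {
            // unknown types fail fast, mirroring the RepositoryException above
            throw new IllegalArgumentException("repository type [" + type + "] does not exist");
        }
        try {
            Repository repository = factory.create(name);
            repository.start();
            return repository;
        } catch (Exception e) {
            throw new IllegalStateException("failed to create repository [" + name + "]", e);
        }
    }
}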

Example 15 with ParameterizedMessage

Use of org.apache.logging.log4j.message.ParameterizedMessage in project elasticsearch by elastic.

From the class RepositoriesService, method registerRepository:

/**
     * Registers new repository in the cluster
     * <p>
     * This method can be only called on the master node. It tries to create a new repository on the master
     * and if it was successful it adds new repository to cluster metadata.
     *
     * @param request  register repository request
     * @param listener register repository listener
     */
public void registerRepository(final RegisterRepositoryRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
    final RepositoryMetaData newRepositoryMetaData = new RepositoryMetaData(request.name, request.type, request.settings);
    final ActionListener<ClusterStateUpdateResponse> registrationListener;
    if (request.verify) {
        registrationListener = new VerifyingRegisterRepositoryListener(request.name, listener);
    } else {
        registrationListener = listener;
    }
    clusterService.submitStateUpdateTask(request.cause, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, registrationListener) {

        @Override
        protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
            return new ClusterStateUpdateResponse(acknowledged);
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws IOException {
            ensureRepositoryNotInUse(currentState, request.name);
            // Trying to create the new repository on master to make sure it works
            if (!registerRepository(newRepositoryMetaData)) {
                // The new repository has the same settings as the old one - ignore
                return currentState;
            }
            MetaData metaData = currentState.metaData();
            MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
            RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
            if (repositories == null) {
                logger.info("put repository [{}]", request.name);
                repositories = new RepositoriesMetaData(new RepositoryMetaData(request.name, request.type, request.settings));
            } else {
                boolean found = false;
                List<RepositoryMetaData> repositoriesMetaData = new ArrayList<>(repositories.repositories().size() + 1);
                for (RepositoryMetaData repositoryMetaData : repositories.repositories()) {
                    if (repositoryMetaData.name().equals(newRepositoryMetaData.name())) {
                        found = true;
                        repositoriesMetaData.add(newRepositoryMetaData);
                    } else {
                        repositoriesMetaData.add(repositoryMetaData);
                    }
                }
                if (!found) {
                    logger.info("put repository [{}]", request.name);
                    repositoriesMetaData.add(new RepositoryMetaData(request.name, request.type, request.settings));
                } else {
                    logger.info("update repository [{}]", request.name);
                }
                repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()]));
            }
            mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
            return ClusterState.builder(currentState).metaData(mdBuilder).build();
        }

        @Override
        public void onFailure(String source, Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e);
            super.onFailure(source, e);
        }

        @Override
        public boolean mustAck(DiscoveryNode discoveryNode) {
            return discoveryNode.isMasterNode();
        }
    });
}
Also used : RepositoriesMetaData(org.elasticsearch.cluster.metadata.RepositoriesMetaData) ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) IOException(java.io.IOException) MetaData(org.elasticsearch.cluster.metadata.MetaData) RepositoryMetaData(org.elasticsearch.cluster.metadata.RepositoryMetaData) ArrayList(java.util.ArrayList) List(java.util.List) Supplier(org.apache.logging.log4j.util.Supplier) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ClusterStateUpdateResponse(org.elasticsearch.cluster.ack.ClusterStateUpdateResponse)
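
Strip away the cluster-state plumbing and the update in execute() reduces to a replace-or-append over the repositories list: swap out the entry with a matching name, or append a new one. A self-contained sketch of just that step (RepoMeta is a hypothetical stand-in for RepositoryMetaData):

import java.util.ArrayList;
import java.util.List;

public final class ReplaceOrAppendSketch {

    record RepoMeta(String name, String type) {
    }

    // returns a new list in which the entry matching newMeta's name is
    // replaced, or newMeta is appended if no such entry exists
    static List<RepoMeta> replaceOrAppend(List<RepoMeta> existing, RepoMeta newMeta) {
        List<RepoMeta> result = new ArrayList<>(existing.size() + 1);
        boolean found = false;
        for (RepoMeta meta : existing) {
            if (meta.name().equals(newMeta.name())) {
                result.add(newMeta); // update the existing registration
                found = true;
            } else {
                result.add(meta);
            }
        }
        if (!found) {
            result.add(newMeta); // register a brand-new repository
        }
        return result;
    }
}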

Aggregations

ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage) 131
Supplier (org.apache.logging.log4j.util.Supplier) 90
IOException (java.io.IOException) 75
ElasticsearchException (org.elasticsearch.ElasticsearchException) 38
ArrayList (java.util.ArrayList) 28
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode) 26
ClusterState (org.elasticsearch.cluster.ClusterState) 25
HashMap (java.util.HashMap) 16
TimeValue (org.elasticsearch.common.unit.TimeValue) 14
TransportException (org.elasticsearch.transport.TransportException) 14
List (java.util.List) 13
Supplier (java.util.function.Supplier) 13
Map (java.util.Map) 12
CountDownLatch (java.util.concurrent.CountDownLatch) 12
ExecutionException (java.util.concurrent.ExecutionException) 12
Settings (org.elasticsearch.common.settings.Settings) 12
EsRejectedExecutionException (org.elasticsearch.common.util.concurrent.EsRejectedExecutionException) 12
AbstractRunnable (org.elasticsearch.common.util.concurrent.AbstractRunnable) 11
Index (org.elasticsearch.index.Index) 11
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList) 10