Search in sources :

Example 1 with BlobStoreIndexShardSnapshot

use of org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot in project crate by crate.

In class BlobStoreRepository, the method snapshotShard:

@Override
public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, boolean writeShardGens, ActionListener<String> listener) {
    final ShardId shardId = store.shardId();
    final long startTime = threadPool.absoluteTimeInMillis();
    try {
        final String generation = snapshotStatus.generation();
        LOGGER.debug("[{}] [{}] snapshot to [{}] [{}] ...", shardId, snapshotId, metadata.name(), generation);
        final BlobContainer shardContainer = shardContainer(indexId, shardId);
        final Set<String> blobs;
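        // With no shard generation handed in, the latest index-N blob has to be found by listing the shard container;
        // otherwise the exact blob name is already known and no listing is needed.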
        if (generation == null) {
            try {
                blobs = shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX).keySet();
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
            }
        } else {
            blobs = Collections.singleton(INDEX_FILE_PREFIX + generation);
        }
        Tuple<BlobStoreIndexShardSnapshots, String> tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer, generation);
        BlobStoreIndexShardSnapshots snapshots = tuple.v1();
        final String fileListGeneration = tuple.v2();
        if (snapshots.snapshots().stream().anyMatch(sf -> sf.snapshot().equals(snapshotId.getName()))) {
            throw new IndexShardSnapshotFailedException(shardId, "Duplicate snapshot name [" + snapshotId.getName() + "] detected, aborting");
        }
        final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>();
        final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new LinkedBlockingQueue<>();
        store.incRef();
        final Collection<String> fileNames;
        final Store.MetadataSnapshot metadataFromStore;
        try {
            // TODO apparently we don't use the MetadataSnapshot#recoveryDiff(...) here, but we should
            try {
                LOGGER.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit);
                metadataFromStore = store.getMetadata(snapshotIndexCommit);
                fileNames = snapshotIndexCommit.getFileNames();
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
            }
        } finally {
            store.decRef();
        }
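        // Compare the commit's files against the files recorded in earlier shard snapshots to work out the incremental upload set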
        int indexIncrementalFileCount = 0;
        int indexTotalNumberOfFiles = 0;
        long indexIncrementalSize = 0;
        long indexTotalFileCount = 0;
        for (String fileName : fileNames) {
            if (snapshotStatus.isAborted()) {
                LOGGER.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
                throw new IndexShardSnapshotFailedException(shardId, "Aborted");
            }
            LOGGER.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
            final StoreFileMetadata md = metadataFromStore.get(fileName);
            BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null;
            List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
            if (filesInfo != null) {
                for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
                    if (fileInfo.isSame(md)) {
                        // a commit point file with the same name, size and checksum was already copied to repository
                        // we will reuse it for this snapshot
                        existingFileInfo = fileInfo;
                        break;
                    }
                }
            }
            indexTotalFileCount += md.length();
            indexTotalNumberOfFiles++;
            if (existingFileInfo == null) {
                indexIncrementalFileCount++;
                indexIncrementalSize += md.length();
                // create a new FileInfo
                BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize());
                indexCommitPointFiles.add(snapshotFileInfo);
                filesToSnapshot.add(snapshotFileInfo);
            } else {
                indexCommitPointFiles.add(existingFileInfo);
            }
        }
        snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount);
        assert indexIncrementalFileCount == filesToSnapshot.size();
        final StepListener<Collection<Void>> allFilesUploadedListener = new StepListener<>();
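        // Runs once every queued file upload has completed: writes the per-snapshot metadata blob and the new index-N shard blob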
        allFilesUploadedListener.whenComplete(v -> {
            final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration());
            // now create and write the commit point
            final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), indexCommitPointFiles, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), lastSnapshotStatus.getIncrementalFileCount(), lastSnapshotStatus.getIncrementalSize());
            LOGGER.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
            try {
                indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID(), false);
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
            }
            // build a new BlobStoreIndexShardSnapshots instance that includes this snapshot and all previously saved ones
            List<SnapshotFiles> newSnapshotsList = new ArrayList<>();
            newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
            for (SnapshotFiles point : snapshots) {
                newSnapshotsList.add(point);
            }
            final List<String> blobsToDelete;
            final String indexGeneration;
            if (writeShardGens) {
                indexGeneration = UUIDs.randomBase64UUID();
                blobsToDelete = Collections.emptyList();
            } else {
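                // Legacy numeric generations: bump the index-N counter and remember the superseded index-N blobs for deletion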
                indexGeneration = Long.toString(Long.parseLong(fileListGeneration) + 1);
                // Delete all previous index-N blobs
                blobsToDelete = blobs.stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList());
                assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))).max().orElse(-1L) < Long.parseLong(indexGeneration) : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N blobs " + blobsToDelete;
            }
            try {
                writeShardIndexBlob(shardContainer, indexGeneration, new BlobStoreIndexShardSnapshots(newSnapshotsList));
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e);
            }
            if (writeShardGens == false) {
                try {
                    shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete);
                } catch (IOException e) {
                    LOGGER.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", snapshotId, shardId), e);
                }
            }
            snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), indexGeneration);
            listener.onResponse(indexGeneration);
        }, listener::onFailure);
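        // Nothing new to upload: complete the step listener right away so the shard metadata above is still written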
        if (indexIncrementalFileCount == 0) {
            allFilesUploadedListener.onResponse(Collections.emptyList());
            return;
        }
        final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
        int maximumPoolSize = executor instanceof ThreadPoolExecutor ? ((ThreadPoolExecutor) executor).getMaximumPoolSize() : 1;
        // Start at most as many workers as fit into the snapshot pool at once
        final int workers = Math.min(maximumPoolSize, indexIncrementalFileCount);
        final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, workers, allFilesUploadedListener);
        for (int i = 0; i < workers; ++i) {
            executor.execute(ActionRunnable.run(filesListener, () -> {
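                // Each worker drains the shared queue, so at most `workers` file uploads run concurrently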
                BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
                if (snapshotFileInfo != null) {
                    store.incRef();
                    try {
                        do {
                            snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store);
                            snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
                        } while (snapshotFileInfo != null);
                    } finally {
                        store.decRef();
                    }
                }
            }));
        }
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
Also used : IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) ShardId(org.elasticsearch.index.shard.ShardId) SnapshotFiles(org.elasticsearch.index.snapshots.blobstore.SnapshotFiles) IndexShardSnapshotFailedException(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException) ByteSizeUnit(org.elasticsearch.common.unit.ByteSizeUnit) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Map(java.util.Map) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) RateLimitingInputStream(org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream) IOContext(org.apache.lucene.store.IOContext) InvalidArgumentException(io.crate.exceptions.InvalidArgumentException) SnapshotDeletionsInProgress(org.elasticsearch.cluster.SnapshotDeletionsInProgress) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) BlockingQueue(java.util.concurrent.BlockingQueue) StandardCharsets(java.nio.charset.StandardCharsets) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Stream(java.util.stream.Stream) Logger(org.apache.logging.log4j.Logger) InputStreamIndexInput(org.elasticsearch.common.lucene.store.InputStreamIndexInput) BlobStore(org.elasticsearch.common.blobstore.BlobStore) SnapshotException(org.elasticsearch.snapshots.SnapshotException) FileInfo.canonicalName(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) IndexCommit(org.apache.lucene.index.IndexCommit) XContentFactory(org.elasticsearch.common.xcontent.XContentFactory) SnapshotId(org.elasticsearch.snapshots.SnapshotId) Tuple(io.crate.common.collections.Tuple) ShardGenerations(org.elasticsearch.repositories.ShardGenerations) ClusterService(org.elasticsearch.cluster.service.ClusterService) SnapshotShardFailure(org.elasticsearch.snapshots.SnapshotShardFailure) BytesStreamOutput(org.elasticsearch.common.io.stream.BytesStreamOutput) LoggingDeprecationHandler(org.elasticsearch.common.xcontent.LoggingDeprecationHandler) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) Metadata(org.elasticsearch.cluster.metadata.Metadata) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) Store(org.elasticsearch.index.store.Store) Nullable(javax.annotation.Nullable) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) Executor(java.util.concurrent.Executor) IOException(java.io.IOException) XContentParser(org.elasticsearch.common.xcontent.XContentParser) AtomicLong(java.util.concurrent.atomic.AtomicLong) CounterMetric(org.elasticsearch.common.metrics.CounterMetric) ActionListener(org.elasticsearch.action.ActionListener) FsBlobContainer(org.elasticsearch.common.blobstore.fs.FsBlobContainer) SnapshotMissingException(org.elasticsearch.snapshots.SnapshotMissingException) NoSuchFileException(java.nio.file.NoSuchFileException) ConcurrentSnapshotExecutionException(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException) SnapshotInfo(org.elasticsearch.snapshots.SnapshotInfo) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) StoreFileMetadata(org.elasticsearch.index.store.StoreFileMetadata) 
RepositoryMetadata(org.elasticsearch.cluster.metadata.RepositoryMetadata) Settings(org.elasticsearch.common.settings.Settings) Locale(java.util.Locale) Streams(org.elasticsearch.common.io.Streams) ThreadPool(org.elasticsearch.threadpool.ThreadPool) IndexShardRestoreFailedException(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException) ActionRunnable(org.elasticsearch.action.ActionRunnable) StepListener(org.elasticsearch.action.StepListener) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) RepositoryException(org.elasticsearch.repositories.RepositoryException) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) NotXContentException(org.elasticsearch.common.compress.NotXContentException) Setting(org.elasticsearch.common.settings.Setting) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BlobMetadata(org.elasticsearch.common.blobstore.BlobMetadata) BytesReference(org.elasticsearch.common.bytes.BytesReference) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) IndexShardSnapshotException(org.elasticsearch.index.snapshots.IndexShardSnapshotException) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) Version(org.elasticsearch.Version) RecoveryState(org.elasticsearch.indices.recovery.RecoveryState) RepositoryData(org.elasticsearch.repositories.RepositoryData) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) XContentType(org.elasticsearch.common.xcontent.XContentType) IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) Index(org.elasticsearch.index.Index) Lucene(org.elasticsearch.common.lucene.Lucene) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) IndexId(org.elasticsearch.repositories.IndexId) FilterInputStream(java.io.FilterInputStream) RepositoriesMetadata(org.elasticsearch.cluster.metadata.RepositoriesMetadata) RepositoryVerificationException(org.elasticsearch.repositories.RepositoryVerificationException) BlobPath(org.elasticsearch.common.blobstore.BlobPath) IndexOutput(org.apache.lucene.store.IndexOutput) Numbers(org.elasticsearch.common.Numbers) Repository(org.elasticsearch.repositories.Repository) SnapshotsService(org.elasticsearch.snapshots.SnapshotsService) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) AbstractLifecycleComponent(org.elasticsearch.common.component.AbstractLifecycleComponent) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) SlicedInputStream(org.elasticsearch.index.snapshots.blobstore.SlicedInputStream) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) BlobStoreIndexShardSnapshots(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) Collections(java.util.Collections) LogManager(org.apache.logging.log4j.LogManager) RepositoryOperation(org.elasticsearch.repositories.RepositoryOperation) Snapshot(org.elasticsearch.snapshots.Snapshot) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) ArrayList(java.util.ArrayList) BlobStore(org.elasticsearch.common.blobstore.BlobStore) 
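For orientation, here is a minimal calling sketch. It is not code from either project, and every identifier in it (repository, store, mapperService, snapshotId, indexId, commit, status, logger) is a hypothetical placeholder; it only shows how the shard generation produced by snapshotShard can be consumed through the same StepListener pattern used above.

StepListener<String> shardGenListener = new StepListener<>();
// writeShardGens = true selects the shard-generation (UUID) naming path shown in the method above
repository.snapshotShard(store, mapperService, snapshotId, indexId, commit,
                         status, true, shardGenListener);
shardGenListener.whenComplete(
    generation -> logger.info("shard snapshot finished, new shard generation [{}]", generation),
    e -> logger.warn("shard snapshot failed", e));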

Example 2 with BlobStoreIndexShardSnapshot

use of org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot in project elasticsearch by elastic.

In class BlobStoreRepository, the method getShardSnapshotStatus:

@Override
public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) {
    Context context = new Context(snapshotId, version, indexId, shardId);
    BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot();
    IndexShardSnapshotStatus status = new IndexShardSnapshotStatus();
    status.updateStage(IndexShardSnapshotStatus.Stage.DONE);
    status.startTime(snapshot.startTime());
    status.files(snapshot.numberOfFiles(), snapshot.totalSize());
    // The snapshot is done which means the number of processed files is the same as total
    status.processedFiles(snapshot.numberOfFiles(), snapshot.totalSize());
    status.time(snapshot.time());
    return status;
}
Also used : IOContext(org.apache.lucene.store.IOContext) IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot)
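A short usage sketch, again hypothetical rather than project code (repository, snapshotId, version, indexId and shardId are placeholders): since the returned status is populated as DONE, callers normally just hand it back, for example when answering a snapshot status request.

IndexShardSnapshotStatus shardStatus = repository.getShardSnapshotStatus(snapshotId, version, indexId, shardId);
// as initialized above: the stage is Stage.DONE and the processed counters equal the totals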

Example 3 with BlobStoreIndexShardSnapshot

use of org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot in project crate by crate.

In class BlobStoreRepository, the method restoreShard:

@Override
public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState, ActionListener<Void> listener) {
    final ShardId shardId = store.shardId();
    final ActionListener<Void> restoreListener = ActionListener.delegateResponse(listener, (l, e) -> l.onFailure(new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e)));
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    final BlobContainer container = shardContainer(indexId, snapshotShardId);
    executor.execute(ActionRunnable.wrap(restoreListener, l -> {
        final BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(container, snapshotId);
        final SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
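        // FileRestoreContext compares the snapshot's file list against the local store and only recovers what is missing or different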
        new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState) {

            @Override
            protected void restoreFiles(List<BlobStoreIndexShardSnapshot.FileInfo> filesToRecover, Store store, ActionListener<Void> listener) {
                if (filesToRecover.isEmpty()) {
                    listener.onResponse(null);
                } else {
                    // Start at most as many workers as fit into the snapshot pool at once
                    int maxPoolSize = executor instanceof ThreadPoolExecutor ? ((ThreadPoolExecutor) executor).getMaximumPoolSize() : 1;
                    final int workers = Math.min(maxPoolSize, snapshotFiles.indexFiles().size());
                    final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files = new LinkedBlockingQueue<>(filesToRecover);
                    final ActionListener<Void> allFilesListener = fileQueueListener(files, workers, ActionListener.map(listener, v -> null));
                    // restore the files from the snapshot to the Lucene store
                    for (int i = 0; i < workers; ++i) {
                        executor.execute(ActionRunnable.run(allFilesListener, () -> {
                            store.incRef();
                            try {
                                BlobStoreIndexShardSnapshot.FileInfo fileToRecover;
                                while ((fileToRecover = files.poll(0L, TimeUnit.MILLISECONDS)) != null) {
                                    restoreFile(fileToRecover, store);
                                }
                            } finally {
                                store.decRef();
                            }
                        }));
                    }
                }
            }

            private void restoreFile(BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store store) throws IOException {
                boolean success = false;
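                // Reads the file's parts from the repository as one continuous stream, optionally throttled by the restore rate limiter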
                try (InputStream stream = maybeRateLimit(new SlicedInputStream(fileInfo.numberOfParts()) {

                    @Override
                    protected InputStream openSlice(long slice) throws IOException {
                        return container.readBlob(fileInfo.partName(slice));
                    }
                }, restoreRateLimiter, restoreRateLimitingTimeInNanos)) {
                    try (IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) {
                        final byte[] buffer = new byte[BUFFER_SIZE];
                        int length;
                        while ((length = stream.read(buffer)) > 0) {
                            indexOutput.writeBytes(buffer, 0, length);
                            recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length);
                        }
                        Store.verify(indexOutput);
                        indexOutput.close();
                        store.directory().sync(Collections.singleton(fileInfo.physicalName()));
                        success = true;
                    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
                        try {
                            store.markStoreCorrupted(ex);
                        } catch (IOException e) {
                            LOGGER.warn("store cannot be marked as corrupted", e);
                        }
                        throw ex;
                    } finally {
                        if (success == false) {
                            store.deleteQuiet(fileInfo.physicalName());
                        }
                    }
                }
            }
        }.restore(snapshotFiles, store, l);
    }));
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) SnapshotFiles(org.elasticsearch.index.snapshots.blobstore.SnapshotFiles) IndexShardSnapshotFailedException(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException) ByteSizeUnit(org.elasticsearch.common.unit.ByteSizeUnit) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Map(java.util.Map) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) RateLimitingInputStream(org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream) IOContext(org.apache.lucene.store.IOContext) InvalidArgumentException(io.crate.exceptions.InvalidArgumentException) SnapshotDeletionsInProgress(org.elasticsearch.cluster.SnapshotDeletionsInProgress) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) BlockingQueue(java.util.concurrent.BlockingQueue) StandardCharsets(java.nio.charset.StandardCharsets) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Stream(java.util.stream.Stream) Logger(org.apache.logging.log4j.Logger) InputStreamIndexInput(org.elasticsearch.common.lucene.store.InputStreamIndexInput) BlobStore(org.elasticsearch.common.blobstore.BlobStore) SnapshotException(org.elasticsearch.snapshots.SnapshotException) FileInfo.canonicalName(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) IndexCommit(org.apache.lucene.index.IndexCommit) XContentFactory(org.elasticsearch.common.xcontent.XContentFactory) SnapshotId(org.elasticsearch.snapshots.SnapshotId) Tuple(io.crate.common.collections.Tuple) ShardGenerations(org.elasticsearch.repositories.ShardGenerations) ClusterService(org.elasticsearch.cluster.service.ClusterService) SnapshotShardFailure(org.elasticsearch.snapshots.SnapshotShardFailure) BytesStreamOutput(org.elasticsearch.common.io.stream.BytesStreamOutput) LoggingDeprecationHandler(org.elasticsearch.common.xcontent.LoggingDeprecationHandler) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) Metadata(org.elasticsearch.cluster.metadata.Metadata) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) Store(org.elasticsearch.index.store.Store) Nullable(javax.annotation.Nullable) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) Executor(java.util.concurrent.Executor) IOException(java.io.IOException) XContentParser(org.elasticsearch.common.xcontent.XContentParser) AtomicLong(java.util.concurrent.atomic.AtomicLong) CounterMetric(org.elasticsearch.common.metrics.CounterMetric) ActionListener(org.elasticsearch.action.ActionListener) FsBlobContainer(org.elasticsearch.common.blobstore.fs.FsBlobContainer) SnapshotMissingException(org.elasticsearch.snapshots.SnapshotMissingException) NoSuchFileException(java.nio.file.NoSuchFileException) ConcurrentSnapshotExecutionException(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException) SnapshotInfo(org.elasticsearch.snapshots.SnapshotInfo) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) StoreFileMetadata(org.elasticsearch.index.store.StoreFileMetadata) RepositoryMetadata(org.elasticsearch.cluster.metadata.RepositoryMetadata) Settings(org.elasticsearch.common.settings.Settings) 
Locale(java.util.Locale) Streams(org.elasticsearch.common.io.Streams) ThreadPool(org.elasticsearch.threadpool.ThreadPool) IndexShardRestoreFailedException(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException) ActionRunnable(org.elasticsearch.action.ActionRunnable) StepListener(org.elasticsearch.action.StepListener) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) RepositoryException(org.elasticsearch.repositories.RepositoryException) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) NotXContentException(org.elasticsearch.common.compress.NotXContentException) Setting(org.elasticsearch.common.settings.Setting) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BlobMetadata(org.elasticsearch.common.blobstore.BlobMetadata) BytesReference(org.elasticsearch.common.bytes.BytesReference) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) IndexShardSnapshotException(org.elasticsearch.index.snapshots.IndexShardSnapshotException) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) Version(org.elasticsearch.Version) RecoveryState(org.elasticsearch.indices.recovery.RecoveryState) RepositoryData(org.elasticsearch.repositories.RepositoryData) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) XContentType(org.elasticsearch.common.xcontent.XContentType) IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) Index(org.elasticsearch.index.Index) Lucene(org.elasticsearch.common.lucene.Lucene) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) IndexId(org.elasticsearch.repositories.IndexId) FilterInputStream(java.io.FilterInputStream) RepositoriesMetadata(org.elasticsearch.cluster.metadata.RepositoriesMetadata) RepositoryVerificationException(org.elasticsearch.repositories.RepositoryVerificationException) BlobPath(org.elasticsearch.common.blobstore.BlobPath) IndexOutput(org.apache.lucene.store.IndexOutput) Numbers(org.elasticsearch.common.Numbers) Repository(org.elasticsearch.repositories.Repository) SnapshotsService(org.elasticsearch.snapshots.SnapshotsService) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) AbstractLifecycleComponent(org.elasticsearch.common.component.AbstractLifecycleComponent) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) SlicedInputStream(org.elasticsearch.index.snapshots.blobstore.SlicedInputStream) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) BlobStoreIndexShardSnapshots(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) Collections(java.util.Collections) LogManager(org.apache.logging.log4j.LogManager) RepositoryOperation(org.elasticsearch.repositories.RepositoryOperation) Snapshot(org.elasticsearch.snapshots.Snapshot) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) IndexShardRestoreFailedException(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException) BlobStore(org.elasticsearch.common.blobstore.BlobStore) Store(org.elasticsearch.index.store.Store) 
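As a rough calling sketch (not project code; repository, store, snapshotId, indexId, snapshotShardId and recoveryState are placeholders), the listener-based restoreShard can be driven synchronously, for example in a test, with a PlainActionFuture, which implements ActionListener<Void>:

PlainActionFuture<Void> future = PlainActionFuture.newFuture();
repository.restoreShard(store, snapshotId, indexId, snapshotShardId, recoveryState, future);
// blocks until every file of the shard has been restored into the store, or rethrows the failure
future.actionGet();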

Aggregations

IOContext (org.apache.lucene.store.IOContext) 3
Tuple (io.crate.common.collections.Tuple) 2
InvalidArgumentException (io.crate.exceptions.InvalidArgumentException) 2
FilterInputStream (java.io.FilterInputStream) 2
IOException (java.io.IOException) 2
InputStream (java.io.InputStream) 2
StandardCharsets (java.nio.charset.StandardCharsets) 2
NoSuchFileException (java.nio.file.NoSuchFileException) 2
ArrayList (java.util.ArrayList) 2
Collection (java.util.Collection) 2
Collections (java.util.Collections) 2
List (java.util.List) 2
Locale (java.util.Locale) 2
Map (java.util.Map) 2
Set (java.util.Set) 2
BlockingQueue (java.util.concurrent.BlockingQueue) 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 2
Executor (java.util.concurrent.Executor) 2
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue) 2
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor) 2