
Example 1 with BlobStoreIndexShardSnapshot

Use of org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot in project OpenSearch by opensearch-project.

From class CloneSnapshotIT, method testShardClone:

public void testShardClone() throws Exception {
    internalCluster().startMasterOnlyNode();
    internalCluster().startDataOnlyNode();
    final String repoName = "repo-name";
    final Path repoPath = randomRepoPath();
    createRepository(repoName, "fs", repoPath);
    final boolean useBwCFormat = randomBoolean();
    if (useBwCFormat) {
        initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT);
        // Re-create repo to clear repository data cache
        assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get());
        createRepository(repoName, "fs", repoPath);
    }
    final String indexName = "test-index";
    createIndexWithRandomDocs(indexName, randomIntBetween(5, 10));
    final String sourceSnapshot = "source-snapshot";
    final SnapshotInfo sourceSnapshotInfo = createFullSnapshot(repoName, sourceSnapshot);
    final BlobStoreRepository repository = (BlobStoreRepository) internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName);
    final RepositoryData repositoryData = getRepositoryData(repoName);
    final IndexId indexId = repositoryData.resolveIndexId(indexName);
    final int shardId = 0;
    final RepositoryShardId repositoryShardId = new RepositoryShardId(indexId, shardId);
    final SnapshotId targetSnapshotId = new SnapshotId("target-snapshot", UUIDs.randomBase64UUID(random()));
    final String currentShardGen;
    if (useBwCFormat) {
        currentShardGen = null;
    } else {
        currentShardGen = repositoryData.shardGenerations().getShardGen(indexId, shardId);
    }
    final String newShardGeneration = PlainActionFuture.get(f -> repository.cloneShardSnapshot(sourceSnapshotInfo.snapshotId(), targetSnapshotId, repositoryShardId, currentShardGen, f));
    if (useBwCFormat) {
        final long gen = Long.parseLong(newShardGeneration);
        // Initial snapshot brought it to 0, clone increments it to 1
        assertEquals(gen, 1L);
    }
    final BlobStoreIndexShardSnapshot targetShardSnapshot = readShardSnapshot(repository, repositoryShardId, targetSnapshotId);
    final BlobStoreIndexShardSnapshot sourceShardSnapshot = readShardSnapshot(repository, repositoryShardId, sourceSnapshotInfo.snapshotId());
    assertThat(targetShardSnapshot.incrementalFileCount(), is(0));
    final List<BlobStoreIndexShardSnapshot.FileInfo> sourceFiles = sourceShardSnapshot.indexFiles();
    final List<BlobStoreIndexShardSnapshot.FileInfo> targetFiles = targetShardSnapshot.indexFiles();
    final int fileCount = sourceFiles.size();
    assertEquals(fileCount, targetFiles.size());
    for (int i = 0; i < fileCount; i++) {
        assertTrue(sourceFiles.get(i).isSame(targetFiles.get(i)));
    }
    final BlobStoreIndexShardSnapshots shardMetadata = readShardGeneration(repository, repositoryShardId, newShardGeneration);
    final List<SnapshotFiles> snapshotFiles = shardMetadata.snapshots();
    assertThat(snapshotFiles, hasSize(2));
    assertTrue(snapshotFiles.get(0).isSame(snapshotFiles.get(1)));
    // verify that repeated cloning is idempotent
    final String newShardGeneration2 = PlainActionFuture.get(f -> repository.cloneShardSnapshot(sourceSnapshotInfo.snapshotId(), targetSnapshotId, repositoryShardId, newShardGeneration, f));
    assertEquals(newShardGeneration, newShardGeneration2);
}
Also used : Path(java.nio.file.Path) IndexId(org.opensearch.repositories.IndexId) BlobStoreIndexShardSnapshot(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) Matchers.containsString(org.hamcrest.Matchers.containsString) RepositoryShardId(org.opensearch.repositories.RepositoryShardId) RepositoryData(org.opensearch.repositories.RepositoryData) BlobStoreIndexShardSnapshots(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) SnapshotFiles(org.opensearch.index.snapshots.blobstore.SnapshotFiles) BlobStoreRepository(org.opensearch.repositories.blobstore.BlobStoreRepository)
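
The assertions in this test use only the public accessors of BlobStoreIndexShardSnapshot. For reference, a minimal standalone sketch of the same per-file comparison follows; the helper name referencesSameFiles is illustrative (it is not part of CloneSnapshotIT or its base class), and the java.util.List and BlobStoreIndexShardSnapshot imports are those shown in the "Also used" list above.

// Illustrative helper: does the cloned (target) shard snapshot reference exactly the same
// files as its source, without having uploaded any new data blobs?
static boolean referencesSameFiles(BlobStoreIndexShardSnapshot source, BlobStoreIndexShardSnapshot target) {
    // A clone adds no new data blobs, so its incremental file count should be zero.
    if (target.incrementalFileCount() != 0) {
        return false;
    }
    final List<BlobStoreIndexShardSnapshot.FileInfo> sourceFiles = source.indexFiles();
    final List<BlobStoreIndexShardSnapshot.FileInfo> targetFiles = target.indexFiles();
    if (sourceFiles.size() != targetFiles.size()) {
        return false;
    }
    for (int i = 0; i < sourceFiles.size(); i++) {
        // FileInfo.isSame is the same check used in the assertion loop of the test above.
        if (sourceFiles.get(i).isSame(targetFiles.get(i)) == false) {
            return false;
        }
    }
    return true;
}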

Example 2 with BlobStoreIndexShardSnapshot

Use of org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot in project OpenSearch by opensearch-project.

From class BlobStoreRepository, method snapshotShard:

@Override
public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus, Version repositoryMetaVersion, Map<String, Object> userMetadata, ActionListener<String> listener) {
    if (isReadOnly()) {
        listener.onFailure(new RepositoryException(metadata.name(), "cannot snapshot shard on a readonly repository"));
        return;
    }
    final ShardId shardId = store.shardId();
    final long startTime = threadPool.absoluteTimeInMillis();
    try {
        final String generation = snapshotStatus.generation();
        logger.debug("[{}] [{}] snapshot to [{}] [{}] ...", shardId, snapshotId, metadata.name(), generation);
        final BlobContainer shardContainer = shardContainer(indexId, shardId);
        final Set<String> blobs;
        if (generation == null) {
            try {
                blobs = shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX).keySet();
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
            }
        } else {
            blobs = Collections.singleton(INDEX_FILE_PREFIX + generation);
        }
        Tuple<BlobStoreIndexShardSnapshots, String> tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer, generation);
        BlobStoreIndexShardSnapshots snapshots = tuple.v1();
        String fileListGeneration = tuple.v2();
        if (snapshots.snapshots().stream().anyMatch(sf -> sf.snapshot().equals(snapshotId.getName()))) {
            throw new IndexShardSnapshotFailedException(shardId, "Duplicate snapshot name [" + snapshotId.getName() + "] detected, aborting");
        }
        // First inspect all known SegmentInfos instances to see if we already have an equivalent commit in the repository
        final List<BlobStoreIndexShardSnapshot.FileInfo> filesFromSegmentInfos = Optional.ofNullable(shardStateIdentifier).map(id -> {
            for (SnapshotFiles snapshotFileSet : snapshots.snapshots()) {
                if (id.equals(snapshotFileSet.shardStateIdentifier())) {
                    return snapshotFileSet.indexFiles();
                }
            }
            return null;
        }).orElse(null);
        final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles;
        int indexIncrementalFileCount = 0;
        int indexTotalNumberOfFiles = 0;
        long indexIncrementalSize = 0;
        long indexTotalFileSize = 0;
        final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new LinkedBlockingQueue<>();
        // If we did not find a set of files that is equal to the current commit we determine the files to upload by comparing files
        // in the commit with files already in the repository
        if (filesFromSegmentInfos == null) {
            indexCommitPointFiles = new ArrayList<>();
            final Collection<String> fileNames;
            final Store.MetadataSnapshot metadataFromStore;
            try (Releasable ignored = incrementStoreRef(store, snapshotStatus, shardId)) {
                // TODO apparently we don't use MetadataSnapshot#recoveryDiff(...) here but we should
                try {
                    logger.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit);
                    metadataFromStore = store.getMetadata(snapshotIndexCommit);
                    fileNames = snapshotIndexCommit.getFileNames();
                } catch (IOException e) {
                    throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
                }
            }
            for (String fileName : fileNames) {
                if (snapshotStatus.isAborted()) {
                    logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
                    throw new AbortedSnapshotException();
                }
                logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
                final StoreFileMetadata md = metadataFromStore.get(fileName);
                BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null;
                List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
                if (filesInfo != null) {
                    for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
                        if (fileInfo.isSame(md)) {
                            // a commit point file with the same name, size and checksum was already copied to repository
                            // we will reuse it for this snapshot
                            existingFileInfo = fileInfo;
                            break;
                        }
                    }
                }
                // We can skip writing blobs where the metadata hash is equal to the blob's contents because we store the hash/contents
                // directly in the shard level metadata in this case
                final boolean needsWrite = md.hashEqualsContents() == false;
                indexTotalFileSize += md.length();
                indexTotalNumberOfFiles++;
                if (existingFileInfo == null) {
                    indexIncrementalFileCount++;
                    indexIncrementalSize += md.length();
                    // create a new FileInfo
                    BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo((needsWrite ? UPLOADED_DATA_BLOB_PREFIX : VIRTUAL_DATA_BLOB_PREFIX) + UUIDs.randomBase64UUID(), md, chunkSize());
                    indexCommitPointFiles.add(snapshotFileInfo);
                    if (needsWrite) {
                        filesToSnapshot.add(snapshotFileInfo);
                    }
                    assert needsWrite || assertFileContentsMatchHash(snapshotFileInfo, store);
                } else {
                    indexCommitPointFiles.add(existingFileInfo);
                }
            }
        } else {
            for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesFromSegmentInfos) {
                indexTotalNumberOfFiles++;
                indexTotalFileSize += fileInfo.length();
            }
            indexCommitPointFiles = filesFromSegmentInfos;
        }
        snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileSize);
        final String indexGeneration;
        final boolean writeShardGens = SnapshotsService.useShardGenerations(repositoryMetaVersion);
        // build a new BlobStoreIndexShardSnapshots instance that includes this snapshot and all previously saved ones
        List<SnapshotFiles> newSnapshotsList = new ArrayList<>();
        newSnapshotsList.add(new SnapshotFiles(snapshotId.getName(), indexCommitPointFiles, shardStateIdentifier));
        for (SnapshotFiles point : snapshots) {
            newSnapshotsList.add(point);
        }
        final BlobStoreIndexShardSnapshots updatedBlobStoreIndexShardSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList);
        final Runnable afterWriteSnapBlob;
        if (writeShardGens) {
            // When using shard generations we can safely write the index-${uuid} blob before writing out any of the actual data
            // for this shard since the uuid-named blob will simply not be referenced in case of error and thus we will never
            // reference a generation that has not had all its files fully uploaded.
            indexGeneration = UUIDs.randomBase64UUID();
            try {
                INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedBlobStoreIndexShardSnapshots, shardContainer, indexGeneration, compress);
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to write shard level snapshot metadata for [" + snapshotId + "] to [" + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) + "]", e);
            }
            afterWriteSnapBlob = () -> {
            };
        } else {
            // When not using shard generations we can only write the index-${N} blob after all other work for this shard has
            // completed.
            // Also, in case of numeric shard generations the data node has to take care of deleting old shard generations.
            final long newGen = Long.parseLong(fileListGeneration) + 1;
            indexGeneration = Long.toString(newGen);
            // Delete all previous index-N blobs
            final List<String> blobsToDelete = blobs.stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList());
            assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))).max().orElse(-1L) < Long.parseLong(indexGeneration) : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N blobs " + blobsToDelete;
            afterWriteSnapBlob = () -> {
                try {
                    writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots);
                } catch (IOException e) {
                    throw new IndexShardSnapshotFailedException(shardId, "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration) + "]", e);
                }
                try {
                    deleteFromContainer(shardContainer, blobsToDelete);
                } catch (IOException e) {
                    logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", snapshotId, shardId), e);
                }
            };
        }
        final StepListener<Collection<Void>> allFilesUploadedListener = new StepListener<>();
        allFilesUploadedListener.whenComplete(v -> {
            final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration());
            // now create and write the commit point
            logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
            try {
                INDEX_SHARD_SNAPSHOT_FORMAT.write(new BlobStoreIndexShardSnapshot(snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), indexCommitPointFiles, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), lastSnapshotStatus.getIncrementalFileCount(), lastSnapshotStatus.getIncrementalSize()), shardContainer, snapshotId.getUUID(), compress);
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
            }
            afterWriteSnapBlob.run();
            snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), indexGeneration);
            listener.onResponse(indexGeneration);
        }, listener::onFailure);
        if (indexIncrementalFileCount == 0) {
            allFilesUploadedListener.onResponse(Collections.emptyList());
            return;
        }
        final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
        // Start at most as many workers as fit into the snapshot pool at once
        final int workers = Math.min(threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(), indexIncrementalFileCount);
        final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, workers, allFilesUploadedListener);
        for (int i = 0; i < workers; ++i) {
            executeOneFileSnapshot(store, snapshotId, indexId, snapshotStatus, filesToSnapshot, executor, filesListener);
        }
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
Also used : Metadata(org.opensearch.cluster.metadata.Metadata) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AllocationService(org.opensearch.cluster.routing.allocation.AllocationService) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) Version(org.opensearch.Version) Strings(org.opensearch.common.Strings) AbortedSnapshotException(org.opensearch.snapshots.AbortedSnapshotException) GroupedActionListener(org.opensearch.action.support.GroupedActionListener) RecoveryState(org.opensearch.indices.recovery.RecoveryState) Map(java.util.Map) Lucene(org.opensearch.common.lucene.Lucene) ActionListener(org.opensearch.action.ActionListener) IOContext(org.apache.lucene.store.IOContext) Repository(org.opensearch.repositories.Repository) TimeValue(org.opensearch.common.unit.TimeValue) ExceptionsHelper(org.opensearch.ExceptionsHelper) Set(java.util.Set) Settings(org.opensearch.common.settings.Settings) BlobStoreIndexShardSnapshot(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) BlockingQueue(java.util.concurrent.BlockingQueue) AbstractLifecycleComponent(org.opensearch.common.component.AbstractLifecycleComponent) Logger(org.apache.logging.log4j.Logger) RepositoryOperation(org.opensearch.repositories.RepositoryOperation) Stream(java.util.stream.Stream) ClusterStateUpdateTask(org.opensearch.cluster.ClusterStateUpdateTask) BytesArray(org.opensearch.common.bytes.BytesArray) BlobStoreIndexShardSnapshots(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) FsBlobContainer(org.opensearch.common.blobstore.fs.FsBlobContainer) StepListener(org.opensearch.action.StepListener) XContentType(org.opensearch.common.xcontent.XContentType) IndexCommit(org.apache.lucene.index.IndexCommit) ThreadPool(org.opensearch.threadpool.ThreadPool) BlobContainer(org.opensearch.common.blobstore.BlobContainer) Releasable(org.opensearch.common.lease.Releasable) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) ClusterState(org.opensearch.cluster.ClusterState) SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) Numbers(org.opensearch.common.Numbers) SlicedInputStream(org.opensearch.index.snapshots.blobstore.SlicedInputStream) SnapshotException(org.opensearch.snapshots.SnapshotException) Streams(org.opensearch.common.io.Streams) CompressorFactory(org.opensearch.common.compress.CompressorFactory) RepositoryVerificationException(org.opensearch.repositories.RepositoryVerificationException) RepositoryCleanupInProgress(org.opensearch.cluster.RepositoryCleanupInProgress) InputStreamIndexInput(org.opensearch.common.lucene.store.InputStreamIndexInput) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) Executor(java.util.concurrent.Executor) SnapshotInfo(org.opensearch.snapshots.SnapshotInfo) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) IOException(java.io.IOException) IndexShardSnapshotFailedException(org.opensearch.index.snapshots.IndexShardSnapshotFailedException) NotXContentException(org.opensearch.common.compress.NotXContentException) AtomicLong(java.util.concurrent.atomic.AtomicLong) RepositoryCleanupResult(org.opensearch.repositories.RepositoryCleanupResult) BlobPath(org.opensearch.common.blobstore.BlobPath) NamedXContentRegistry(org.opensearch.common.xcontent.NamedXContentRegistry) 
ClusterService(org.opensearch.cluster.service.ClusterService) CounterMetric(org.opensearch.common.metrics.CounterMetric) ShardGenerations(org.opensearch.repositories.ShardGenerations) NoSuchFileException(java.nio.file.NoSuchFileException) AbstractRunnable(org.opensearch.common.util.concurrent.AbstractRunnable) SnapshotCreationException(org.opensearch.snapshots.SnapshotCreationException) ByteSizeUnit(org.opensearch.common.unit.ByteSizeUnit) SnapshotFiles(org.opensearch.index.snapshots.blobstore.SnapshotFiles) SnapshotsService(org.opensearch.snapshots.SnapshotsService) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) ConcurrentCollections(org.opensearch.common.util.concurrent.ConcurrentCollections) XContentParser(org.opensearch.common.xcontent.XContentParser) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) MapperService(org.opensearch.index.mapper.MapperService) IndexId(org.opensearch.repositories.IndexId) XContentFactory(org.opensearch.common.xcontent.XContentFactory) RepositoryStats(org.opensearch.repositories.RepositoryStats) BlobMetadata(org.opensearch.common.blobstore.BlobMetadata) RecoverySettings(org.opensearch.indices.recovery.RecoverySettings) RepositoryException(org.opensearch.repositories.RepositoryException) FileInfo.canonicalName(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) BytesRef(org.apache.lucene.util.BytesRef) SnapshotId(org.opensearch.snapshots.SnapshotId) Collection(java.util.Collection) LoggingDeprecationHandler(org.opensearch.common.xcontent.LoggingDeprecationHandler) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Store(org.opensearch.index.store.Store) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) Nullable(org.opensearch.common.Nullable) Tuple(org.opensearch.common.collect.Tuple) BlobStore(org.opensearch.common.blobstore.BlobStore) List(java.util.List) Optional(java.util.Optional) BytesReference(org.opensearch.common.bytes.BytesReference) RateLimitingInputStream(org.opensearch.index.snapshots.blobstore.RateLimitingInputStream) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) ActionRunnable(org.opensearch.action.ActionRunnable) SnapshotsInProgress(org.opensearch.cluster.SnapshotsInProgress) ByteSizeValue(org.opensearch.common.unit.ByteSizeValue) SnapshotDeletionsInProgress(org.opensearch.cluster.SnapshotDeletionsInProgress) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) FilterInputStream(java.io.FilterInputStream) IndexShardSnapshotStatus(org.opensearch.index.snapshots.IndexShardSnapshotStatus) IndexMetaDataGenerations(org.opensearch.repositories.IndexMetaDataGenerations) UUIDs(org.opensearch.common.UUIDs) StoreFileMetadata(org.opensearch.index.store.StoreFileMetadata) IndexOutput(org.apache.lucene.store.IndexOutput) IndexShardRestoreFailedException(org.opensearch.index.snapshots.IndexShardRestoreFailedException) RepositoryData(org.opensearch.repositories.RepositoryData) Setting(org.opensearch.common.settings.Setting) RepositoryShardId(org.opensearch.repositories.RepositoryShardId) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) ShardId(org.opensearch.index.shard.ShardId) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) DeleteResult(org.opensearch.common.blobstore.DeleteResult) LogManager(org.apache.logging.log4j.LogManager) 
Collections(java.util.Collections) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream) IndexShardSnapshotStatus(org.opensearch.index.snapshots.IndexShardSnapshotStatus) BlobStoreIndexShardSnapshot(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) ArrayList(java.util.ArrayList) Store(org.opensearch.index.store.Store) BlobStore(org.opensearch.common.blobstore.BlobStore) StoreFileMetadata(org.opensearch.index.store.StoreFileMetadata) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) RepositoryShardId(org.opensearch.repositories.RepositoryShardId) ShardId(org.opensearch.index.shard.ShardId) BlobStoreIndexShardSnapshots(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) SnapshotFiles(org.opensearch.index.snapshots.blobstore.SnapshotFiles) Executor(java.util.concurrent.Executor) AbortedSnapshotException(org.opensearch.snapshots.AbortedSnapshotException) IndexShardSnapshotFailedException(org.opensearch.index.snapshots.IndexShardSnapshotFailedException) RepositoryException(org.opensearch.repositories.RepositoryException) IOException(java.io.IOException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) AbortedSnapshotException(org.opensearch.snapshots.AbortedSnapshotException) SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) SnapshotException(org.opensearch.snapshots.SnapshotException) RepositoryVerificationException(org.opensearch.repositories.RepositoryVerificationException) IOException(java.io.IOException) IndexShardSnapshotFailedException(org.opensearch.index.snapshots.IndexShardSnapshotFailedException) NotXContentException(org.opensearch.common.compress.NotXContentException) NoSuchFileException(java.nio.file.NoSuchFileException) SnapshotCreationException(org.opensearch.snapshots.SnapshotCreationException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) RepositoryException(org.opensearch.repositories.RepositoryException) IndexShardRestoreFailedException(org.opensearch.index.snapshots.IndexShardRestoreFailedException) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) FsBlobContainer(org.opensearch.common.blobstore.fs.FsBlobContainer) BlobContainer(org.opensearch.common.blobstore.BlobContainer) AbstractRunnable(org.opensearch.common.util.concurrent.AbstractRunnable) ActionRunnable(org.opensearch.action.ActionRunnable) Collection(java.util.Collection) Releasable(org.opensearch.common.lease.Releasable) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) StepListener(org.opensearch.action.StepListener)
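
The incremental logic in snapshotShard hinges on two per-file decisions: can an identical blob already in the repository be reused, and if not, does the file even need a data blob (files whose metadata hash equals their contents are stored inline as "virtual" blobs). Below is a condensed, illustrative sketch of just that decision; the standalone method resolveFileInfo does not exist in BlobStoreRepository, while UPLOADED_DATA_BLOB_PREFIX, VIRTUAL_DATA_BLOB_PREFIX and chunkSize() are the repository constants and helper used in the method above.

// Illustrative sketch (not an actual BlobStoreRepository method): choose the FileInfo for one store file.
private BlobStoreIndexShardSnapshot.FileInfo resolveFileInfo(
    StoreFileMetadata md,
    @Nullable List<BlobStoreIndexShardSnapshot.FileInfo> existingCandidates, // snapshots.findPhysicalIndexFiles(fileName)
    BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot
) {
    if (existingCandidates != null) {
        for (BlobStoreIndexShardSnapshot.FileInfo candidate : existingCandidates) {
            if (candidate.isSame(md)) {
                // A blob with the same name, size and checksum is already in the repository: reuse it, nothing to upload.
                return candidate;
            }
        }
    }
    // Files whose metadata hash equals their contents are kept inline in the shard-level metadata.
    final boolean needsWrite = md.hashEqualsContents() == false;
    final BlobStoreIndexShardSnapshot.FileInfo fileInfo = new BlobStoreIndexShardSnapshot.FileInfo(
        (needsWrite ? UPLOADED_DATA_BLOB_PREFIX : VIRTUAL_DATA_BLOB_PREFIX) + UUIDs.randomBase64UUID(),
        md,
        chunkSize()
    );
    if (needsWrite) {
        // Only real data blobs go onto the upload queue drained by the SNAPSHOT thread pool workers.
        filesToSnapshot.add(fileInfo);
    }
    return fileInfo;
}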

Example 3 with BlobStoreIndexShardSnapshot

Use of org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot in project OpenSearch by opensearch-project.

From class BlobStoreRepository, method cloneShardSnapshot:

@Override
public void cloneShardSnapshot(SnapshotId source, SnapshotId target, RepositoryShardId shardId, @Nullable String shardGeneration, ActionListener<String> listener) {
    if (isReadOnly()) {
        listener.onFailure(new RepositoryException(metadata.name(), "cannot clone shard snapshot on a readonly repository"));
        return;
    }
    final IndexId index = shardId.index();
    final int shardNum = shardId.shardId();
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    executor.execute(ActionRunnable.supply(listener, () -> {
        final long startTime = threadPool.absoluteTimeInMillis();
        final BlobContainer shardContainer = shardContainer(index, shardNum);
        final BlobStoreIndexShardSnapshots existingSnapshots;
        final String newGen;
        final String existingShardGen;
        if (shardGeneration == null) {
            Tuple<BlobStoreIndexShardSnapshots, Long> tuple = buildBlobStoreIndexShardSnapshots(shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX).keySet(), shardContainer);
            existingShardGen = String.valueOf(tuple.v2());
            newGen = String.valueOf(tuple.v2() + 1);
            existingSnapshots = tuple.v1();
        } else {
            newGen = UUIDs.randomBase64UUID();
            existingSnapshots = buildBlobStoreIndexShardSnapshots(Collections.emptySet(), shardContainer, shardGeneration).v1();
            existingShardGen = shardGeneration;
        }
        SnapshotFiles existingTargetFiles = null;
        SnapshotFiles sourceFiles = null;
        for (SnapshotFiles existingSnapshot : existingSnapshots) {
            final String snapshotName = existingSnapshot.snapshot();
            if (snapshotName.equals(target.getName())) {
                existingTargetFiles = existingSnapshot;
            } else if (snapshotName.equals(source.getName())) {
                sourceFiles = existingSnapshot;
            }
            if (sourceFiles != null && existingTargetFiles != null) {
                break;
            }
        }
        if (sourceFiles == null) {
            throw new RepositoryException(metadata.name(), "Can't create clone of [" + shardId + "] for snapshot [" + target + "]. The source snapshot [" + source + "] was not found in the shard metadata.");
        }
        if (existingTargetFiles != null) {
            if (existingTargetFiles.isSame(sourceFiles)) {
                return existingShardGen;
            }
            throw new RepositoryException(metadata.name(), "Can't create clone of [" + shardId + "] for snapshot [" + target + "]. A snapshot by that name already exists for this shard.");
        }
        final BlobStoreIndexShardSnapshot sourceMeta = loadShardSnapshot(shardContainer, source);
        logger.trace("[{}] [{}] writing shard snapshot file for clone", shardId, target);
        INDEX_SHARD_SNAPSHOT_FORMAT.write(sourceMeta.asClone(target.getName(), startTime, threadPool.absoluteTimeInMillis() - startTime), shardContainer, target.getUUID(), compress);
        INDEX_SHARD_SNAPSHOTS_FORMAT.write(existingSnapshots.withClone(source.getName(), target.getName()), shardContainer, newGen, compress);
        return newGen;
    }));
}
Also used : BlobStoreIndexShardSnapshots(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) SnapshotFiles(org.opensearch.index.snapshots.blobstore.SnapshotFiles) IndexId(org.opensearch.repositories.IndexId) BlobStoreIndexShardSnapshot(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) Executor(java.util.concurrent.Executor) FsBlobContainer(org.opensearch.common.blobstore.fs.FsBlobContainer) BlobContainer(org.opensearch.common.blobstore.BlobContainer) RepositoryException(org.opensearch.repositories.RepositoryException) Tuple(org.opensearch.common.collect.Tuple)
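
The two branches at the top of cloneShardSnapshot explain the generation values seen in testShardClone: with the legacy numeric format (shardGeneration == null) the clone writes index-(N+1), while with UUID-based shard generations it writes a fresh index-${uuid}. A hedged, standalone restatement of that rule follows; nextShardGeneration is an illustrative name, not a real BlobStoreRepository method.

// Illustrative sketch of the generation choice made in cloneShardSnapshot (not production code).
private String nextShardGeneration(@Nullable String currentShardGeneration, long numericGenerationFromBlobListing) {
    if (currentShardGeneration == null) {
        // Legacy numeric format: the clone writes index-(N+1), which is why testShardClone parses the
        // returned generation as a long and expects 1 after the initial snapshot wrote generation 0.
        return String.valueOf(numericGenerationFromBlobListing + 1);
    }
    // UUID-based shard generations: any fresh UUID is safe because the index-${uuid} blob is only
    // referenced by the repository metadata once the clone has completed successfully.
    return UUIDs.randomBase64UUID();
}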

Example 4 with BlobStoreIndexShardSnapshot

Use of org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot in project OpenSearch by opensearch-project.

From class BlobStoreRepository, method restoreShard:

@Override
public void restoreShard(Store store, SnapshotId snapshotId, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState, ActionListener<Void> listener) {
    final ShardId shardId = store.shardId();
    final ActionListener<Void> restoreListener = ActionListener.delegateResponse(listener, (l, e) -> l.onFailure(new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e)));
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    final BlobContainer container = shardContainer(indexId, snapshotShardId);
    executor.execute(ActionRunnable.wrap(restoreListener, l -> {
        final BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(container, snapshotId);
        final SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles(), null);
        new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState) {

            @Override
            protected void restoreFiles(List<BlobStoreIndexShardSnapshot.FileInfo> filesToRecover, Store store, ActionListener<Void> listener) {
                if (filesToRecover.isEmpty()) {
                    listener.onResponse(null);
                } else {
                    // Start at most as many workers as fit into the snapshot pool at once
                    final int workers = Math.min(threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(), snapshotFiles.indexFiles().size());
                    final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files = new LinkedBlockingQueue<>(filesToRecover);
                    final ActionListener<Void> allFilesListener = fileQueueListener(files, workers, ActionListener.map(listener, v -> null));
                    // restore the files from the snapshot to the Lucene store
                    for (int i = 0; i < workers; ++i) {
                        try {
                            executeOneFileRestore(files, allFilesListener);
                        } catch (Exception e) {
                            allFilesListener.onFailure(e);
                        }
                    }
                }
            }

            private void executeOneFileRestore(BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files, ActionListener<Void> allFilesListener) throws InterruptedException {
                final BlobStoreIndexShardSnapshot.FileInfo fileToRecover = files.poll(0L, TimeUnit.MILLISECONDS);
                if (fileToRecover == null) {
                    allFilesListener.onResponse(null);
                } else {
                    executor.execute(ActionRunnable.wrap(allFilesListener, filesListener -> {
                        store.incRef();
                        try {
                            restoreFile(fileToRecover, store);
                        } finally {
                            store.decRef();
                        }
                        executeOneFileRestore(files, filesListener);
                    }));
                }
            }

            private void restoreFile(BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store store) throws IOException {
                ensureNotClosing(store);
                logger.trace(() -> new ParameterizedMessage("[{}] restoring [{}] to [{}]", metadata.name(), fileInfo, store));
                boolean success = false;
                try (IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) {
                    if (fileInfo.name().startsWith(VIRTUAL_DATA_BLOB_PREFIX)) {
                        final BytesRef hash = fileInfo.metadata().hash();
                        indexOutput.writeBytes(hash.bytes, hash.offset, hash.length);
                        recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), hash.length);
                    } else {
                        try (InputStream stream = maybeRateLimitRestores(new SlicedInputStream(fileInfo.numberOfParts()) {

                            @Override
                            protected InputStream openSlice(int slice) throws IOException {
                                ensureNotClosing(store);
                                return container.readBlob(fileInfo.partName(slice));
                            }
                        })) {
                            final byte[] buffer = new byte[Math.toIntExact(Math.min(bufferSize, fileInfo.length()))];
                            int length;
                            while ((length = stream.read(buffer)) > 0) {
                                ensureNotClosing(store);
                                indexOutput.writeBytes(buffer, 0, length);
                                recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length);
                            }
                        }
                    }
                    Store.verify(indexOutput);
                    indexOutput.close();
                    store.directory().sync(Collections.singleton(fileInfo.physicalName()));
                    success = true;
                } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
                    try {
                        store.markStoreCorrupted(ex);
                    } catch (IOException e) {
                        logger.warn("store cannot be marked as corrupted", e);
                    }
                    throw ex;
                } finally {
                    if (success == false) {
                        store.deleteQuiet(fileInfo.physicalName());
                    }
                }
            }

            void ensureNotClosing(final Store store) throws AlreadyClosedException {
                assert store.refCount() > 0;
                if (store.isClosing()) {
                    throw new AlreadyClosedException("store is closing");
                }
            }
        }.restore(snapshotFiles, store, l);
    }));
}
Also used : Metadata(org.opensearch.cluster.metadata.Metadata) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AllocationService(org.opensearch.cluster.routing.allocation.AllocationService) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) Version(org.opensearch.Version) Strings(org.opensearch.common.Strings) AbortedSnapshotException(org.opensearch.snapshots.AbortedSnapshotException) GroupedActionListener(org.opensearch.action.support.GroupedActionListener) RecoveryState(org.opensearch.indices.recovery.RecoveryState) Map(java.util.Map) Lucene(org.opensearch.common.lucene.Lucene) ActionListener(org.opensearch.action.ActionListener) IOContext(org.apache.lucene.store.IOContext) Repository(org.opensearch.repositories.Repository) TimeValue(org.opensearch.common.unit.TimeValue) ExceptionsHelper(org.opensearch.ExceptionsHelper) Set(java.util.Set) Settings(org.opensearch.common.settings.Settings) BlobStoreIndexShardSnapshot(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) BlockingQueue(java.util.concurrent.BlockingQueue) AbstractLifecycleComponent(org.opensearch.common.component.AbstractLifecycleComponent) Logger(org.apache.logging.log4j.Logger) RepositoryOperation(org.opensearch.repositories.RepositoryOperation) Stream(java.util.stream.Stream) ClusterStateUpdateTask(org.opensearch.cluster.ClusterStateUpdateTask) BytesArray(org.opensearch.common.bytes.BytesArray) BlobStoreIndexShardSnapshots(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) FsBlobContainer(org.opensearch.common.blobstore.fs.FsBlobContainer) StepListener(org.opensearch.action.StepListener) XContentType(org.opensearch.common.xcontent.XContentType) IndexCommit(org.apache.lucene.index.IndexCommit) ThreadPool(org.opensearch.threadpool.ThreadPool) BlobContainer(org.opensearch.common.blobstore.BlobContainer) Releasable(org.opensearch.common.lease.Releasable) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) ClusterState(org.opensearch.cluster.ClusterState) SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) Numbers(org.opensearch.common.Numbers) SlicedInputStream(org.opensearch.index.snapshots.blobstore.SlicedInputStream) SnapshotException(org.opensearch.snapshots.SnapshotException) Streams(org.opensearch.common.io.Streams) CompressorFactory(org.opensearch.common.compress.CompressorFactory) RepositoryVerificationException(org.opensearch.repositories.RepositoryVerificationException) RepositoryCleanupInProgress(org.opensearch.cluster.RepositoryCleanupInProgress) InputStreamIndexInput(org.opensearch.common.lucene.store.InputStreamIndexInput) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) Executor(java.util.concurrent.Executor) SnapshotInfo(org.opensearch.snapshots.SnapshotInfo) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) IOException(java.io.IOException) IndexShardSnapshotFailedException(org.opensearch.index.snapshots.IndexShardSnapshotFailedException) NotXContentException(org.opensearch.common.compress.NotXContentException) AtomicLong(java.util.concurrent.atomic.AtomicLong) RepositoryCleanupResult(org.opensearch.repositories.RepositoryCleanupResult) BlobPath(org.opensearch.common.blobstore.BlobPath) NamedXContentRegistry(org.opensearch.common.xcontent.NamedXContentRegistry) 
ClusterService(org.opensearch.cluster.service.ClusterService) CounterMetric(org.opensearch.common.metrics.CounterMetric) ShardGenerations(org.opensearch.repositories.ShardGenerations) NoSuchFileException(java.nio.file.NoSuchFileException) AbstractRunnable(org.opensearch.common.util.concurrent.AbstractRunnable) SnapshotCreationException(org.opensearch.snapshots.SnapshotCreationException) ByteSizeUnit(org.opensearch.common.unit.ByteSizeUnit) SnapshotFiles(org.opensearch.index.snapshots.blobstore.SnapshotFiles) SnapshotsService(org.opensearch.snapshots.SnapshotsService) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) ConcurrentCollections(org.opensearch.common.util.concurrent.ConcurrentCollections) XContentParser(org.opensearch.common.xcontent.XContentParser) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) MapperService(org.opensearch.index.mapper.MapperService) IndexId(org.opensearch.repositories.IndexId) XContentFactory(org.opensearch.common.xcontent.XContentFactory) RepositoryStats(org.opensearch.repositories.RepositoryStats) BlobMetadata(org.opensearch.common.blobstore.BlobMetadata) RecoverySettings(org.opensearch.indices.recovery.RecoverySettings) RepositoryException(org.opensearch.repositories.RepositoryException) FileInfo.canonicalName(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) BytesRef(org.apache.lucene.util.BytesRef) SnapshotId(org.opensearch.snapshots.SnapshotId) Collection(java.util.Collection) LoggingDeprecationHandler(org.opensearch.common.xcontent.LoggingDeprecationHandler) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Store(org.opensearch.index.store.Store) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) Nullable(org.opensearch.common.Nullable) Tuple(org.opensearch.common.collect.Tuple) BlobStore(org.opensearch.common.blobstore.BlobStore) List(java.util.List) Optional(java.util.Optional) BytesReference(org.opensearch.common.bytes.BytesReference) RateLimitingInputStream(org.opensearch.index.snapshots.blobstore.RateLimitingInputStream) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) ActionRunnable(org.opensearch.action.ActionRunnable) SnapshotsInProgress(org.opensearch.cluster.SnapshotsInProgress) ByteSizeValue(org.opensearch.common.unit.ByteSizeValue) SnapshotDeletionsInProgress(org.opensearch.cluster.SnapshotDeletionsInProgress) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) FilterInputStream(java.io.FilterInputStream) IndexShardSnapshotStatus(org.opensearch.index.snapshots.IndexShardSnapshotStatus) IndexMetaDataGenerations(org.opensearch.repositories.IndexMetaDataGenerations) UUIDs(org.opensearch.common.UUIDs) StoreFileMetadata(org.opensearch.index.store.StoreFileMetadata) IndexOutput(org.apache.lucene.store.IndexOutput) IndexShardRestoreFailedException(org.opensearch.index.snapshots.IndexShardRestoreFailedException) RepositoryData(org.opensearch.repositories.RepositoryData) Setting(org.opensearch.common.settings.Setting) RepositoryShardId(org.opensearch.repositories.RepositoryShardId) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) ShardId(org.opensearch.index.shard.ShardId) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) DeleteResult(org.opensearch.common.blobstore.DeleteResult) LogManager(org.apache.logging.log4j.LogManager) 
Collections(java.util.Collections) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream) BlobStoreIndexShardSnapshot(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) IndexShardRestoreFailedException(org.opensearch.index.snapshots.IndexShardRestoreFailedException) Store(org.opensearch.index.store.Store) BlobStore(org.opensearch.common.blobstore.BlobStore) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) RepositoryShardId(org.opensearch.repositories.RepositoryShardId) ShardId(org.opensearch.index.shard.ShardId) SnapshotFiles(org.opensearch.index.snapshots.blobstore.SnapshotFiles) Executor(java.util.concurrent.Executor) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) ArrayList(java.util.ArrayList) List(java.util.List) BytesRef(org.apache.lucene.util.BytesRef) BlockingQueue(java.util.concurrent.BlockingQueue) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) SlicedInputStream(org.opensearch.index.snapshots.blobstore.SlicedInputStream) RateLimitingInputStream(org.opensearch.index.snapshots.blobstore.RateLimitingInputStream) FilterInputStream(java.io.FilterInputStream) InputStream(java.io.InputStream) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) IndexOutput(org.apache.lucene.store.IndexOutput) IOException(java.io.IOException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) AbortedSnapshotException(org.opensearch.snapshots.AbortedSnapshotException) SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) SnapshotException(org.opensearch.snapshots.SnapshotException) RepositoryVerificationException(org.opensearch.repositories.RepositoryVerificationException) IOException(java.io.IOException) IndexShardSnapshotFailedException(org.opensearch.index.snapshots.IndexShardSnapshotFailedException) NotXContentException(org.opensearch.common.compress.NotXContentException) NoSuchFileException(java.nio.file.NoSuchFileException) SnapshotCreationException(org.opensearch.snapshots.SnapshotCreationException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) RepositoryException(org.opensearch.repositories.RepositoryException) IndexShardRestoreFailedException(org.opensearch.index.snapshots.IndexShardRestoreFailedException) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) GroupedActionListener(org.opensearch.action.support.GroupedActionListener) ActionListener(org.opensearch.action.ActionListener) FsBlobContainer(org.opensearch.common.blobstore.fs.FsBlobContainer) BlobContainer(org.opensearch.common.blobstore.BlobContainer) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) SlicedInputStream(org.opensearch.index.snapshots.blobstore.SlicedInputStream) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException)
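
restoreFile obtains a file's bytes from one of two places: "virtual" blobs carry their contents inline as the metadata hash, and every other file is streamed back chunk by chunk from its part blobs through SlicedInputStream. The illustrative helper below, describeRestoreSources, summarizes that mapping; it is not part of BlobStoreRepository, VIRTUAL_DATA_BLOB_PREFIX is the repository constant referenced in the method above, and the List/ArrayList imports are those from the "Also used" list.

// Illustrative sketch (not an actual BlobStoreRepository method): list where the bytes of one
// FileInfo come from during restore.
private List<String> describeRestoreSources(BlobStoreIndexShardSnapshot.FileInfo fileInfo) {
    final List<String> sources = new ArrayList<>();
    if (fileInfo.name().startsWith(VIRTUAL_DATA_BLOB_PREFIX)) {
        // Contents equal the stored hash; restoreFile writes fileInfo.metadata().hash() directly to the output.
        sources.add("inline hash, " + fileInfo.metadata().hash().length + " bytes");
        return sources;
    }
    for (int slice = 0; slice < fileInfo.numberOfParts(); slice++) {
        // One blob per chunk; restoreFile streams them in order through SlicedInputStream.
        sources.add(fileInfo.partName(slice));
    }
    return sources;
}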

Aggregations

BlobStoreIndexShardSnapshot (org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot)4 BlobStoreIndexShardSnapshots (org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots)4 SnapshotFiles (org.opensearch.index.snapshots.blobstore.SnapshotFiles)4 IndexId (org.opensearch.repositories.IndexId)4 Executor (java.util.concurrent.Executor)3 BlobContainer (org.opensearch.common.blobstore.BlobContainer)3 FsBlobContainer (org.opensearch.common.blobstore.fs.FsBlobContainer)3 Tuple (org.opensearch.common.collect.Tuple)3 RepositoryData (org.opensearch.repositories.RepositoryData)3 RepositoryException (org.opensearch.repositories.RepositoryException)3 RepositoryShardId (org.opensearch.repositories.RepositoryShardId)3 FilterInputStream (java.io.FilterInputStream)2 IOException (java.io.IOException)2 InputStream (java.io.InputStream)2 NoSuchFileException (java.nio.file.NoSuchFileException)2 ArrayList (java.util.ArrayList)2 Collection (java.util.Collection)2 Collections (java.util.Collections)2 List (java.util.List)2 Map (java.util.Map)2