
Example 21 with IndexCommit

Use of org.apache.lucene.index.IndexCommit in project crate by crate.

From class NoOpEngine, method open:

@Override
protected DirectoryReader open(final IndexCommit commit) throws IOException {
    final Directory directory = commit.getDirectory();
    final List<IndexCommit> indexCommits = DirectoryReader.listCommits(directory);
    final IndexCommit indexCommit = indexCommits.get(indexCommits.size() - 1);
    return new DirectoryReader(directory, new LeafReader[0], null) {

        @Override
        protected DirectoryReader doOpenIfChanged() throws IOException {
            return null;
        }

        @Override
        protected DirectoryReader doOpenIfChanged(IndexCommit commit) throws IOException {
            return null;
        }

        @Override
        protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws IOException {
            return null;
        }

        @Override
        public long getVersion() {
            return 0;
        }

        @Override
        public boolean isCurrent() throws IOException {
            return true;
        }

        @Override
        public IndexCommit getIndexCommit() throws IOException {
            return indexCommit;
        }

        @Override
        protected void doClose() throws IOException {
        }

        @Override
        public CacheHelper getReaderCacheHelper() {
            return null;
        }
    };
}
Also used: DirectoryReader (org.apache.lucene.index.DirectoryReader), IndexWriter (org.apache.lucene.index.IndexWriter), IndexCommit (org.apache.lucene.index.IndexCommit), Directory (org.apache.lucene.store.Directory)
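The anonymous DirectoryReader above is a metadata-only stub: it exposes the latest commit point without ever loading segments. For context, a minimal, self-contained sketch of the commit-listing call it builds on (the index path is hypothetical):

import java.io.IOException;
import java.nio.file.Paths;
import java.util.List;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class LastCommitExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical index path, for illustration only.
        try (Directory directory = FSDirectory.open(Paths.get("/tmp/my-index"))) {
            // listCommits returns commits sorted from oldest to latest,
            // so the last element is the most recent commit point.
            List<IndexCommit> commits = DirectoryReader.listCommits(directory);
            IndexCommit latest = commits.get(commits.size() - 1);
            System.out.println("latest generation: " + latest.getGeneration());
            System.out.println("files: " + latest.getFileNames());
        }
    }
}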

Example 22 with IndexCommit

Use of org.apache.lucene.index.IndexCommit in project crate by crate.

From class CombinedDeletionPolicy, method onCommit:

@Override
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
    final IndexCommit safeCommit;
    synchronized (this) {
        final int keptPosition = indexOfKeptCommits(commits, globalCheckpointSupplier.getAsLong());
        this.safeCommitInfo = SafeCommitInfo.EMPTY;
        this.lastCommit = commits.get(commits.size() - 1);
        this.safeCommit = commits.get(keptPosition);
        if (keptPosition == commits.size() - 1) {
            this.maxSeqNoOfNextSafeCommit = Long.MAX_VALUE;
        } else {
            this.maxSeqNoOfNextSafeCommit = Long.parseLong(commits.get(keptPosition + 1).getUserData().get(SequenceNumbers.MAX_SEQ_NO));
        }
        for (int i = 0; i < keptPosition; i++) {
            if (snapshottedCommits.containsKey(commits.get(i)) == false) {
                deleteCommit(commits.get(i));
            }
        }
        updateRetentionPolicy();
        safeCommit = this.safeCommit;
    }
    assert Thread.holdsLock(this) == false : "should not block concurrent acquire or release";
    safeCommitInfo = new SafeCommitInfo(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), getDocCountOfCommit(safeCommit));
    // This is protected from concurrent calls by a lock on the IndexWriter, but this assertion makes sure that we notice if that ceases
    // to be true in future. It is not disastrous if safeCommitInfo refers to an older safeCommit, it just means that we might retain a
    // bit more history and do a few more ops-based recoveries than we would otherwise.
    final IndexCommit newSafeCommit = this.safeCommit;
    assert safeCommit == newSafeCommit : "onCommit called concurrently? " + safeCommit.getGeneration() + " vs " + newSafeCommit.getGeneration();
}
Also used: IndexCommit (org.apache.lucene.index.IndexCommit)
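onCommit delegates the safety decision to indexOfKeptCommits(...), which is not part of this excerpt. A hedged sketch of the idea, assuming each commit records its max sequence number under the SequenceNumbers.MAX_SEQ_NO user-data key (the string "max_seq_no"), as the code above implies:

// Sketch only, not the actual Crate/Elasticsearch helper: walk commits from
// newest to oldest and keep the first one whose max_seq_no is fully covered
// by the global checkpoint; every older commit becomes eligible for deletion.
static int indexOfKeptCommitsSketch(List<? extends IndexCommit> commits,
                                    long globalCheckpoint) throws IOException {
    for (int i = commits.size() - 1; i >= 0; i--) {
        String maxSeqNo = commits.get(i).getUserData().get("max_seq_no");
        if (maxSeqNo == null || Long.parseLong(maxSeqNo) <= globalCheckpoint) {
            return i;
        }
    }
    return 0; // fall back to the oldest commit if none is covered yet
}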

Example 23 with IndexCommit

Use of org.apache.lucene.index.IndexCommit in project crate by crate.

From class CombinedDeletionPolicy, method acquireIndexCommit:

/**
 * Captures the most recent commit point {@link #lastCommit} or the most recent safe commit point {@link #safeCommit}.
 * Index files of the captured commit point won't be released until the commit reference is closed.
 *
 * @param acquiringSafeCommit captures the most recent safe commit point if true; otherwise captures the most recent commit point.
 */
synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) {
    assert safeCommit != null : "Safe commit is not initialized yet";
    assert lastCommit != null : "Last commit is not initialized yet";
    final IndexCommit snapshotting = acquiringSafeCommit ? safeCommit : lastCommit;
    // increase refCount
    snapshottedCommits.addTo(snapshotting, 1);
    return new SnapshotIndexCommit(snapshotting);
}
Also used: IndexCommit (org.apache.lucene.index.IndexCommit)
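Releasing works in the opposite direction: the reference count is decremented, and once it reaches zero the commit becomes deletable again on the next onCommit pass. The release method is not part of this excerpt; a hypothetical sketch, assuming snapshottedCommits is an hppc ObjectIntHashMap as the addTo call above suggests:

// Hypothetical counterpart to acquireIndexCommit, for illustration only.
synchronized void releaseIndexCommitSketch(IndexCommit snapshotting) {
    // decrease refCount; addTo returns the updated value
    int refCount = snapshottedCommits.addTo(snapshotting, -1);
    assert refCount >= 0 : "commit released more often than acquired";
    if (refCount == 0) {
        // no snapshots hold this commit anymore; the deletion policy
        // may delete it during the next onCommit pass
        snapshottedCommits.remove(snapshotting);
    }
}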

Example 24 with IndexCommit

Use of org.apache.lucene.index.IndexCommit in project crate by crate.

From class Store, method findSafeIndexCommit:

/**
 * Returns a {@link org.elasticsearch.index.seqno.SequenceNumbers.CommitInfo} of the safe commit, if one exists.
 */
public Optional<SequenceNumbers.CommitInfo> findSafeIndexCommit(long globalCheckpoint) throws IOException {
    final List<IndexCommit> commits = DirectoryReader.listCommits(directory);
    assert commits.isEmpty() == false : "no commit found";
    final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commits, globalCheckpoint);
    final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(safeCommit.getUserData().entrySet());
    // all operations of the safe commit must be at most the global checkpoint.
    if (commitInfo.maxSeqNo <= globalCheckpoint) {
        return Optional.of(commitInfo);
    } else {
        return Optional.empty();
    }
}
Also used: SequenceNumbers (org.elasticsearch.index.seqno.SequenceNumbers), IndexCommit (org.apache.lucene.index.IndexCommit)
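A caller can use the returned CommitInfo to decide between an operations-based and a file-based recovery. A minimal usage sketch (the store reference and checkpoint value are hypothetical; in practice the checkpoint comes from the replication tracker):

long globalCheckpoint = 42L; // illustrative value
Optional<SequenceNumbers.CommitInfo> safeCommitInfo = store.findSafeIndexCommit(globalCheckpoint);
if (safeCommitInfo.isPresent()) {
    // Every operation in the safe commit is at or below the global checkpoint,
    // so recovery can replay only the operations above maxSeqNo.
    long maxSeqNo = safeCommitInfo.get().maxSeqNo;
    System.out.println("safe commit found, max_seq_no=" + maxSeqNo);
} else {
    // No commit is fully covered by the checkpoint; fall back to copying files.
    System.out.println("no safe commit at global checkpoint " + globalCheckpoint);
}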

Example 25 with IndexCommit

Use of org.apache.lucene.index.IndexCommit in project crate by crate.

From class BlobStoreRepository, method snapshotShard:

@Override
public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, boolean writeShardGens, ActionListener<String> listener) {
    final ShardId shardId = store.shardId();
    final long startTime = threadPool.absoluteTimeInMillis();
    try {
        final String generation = snapshotStatus.generation();
        LOGGER.debug("[{}] [{}] snapshot to [{}] [{}] ...", shardId, snapshotId, metadata.name(), generation);
        final BlobContainer shardContainer = shardContainer(indexId, shardId);
        final Set<String> blobs;
        if (generation == null) {
            try {
                blobs = shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX).keySet();
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
            }
        } else {
            blobs = Collections.singleton(INDEX_FILE_PREFIX + generation);
        }
        Tuple<BlobStoreIndexShardSnapshots, String> tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer, generation);
        BlobStoreIndexShardSnapshots snapshots = tuple.v1();
        final String fileListGeneration = tuple.v2();
        if (snapshots.snapshots().stream().anyMatch(sf -> sf.snapshot().equals(snapshotId.getName()))) {
            throw new IndexShardSnapshotFailedException(shardId, "Duplicate snapshot name [" + snapshotId.getName() + "] detected, aborting");
        }
        final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles = new ArrayList<>();
        final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new LinkedBlockingQueue<>();
        store.incRef();
        final Collection<String> fileNames;
        final Store.MetadataSnapshot metadataFromStore;
        try {
            // TODO apparently we don't use MetadataSnapshot#recoveryDiff(...) here but we should
            try {
                LOGGER.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit);
                metadataFromStore = store.getMetadata(snapshotIndexCommit);
                fileNames = snapshotIndexCommit.getFileNames();
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
            }
        } finally {
            store.decRef();
        }
        int indexIncrementalFileCount = 0;
        int indexTotalNumberOfFiles = 0;
        long indexIncrementalSize = 0;
        long indexTotalFileSize = 0;
        for (String fileName : fileNames) {
            if (snapshotStatus.isAborted()) {
                LOGGER.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
                throw new IndexShardSnapshotFailedException(shardId, "Aborted");
            }
            LOGGER.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
            final StoreFileMetadata md = metadataFromStore.get(fileName);
            BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null;
            List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
            if (filesInfo != null) {
                for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
                    if (fileInfo.isSame(md)) {
                        // a commit point file with the same name, size and checksum was already copied to repository
                        // we will reuse it for this snapshot
                        existingFileInfo = fileInfo;
                        break;
                    }
                }
            }
            indexTotalFileSize += md.length();
            indexTotalNumberOfFiles++;
            if (existingFileInfo == null) {
                indexIncrementalFileCount++;
                indexIncrementalSize += md.length();
                // create a new FileInfo
                BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize());
                indexCommitPointFiles.add(snapshotFileInfo);
                filesToSnapshot.add(snapshotFileInfo);
            } else {
                indexCommitPointFiles.add(existingFileInfo);
            }
        }
        snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileSize);
        assert indexIncrementalFileCount == filesToSnapshot.size();
        final StepListener<Collection<Void>> allFilesUploadedListener = new StepListener<>();
        allFilesUploadedListener.whenComplete(v -> {
            final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration());
            // now create and write the commit point
            final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), indexCommitPointFiles, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), lastSnapshotStatus.getIncrementalFileCount(), lastSnapshotStatus.getIncrementalSize());
            LOGGER.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
            try {
                indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID(), false);
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
            }
            // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones
            List<SnapshotFiles> newSnapshotsList = new ArrayList<>();
            newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
            for (SnapshotFiles point : snapshots) {
                newSnapshotsList.add(point);
            }
            final List<String> blobsToDelete;
            final String indexGeneration;
            if (writeShardGens) {
                indexGeneration = UUIDs.randomBase64UUID();
                blobsToDelete = Collections.emptyList();
            } else {
                indexGeneration = Long.toString(Long.parseLong(fileListGeneration) + 1);
                // Delete all previous index-N blobs
                blobsToDelete = blobs.stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList());
                assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))).max().orElse(-1L) < Long.parseLong(indexGeneration) : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N blobs " + blobsToDelete;
            }
            try {
                writeShardIndexBlob(shardContainer, indexGeneration, new BlobStoreIndexShardSnapshots(newSnapshotsList));
            } catch (IOException e) {
                throw new IndexShardSnapshotFailedException(shardId, "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e);
            }
            if (writeShardGens == false) {
                try {
                    shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete);
                } catch (IOException e) {
                    LOGGER.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", snapshotId, shardId), e);
                }
            }
            snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), indexGeneration);
            listener.onResponse(indexGeneration);
        }, listener::onFailure);
        if (indexIncrementalFileCount == 0) {
            allFilesUploadedListener.onResponse(Collections.emptyList());
            return;
        }
        final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
        int maximumPoolSize = executor instanceof ThreadPoolExecutor ? ((ThreadPoolExecutor) executor).getMaximumPoolSize() : 1;
        // Start as many workers as fit into the snapshot pool at once at the most
        final int workers = Math.min(maximumPoolSize, indexIncrementalFileCount);
        final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, workers, allFilesUploadedListener);
        for (int i = 0; i < workers; ++i) {
            executor.execute(ActionRunnable.run(filesListener, () -> {
                BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
                if (snapshotFileInfo != null) {
                    store.incRef();
                    try {
                        do {
                            snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store);
                            snapshotFileInfo = filesToSnapshot.poll(0L, TimeUnit.MILLISECONDS);
                        } while (snapshotFileInfo != null);
                    } finally {
                        store.decRef();
                    }
                }
            }));
        }
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
Also used: IndexCommit (org.apache.lucene.index.IndexCommit), Store (org.elasticsearch.index.store.Store), StoreFileMetadata (org.elasticsearch.index.store.StoreFileMetadata), ShardId (org.elasticsearch.index.shard.ShardId), BlobContainer (org.elasticsearch.common.blobstore.BlobContainer), BlobStoreIndexShardSnapshot (org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot), BlobStoreIndexShardSnapshots (org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots), SnapshotFiles (org.elasticsearch.index.snapshots.blobstore.SnapshotFiles), IndexShardSnapshotStatus (org.elasticsearch.index.snapshots.IndexShardSnapshotStatus), IndexShardSnapshotFailedException (org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException), StepListener (org.elasticsearch.action.StepListener), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)
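The upload fan-out at the end of snapshotShard is a queue-draining worker pattern: at most maximumPoolSize workers are submitted, and each one polls the shared queue until it is empty, so incremental files upload in parallel without over-subscribing the SNAPSHOT thread pool. The same pattern in isolation, as a self-contained sketch with illustrative names:

import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

public class QueueDrainingWorkers {
    public static void main(String[] args) {
        BlockingQueue<String> filesToSnapshot =
                new LinkedBlockingQueue<>(List.of("_0.si", "_0.cfs", "segments_5"));
        ExecutorService executor = Executors.newFixedThreadPool(4);
        // Start no more workers than there are queued files (cf. Math.min above).
        int workers = Math.min(4, filesToSnapshot.size());
        for (int i = 0; i < workers; i++) {
            executor.execute(() -> {
                // Non-blocking poll: each worker exits once the queue is drained.
                String file;
                while ((file = filesToSnapshot.poll()) != null) {
                    System.out.println(Thread.currentThread().getName() + " uploading " + file);
                }
            });
        }
        executor.shutdown();
    }
}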

Aggregations

IndexCommit (org.apache.lucene.index.IndexCommit): 60
IOException (java.io.IOException): 24
ArrayList (java.util.ArrayList): 22
AtomicLong (java.util.concurrent.atomic.AtomicLong): 11
Directory (org.apache.lucene.store.Directory): 11
Test (org.junit.Test): 10
IndexWriter (org.apache.lucene.index.IndexWriter): 9
Store (org.elasticsearch.index.store.Store): 9
Translog (org.elasticsearch.index.translog.Translog): 8
List (java.util.List): 7
Map (java.util.Map): 7
SolrException (org.apache.solr.common.SolrException): 7
NoSuchFileException (java.nio.file.NoSuchFileException): 6
HashMap (java.util.HashMap): 6
LongPoint (org.apache.lucene.document.LongPoint): 6
DirectoryReader (org.apache.lucene.index.DirectoryReader): 6
IndexReader (org.apache.lucene.index.IndexReader): 6
UncheckedIOException (java.io.UncheckedIOException): 5
Collections (java.util.Collections): 5
IndexSettings (org.elasticsearch.index.IndexSettings): 5