Example 6 with IndexInput

Use of org.apache.lucene.store.IndexInput in project elasticsearch by elastic.

From the class KeyStoreWrapper, the load method:

/**
 * Loads information about the Elasticsearch keystore from the provided config directory.
 *
 * {@link #decrypt(char[])} must be called before reading or writing any entries.
 * Returns {@code null} if no keystore exists.
 */
public static KeyStoreWrapper load(Path configDir) throws IOException {
    Path keystoreFile = keystorePath(configDir);
    if (Files.exists(keystoreFile) == false) {
        return null;
    }
    SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
    try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) {
        ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput);
        CodecUtil.checkHeader(input, KEYSTORE_FILENAME, FORMAT_VERSION, FORMAT_VERSION);
        byte hasPasswordByte = input.readByte();
        boolean hasPassword = hasPasswordByte == 1;
        if (hasPassword == false && hasPasswordByte != 0) {
            throw new IllegalStateException("hasPassword boolean is corrupt: " + String.format(Locale.ROOT, "%02x", hasPasswordByte));
        }
        String type = input.readString();
        String secretKeyAlgo = input.readString();
        byte[] keystoreBytes = new byte[input.readInt()];
        input.readBytes(keystoreBytes, 0, keystoreBytes.length);
        CodecUtil.checkFooter(input);
        return new KeyStoreWrapper(hasPassword, type, secretKeyAlgo, keystoreBytes);
    }
}
Also used : Path(java.nio.file.Path) ChecksumIndexInput(org.apache.lucene.store.ChecksumIndexInput) BufferedChecksumIndexInput(org.apache.lucene.store.BufferedChecksumIndexInput) IndexInput(org.apache.lucene.store.IndexInput) SimpleFSDirectory(org.apache.lucene.store.SimpleFSDirectory)
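
The load method above follows Lucene's standard pattern for tamper-evident files: write a codec header, then the payload, then a footer carrying a CRC32 checksum; on read, wrap the raw input in a checksumming view and verify both ends. Below is a minimal, self-contained sketch of that round trip. The file name demo.bin, the codec name demo, and the temp directory are illustrative, not part of the keystore format.

import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.BufferedChecksumIndexInput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

public class ChecksummedFileDemo {
    private static final String CODEC = "demo"; // illustrative codec name
    private static final int VERSION = 1;

    public static void main(String[] args) throws Exception {
        Path dirPath = Files.createTempDirectory("checksum-demo");
        try (Directory dir = FSDirectory.open(dirPath)) {
            // Write: header, payload, then a footer that stores the running CRC32.
            try (IndexOutput out = dir.createOutput("demo.bin", IOContext.DEFAULT)) {
                CodecUtil.writeHeader(out, CODEC, VERSION);
                out.writeString("payload");
                CodecUtil.writeFooter(out);
            }
            // Read: wrap the raw input in a checksumming view, validate the header,
            // read the payload, then verify the footer, as load() does above.
            try (IndexInput raw = dir.openInput("demo.bin", IOContext.READONCE)) {
                ChecksumIndexInput in = new BufferedChecksumIndexInput(raw);
                CodecUtil.checkHeader(in, CODEC, VERSION, VERSION);
                String payload = in.readString();
                CodecUtil.checkFooter(in); // throws CorruptIndexException on mismatch
                System.out.println(payload);
            }
        }
    }
}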

Example 7 with IndexInput

Use of org.apache.lucene.store.IndexInput in project crate by crate.

From the class BlobRecoverySourceHandler, the phase1 method:

/**
 * Performs phase 1 of the recovery operations. Once this {@link SnapshotIndexCommit}
 * snapshot has been taken, no commit operations (files being fsync'd) are
 * effectively allowed on this index until all recovery phases are done.
 * <p/>
 * Phase 1 examines the segment files on the target node and copies over the
 * segments that are missing. Only segments that have the same size and
 * checksum can be reused.
 */
public void phase1(final SnapshotIndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
        // CRATE CHANGE
        if (blobRecoveryHandler != null) {
            blobRecoveryHandler.phase1();
        }
        StopWatch stopWatch = new StopWatch().start();
        final Store.MetadataSnapshot recoverySourceMetadata;
        try {
            recoverySourceMetadata = store.getMetadata(snapshot);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            shard.engine().failEngine("recovery", ex);
            throw ex;
        }
        for (String name : snapshot.getFiles()) {
            final StoreFileMetaData md = recoverySourceMetadata.get(name);
            if (md == null) {
                logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed metadata has " + recoverySourceMetadata.asMap().size() + " files", name);
            }
        }
        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
        String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
        final boolean recoverWithSyncId = recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
        if (recoverWithSyncId) {
            final long numDocsTarget = request.metadataSnapshot().getNumDocs();
            final long numDocsSource = recoverySourceMetadata.getNumDocs();
            if (numDocsTarget != numDocsSource) {
                throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number of docs differ: " + numDocsTarget + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsSource + "(" + request.targetNode().getName() + ")");
            }
            // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
            // so we don't return here
            logger.trace("[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target", indexName, shardId, request.targetNode(), recoverySourceSyncId);
        } else {
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}], size [{}]", indexName, shardId, request.targetNode(), md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            for (StoreFileMetaData md : Iterables.concat(diff.different, diff.missing)) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", indexName, shardId, request.targetNode(), md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
                } else {
                    logger.trace("[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote", indexName, shardId, request.targetNode(), md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;
            logger.trace("[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", indexName, shardId, request.targetNode(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
            cancellableThreads.execute(new Interruptable() {

                @Override
                public void run() throws InterruptedException {
                    RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, translogView.totalOperations());
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                }
            });
            // This latch will be used to wait until all files have been transferred to the target node
            final CountDownLatch latch = new CountDownLatch(response.phase1FileNames.size());
            final CopyOnWriteArrayList<Throwable> exceptions = new CopyOnWriteArrayList<>();
            final AtomicReference<Throwable> corruptedEngine = new AtomicReference<>();
            int fileIndex = 0;
            ThreadPoolExecutor pool;
            // How many bytes we've copied since we last called RateLimiter.pause
            final AtomicLong bytesSinceLastPause = new AtomicLong();
            for (final String name : response.phase1FileNames) {
                long fileSize = response.phase1FileSizes.get(fileIndex);
                // Files are routed to one of two pools: small files go to a dedicated
                // pool so that large transfers do not starve them, and larger files
                // are streamed separately.
                if (fileSize > RecoverySettings.SMALL_FILE_CUTOFF_BYTES) {
                    pool = recoverySettings.concurrentStreamPool();
                } else {
                    pool = recoverySettings.concurrentSmallFileStreamPool();
                }
                pool.execute(new AbstractRunnable() {

                    @Override
                    public void onFailure(Throwable t) {
                        // we either got rejected or the store can't be incremented / we are canceled
                        logger.debug("Failed to transfer file [" + name + "] on recovery");
                    }

                    @Override
                    public void onAfter() {
                        // Signify this file has completed by decrementing the latch
                        latch.countDown();
                    }

                    @Override
                    protected void doRun() {
                        cancellableThreads.checkForCancel();
                        store.incRef();
                        final StoreFileMetaData md = recoverySourceMetadata.get(name);
                        try (final IndexInput indexInput = store.directory().openInput(name, IOContext.READONCE)) {
                            // use the configured chunk size, but read at least one byte
                            final int BUFFER_SIZE = (int) Math.max(1, recoverySettings.fileChunkSize().getBytes());
                            final byte[] buf = new byte[BUFFER_SIZE];
                            boolean shouldCompressRequest = recoverySettings.compress();
                            if (CompressorFactory.isCompressed(indexInput)) {
                                shouldCompressRequest = false;
                            }
                            final long len = indexInput.length();
                            long readCount = 0;
                            final TransportRequestOptions requestOptions = TransportRequestOptions.builder().withCompress(shouldCompressRequest).withType(TransportRequestOptions.Type.RECOVERY).withTimeout(recoverySettings.internalActionTimeout()).build();
                            while (readCount < len) {
                                if (shard.state() == IndexShardState.CLOSED) {
                                    // check if the shard got closed on us
                                    throw new IndexShardClosedException(shard.shardId());
                                }
                                int toRead = readCount + BUFFER_SIZE > len ? (int) (len - readCount) : BUFFER_SIZE;
                                final long position = indexInput.getFilePointer();
                                // Pause using the rate limiter, if desired, to throttle the recovery
                                RateLimiter rl = recoverySettings.rateLimiter();
                                long throttleTimeInNanos = 0;
                                if (rl != null) {
                                    long bytes = bytesSinceLastPause.addAndGet(toRead);
                                    if (bytes > rl.getMinPauseCheckBytes()) {
                                        // Time to pause
                                        bytesSinceLastPause.addAndGet(-bytes);
                                        throttleTimeInNanos = rl.pause(bytes);
                                        shard.recoveryStats().addThrottleTime(throttleTimeInNanos);
                                    }
                                }
                                indexInput.readBytes(buf, 0, toRead, false);
                                final BytesArray content = new BytesArray(buf, 0, toRead);
                                readCount += toRead;
                                final boolean lastChunk = readCount == len;
                                final RecoveryFileChunkRequest fileChunkRequest = new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, lastChunk, translogView.totalOperations(), throttleTimeInNanos);
                                cancellableThreads.execute(new Interruptable() {

                                    @Override
                                    public void run() throws InterruptedException {
                                        // Actually send the file chunk to the target node, waiting for it to complete
                                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, fileChunkRequest, requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                                    }
                                });
                            }
                        } catch (Throwable e) {
                            final Throwable corruptIndexException;
                            if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
                                if (store.checkIntegrityNoException(md) == false) {
                                    // we are corrupted on the primary -- fail!
                                    logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                                    if (corruptedEngine.compareAndSet(null, corruptIndexException) == false) {
                                        // if we are not the first exception, add ourselves as suppressed to the main one:
                                        corruptedEngine.get().addSuppressed(e);
                                    }
                                } else {
                                    // corruption has happened on the way to replica
                                    RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
                                    exception.addSuppressed(e);
                                    // last exception first
                                    exceptions.add(0, exception);
                                    logger.warn("{} Remote file corruption on node {}, recovering {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode(), md);
                                }
                            } else {
                                // last exceptions first
                                exceptions.add(0, e);
                            }
                        } finally {
                            store.decRef();
                        }
                    }
                });
                fileIndex++;
            }
            cancellableThreads.execute(new Interruptable() {

                @Override
                public void run() throws InterruptedException {
                    // Wait for all files that need to be transferred to finish transferring
                    latch.await();
                }
            });
            if (corruptedEngine.get() != null) {
                shard.engine().failEngine("recovery", corruptedEngine.get());
                throw corruptedEngine.get();
            } else {
                ExceptionsHelper.rethrowAndSuppress(exceptions);
            }
            cancellableThreads.execute(new Interruptable() {

                @Override
                public void run() throws InterruptedException {
                    // Send the CLEAN_FILES request: the target renames the transferred
                    // temporary files to their final names, and any leftover files that
                    // are not referenced by the recovered commit point are deleted
                    try {
                        transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, translogView.totalOperations()), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                    } catch (RemoteTransportException remoteException) {
                        final IOException corruptIndexException;
                        // after the index was copied, finalization revealed that the index was corrupted:
                        //   - maybe due to a broken segments file on an empty index (transferred with no checksum)
                        //   - maybe due to old segments without checksums or length only checks
                        if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(remoteException)) != null) {
                            try {
                                final Store.MetadataSnapshot recoverySourceMetadata = store.getMetadata(snapshot);
                                StoreFileMetaData[] metadata = Iterables.toArray(recoverySourceMetadata, StoreFileMetaData.class);
                                ArrayUtil.timSort(metadata, new Comparator<StoreFileMetaData>() {

                                    @Override
                                    public int compare(StoreFileMetaData o1, StoreFileMetaData o2) {
                                        // check small files first
                                        return Long.compare(o1.length(), o2.length());
                                    }
                                });
                                for (StoreFileMetaData md : metadata) {
                                    logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md);
                                    if (store.checkIntegrityNoException(md) == false) {
                                        // we are corrupted on the primary -- fail!
                                        shard.engine().failEngine("recovery", corruptIndexException);
                                        logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                                        throw corruptIndexException;
                                    }
                                }
                            } catch (IOException ex) {
                                remoteException.addSuppressed(ex);
                                throw remoteException;
                            }
                            // corruption has happened on the way to replica
                            RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but checksums are ok", null);
                            exception.addSuppressed(remoteException);
                            logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode());
                            throw exception;
                        } else {
                            throw remoteException;
                        }
                    }
                }
            });
        }
        prepareTargetForTranslog(translogView);
        logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
        response.phase1Time = stopWatch.totalTime().millis();
    } catch (Throwable e) {
        throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
        store.decRef();
    }
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) Store(org.elasticsearch.index.store.Store) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) StoreFileMetaData(org.elasticsearch.index.store.StoreFileMetaData) IndexInput(org.apache.lucene.store.IndexInput) TransportRequestOptions(org.elasticsearch.transport.TransportRequestOptions) RemoteTransportException(org.elasticsearch.transport.RemoteTransportException) BytesArray(org.elasticsearch.common.bytes.BytesArray) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) Interruptable(org.elasticsearch.common.util.CancellableThreads.Interruptable) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) RateLimiter(org.apache.lucene.store.RateLimiter) StopWatch(org.elasticsearch.common.StopWatch) AtomicLong(java.util.concurrent.atomic.AtomicLong) IndexShardClosedException(org.elasticsearch.index.shard.IndexShardClosedException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList)
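
The heart of the inner loop above is chunked reading from an IndexInput with optional throttling: bytes are accumulated since the last pause and RateLimiter.pause is only invoked once the limiter's minimum pause-check threshold is crossed. Here is a condensed sketch of just that pattern, stripped of the transport and error handling; the class and method names are ours, and the 8 KB chunk size is illustrative.

import java.io.IOException;
import java.nio.file.Path;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RateLimiter;

public class ThrottledCopyDemo {

    /** Reads fileName from dirPath in fixed-size chunks, pausing via the rate limiter. */
    static void copyThrottled(Path dirPath, String fileName, double mbPerSec) throws IOException {
        RateLimiter limiter = new RateLimiter.SimpleRateLimiter(mbPerSec);
        try (Directory dir = FSDirectory.open(dirPath);
             IndexInput in = dir.openInput(fileName, IOContext.READONCE)) {
            byte[] buf = new byte[8192];
            long remaining = in.length();
            long bytesSinceLastPause = 0;
            while (remaining > 0) {
                int toRead = (int) Math.min(buf.length, remaining);
                in.readBytes(buf, 0, toRead);
                remaining -= toRead;
                bytesSinceLastPause += toRead;
                // Only pause once enough bytes have accumulated, as phase1 does.
                if (bytesSinceLastPause > limiter.getMinPauseCheckBytes()) {
                    limiter.pause(bytesSinceLastPause);
                    bytesSinceLastPause = 0;
                }
                // A real sender would ship buf[0..toRead) to the target node here.
            }
        }
    }
}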

Example 8 with IndexInput

Use of org.apache.lucene.store.IndexInput in project elasticsearch by elastic.

From the class CorruptionUtils, the corruptFile method:

/**
 * Corrupts a random file at a random position.
 */
public static void corruptFile(Random random, Path... files) throws IOException {
    assertTrue("files must be non-empty", files.length > 0);
    final Path fileToCorrupt = RandomPicks.randomFrom(random, files);
    assertTrue(fileToCorrupt + " is not a file", Files.isRegularFile(fileToCorrupt));
    try (Directory dir = FSDirectory.open(fileToCorrupt.toAbsolutePath().getParent())) {
        long checksumBeforeCorruption;
        try (IndexInput input = dir.openInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
            checksumBeforeCorruption = CodecUtil.retrieveChecksum(input);
        }
        try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            // read
            raf.position(random.nextInt((int) Math.min(Integer.MAX_VALUE, raf.size())));
            long filePointer = raf.position();
            ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
            raf.read(bb);
            bb.flip();
            // corrupt
            byte oldValue = bb.get(0);
            byte newValue = (byte) (oldValue + 1);
            bb.put(0, newValue);
            // rewrite
            raf.position(filePointer);
            raf.write(bb);
            logger.info("Corrupting file --  flipping at position {} from {} to {} file: {}", filePointer, Integer.toHexString(oldValue), Integer.toHexString(newValue), fileToCorrupt.getFileName());
        }
        long checksumAfterCorruption;
        long actualChecksumAfterCorruption;
        try (ChecksumIndexInput input = dir.openChecksumInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
            assertThat(input.getFilePointer(), is(0L));
            // one long is the checksum... 8 bytes
            input.seek(input.length() - 8);
            checksumAfterCorruption = input.getChecksum();
            actualChecksumAfterCorruption = input.readLong();
        }
        // We assert that the checksums really do differ; there is a small chance of a
        // checksum collision, which is acceptable, hence assumeTrue rather than assertTrue.
        StringBuilder msg = new StringBuilder();
        msg.append("before: [").append(checksumBeforeCorruption).append("] ");
        msg.append("after: [").append(checksumAfterCorruption).append("] ");
        msg.append("checksum value after corruption: ").append(actualChecksumAfterCorruption).append("] ");
        msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
        logger.info("Checksum {}", msg);
        assumeTrue("Checksum collision - " + msg.toString(), // collision
        checksumAfterCorruption != checksumBeforeCorruption || // checksum corrupted
        actualChecksumAfterCorruption != checksumBeforeCorruption);
        assertThat("no file corrupted", fileToCorrupt, notNullValue());
    }
}
Also used : Path(java.nio.file.Path) ChecksumIndexInput(org.apache.lucene.store.ChecksumIndexInput) FileChannel(java.nio.channels.FileChannel) IndexInput(org.apache.lucene.store.IndexInput) ByteBuffer(java.nio.ByteBuffer) Directory(org.apache.lucene.store.Directory) FSDirectory(org.apache.lucene.store.FSDirectory)
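
corruptFile flips a single byte and then demonstrates that the stored footer checksum no longer matches. CodecUtil.checksumEntireFile packages the verification half of that into a single call. A hedged sketch of the full round trip follows; the file and codec names are illustrative, and the offset 16 simply lands inside the payload, past the codec header.

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

public class CorruptionDetectionDemo {
    public static void main(String[] args) throws Exception {
        Path dirPath = Files.createTempDirectory("corrupt-demo");
        try (Directory dir = FSDirectory.open(dirPath)) {
            // Write a small checksummed file.
            try (IndexOutput out = dir.createOutput("demo.bin", IOContext.DEFAULT)) {
                CodecUtil.writeHeader(out, "demo", 1);
                out.writeString("some payload");
                CodecUtil.writeFooter(out);
            }
            // Flip one payload byte in place, as corruptFile above does.
            try (FileChannel ch = FileChannel.open(dirPath.resolve("demo.bin"),
                    StandardOpenOption.READ, StandardOpenOption.WRITE)) {
                ByteBuffer bb = ByteBuffer.allocate(1);
                ch.read(bb, 16);                   // read one byte past the header
                bb.flip();
                bb.put(0, (byte) (bb.get(0) + 1)); // flip it
                ch.write(bb, 16);
            }
            // Verification recomputes the CRC32 and compares it to the footer.
            try (IndexInput in = dir.openInput("demo.bin", IOContext.DEFAULT)) {
                CodecUtil.checksumEntireFile(in);
            } catch (CorruptIndexException expected) {
                System.out.println("corruption detected: " + expected.getMessage());
            }
        }
    }
}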

Example 9 with IndexInput

Use of org.apache.lucene.store.IndexInput in project elasticsearch by elastic.

From the class InputStreamIndexInputTests, the testSingleReadSingleByteLimit method:

public void testSingleReadSingleByteLimit() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 1);
    }
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 2);
    }
    output.close();
    IndexInput input = dir.openInput("test", IOContext.DEFAULT);
    for (int i = 0; i < 3; i++) {
        InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
        assertThat(input.getFilePointer(), lessThan(input.length()));
        assertThat(is.actualSizeToRead(), equalTo(1L));
        assertThat(is.read(), equalTo(1));
        assertThat(is.read(), equalTo(-1));
    }
    for (int i = 0; i < 3; i++) {
        InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
        assertThat(input.getFilePointer(), lessThan(input.length()));
        assertThat(is.actualSizeToRead(), equalTo(1L));
        assertThat(is.read(), equalTo(2));
        assertThat(is.read(), equalTo(-1));
    }
    assertThat(input.getFilePointer(), equalTo(input.length()));
    InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
    assertThat(is.actualSizeToRead(), equalTo(0L));
    assertThat(is.read(), equalTo(-1));
}
Also used : IndexInput(org.apache.lucene.store.IndexInput) IndexOutput(org.apache.lucene.store.IndexOutput) RAMDirectory(org.apache.lucene.store.RAMDirectory)
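
InputStreamIndexInput is an Elasticsearch helper that exposes a bounded window of an IndexInput as a java.io.InputStream, which is what the assertions on actualSizeToRead exercise above. For readers outside the Elasticsearch code base, here is a minimal adapter in the same spirit; the class BoundedIndexInputStream is hypothetical, not part of Lucene or Elasticsearch.

import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.store.IndexInput;

/** Exposes at most {@code limit} bytes of the underlying IndexInput as a stream. */
final class BoundedIndexInputStream extends InputStream {
    private final IndexInput in;
    private long remaining;

    BoundedIndexInputStream(IndexInput in, long limit) throws IOException {
        this.in = in;
        // Never promise more bytes than are left in the input.
        this.remaining = Math.min(limit, in.length() - in.getFilePointer());
    }

    @Override
    public int read() throws IOException {
        if (remaining <= 0) {
            return -1; // window exhausted, like is.read() returning -1 in the test
        }
        remaining--;
        return in.readByte() & 0xFF;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        if (len == 0) {
            return 0;
        }
        if (remaining <= 0) {
            return -1;
        }
        int n = (int) Math.min(len, remaining);
        in.readBytes(b, off, n);
        remaining -= n;
        return n;
    }
}

Successive instances over the same IndexInput consume consecutive windows, because the file pointer advances with each read; that is exactly the behavior the loops in the test rely on.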

Example 10 with IndexInput

Use of org.apache.lucene.store.IndexInput in project elasticsearch by elastic.

From the class InputStreamIndexInputTests, the testReadMultiFourBytesLimit method:

public void testReadMultiFourBytesLimit() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 1);
    }
    for (int i = 0; i < 3; i++) {
        output.writeByte((byte) 2);
    }
    output.close();
    IndexInput input = dir.openInput("test", IOContext.DEFAULT);
    byte[] read = new byte[4];
    assertThat(input.getFilePointer(), lessThan(input.length()));
    InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
    assertThat(is.actualSizeToRead(), equalTo(4L));
    assertThat(is.read(read), equalTo(4));
    assertThat(read[0], equalTo((byte) 1));
    assertThat(read[1], equalTo((byte) 1));
    assertThat(read[2], equalTo((byte) 1));
    assertThat(read[3], equalTo((byte) 2));
    assertThat(input.getFilePointer(), lessThan(input.length()));
    is = new InputStreamIndexInput(input, 4);
    assertThat(is.actualSizeToRead(), equalTo(2L));
    assertThat(is.read(read), equalTo(2));
    assertThat(read[0], equalTo((byte) 2));
    assertThat(read[1], equalTo((byte) 2));
    assertThat(input.getFilePointer(), equalTo(input.length()));
    is = new InputStreamIndexInput(input, 4);
    assertThat(is.actualSizeToRead(), equalTo(0L));
    assertThat(is.read(read), equalTo(-1));
}
Also used : IndexInput(org.apache.lucene.store.IndexInput) IndexOutput(org.apache.lucene.store.IndexOutput) RAMDirectory(org.apache.lucene.store.RAMDirectory)
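
Both tests are built on RAMDirectory, which was deprecated in Lucene 8 and removed in Lucene 9. On recent Lucene versions the same write/read round trip works unchanged against ByteBuffersDirectory, the heap-backed replacement; a small sketch:

import java.util.Arrays;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

public class ByteBuffersDirectoryDemo {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory()) {
            // Same payload as the tests above: three 1s followed by three 2s.
            try (IndexOutput out = dir.createOutput("test", IOContext.DEFAULT)) {
                for (int i = 0; i < 3; i++) out.writeByte((byte) 1);
                for (int i = 0; i < 3; i++) out.writeByte((byte) 2);
            }
            try (IndexInput in = dir.openInput("test", IOContext.DEFAULT)) {
                byte[] read = new byte[(int) in.length()];
                in.readBytes(read, 0, read.length);
                System.out.println(Arrays.toString(read)); // [1, 1, 1, 2, 2, 2]
            }
        }
    }
}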

Aggregations

IndexInput (org.apache.lucene.store.IndexInput): 173
Directory (org.apache.lucene.store.Directory): 75
IndexOutput (org.apache.lucene.store.IndexOutput): 75
ChecksumIndexInput (org.apache.lucene.store.ChecksumIndexInput): 49
IOException (java.io.IOException): 26
RAMDirectory (org.apache.lucene.store.RAMDirectory): 25
FilterDirectory (org.apache.lucene.store.FilterDirectory): 23
CorruptIndexException (org.apache.lucene.index.CorruptIndexException): 21
BytesRef (org.apache.lucene.util.BytesRef): 18
ArrayList (java.util.ArrayList): 17
BufferedChecksumIndexInput (org.apache.lucene.store.BufferedChecksumIndexInput): 17
Test (org.junit.Test): 17
BytesRefBuilder (org.apache.lucene.util.BytesRefBuilder): 13
NodeBuilder (org.apache.jackrabbit.oak.spi.state.NodeBuilder): 10
IndexFormatTooNewException (org.apache.lucene.index.IndexFormatTooNewException): 10
IndexFormatTooOldException (org.apache.lucene.index.IndexFormatTooOldException): 10
CorruptingIndexOutput (org.apache.lucene.store.CorruptingIndexOutput): 10
NRTCachingDirectory (org.apache.lucene.store.NRTCachingDirectory): 10
IntersectVisitor (org.apache.lucene.index.PointValues.IntersectVisitor): 9
Relation (org.apache.lucene.index.PointValues.Relation): 9