
Example 1 with CorruptIndexException

Use of org.apache.lucene.index.CorruptIndexException in the elasticsearch project by elastic.

From the class RecoverySourceHandler, method phase1:

/**
     * Perform phase1 of the recovery operations. Once this {@link IndexCommit}
     * snapshot has been performed no commit operations (files being fsync'd)
     * are effectively allowed on this index until all recovery phases are done
     * <p>
     * Phase1 examines the segment files on the target node and copies over the
     * segments that are missing. Only segments that have the same size and
     * checksum can be reused
     */
public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
        StopWatch stopWatch = new StopWatch().start();
        final Store.MetadataSnapshot recoverySourceMetadata;
        try {
            recoverySourceMetadata = store.getMetadata(snapshot);
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            shard.failShard("recovery", ex);
            throw ex;
        }
        for (String name : snapshot.getFileNames()) {
            final StoreFileMetaData md = recoverySourceMetadata.get(name);
            if (md == null) {
                logger.info("Snapshot differs from actual index for file: {} meta: {}", name, recoverySourceMetadata.asMap());
                throw new CorruptIndexException("Snapshot differs from actual index - maybe index was removed metadata has " + recoverySourceMetadata.asMap().size() + " files", name);
            }
        }
        // Generate a "diff" of all the identical, different, and missing
        // segment files on the target node, using the existing files on
        // the source node
        String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
        String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
        final boolean recoverWithSyncId = recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
        if (recoverWithSyncId) {
            final long numDocsTarget = request.metadataSnapshot().getNumDocs();
            final long numDocsSource = recoverySourceMetadata.getNumDocs();
            if (numDocsTarget != numDocsSource) {
                throw new IllegalStateException("try to recover " + request.shardId() + " from primary shard with sync id but number " + "of docs differ: " + numDocsSource + " (" + request.sourceNode().getName() + ", primary) vs " + numDocsTarget + "(" + request.targetNode().getName() + ")");
            }
            // we shortcut recovery here because we have nothing to copy. but we must still start the engine on the target.
            // so we don't return here
            logger.trace("skipping [phase1]- identical sync id [{}] found on both source and target", recoverySourceSyncId);
        } else {
            final Store.RecoveryDiff diff = recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
            for (StoreFileMetaData md : diff.identical) {
                response.phase1ExistingFileNames.add(md.name());
                response.phase1ExistingFileSizes.add(md.length());
                existingTotalSize += md.length();
                if (logger.isTraceEnabled()) {
                    logger.trace("recovery [phase1]: not recovering [{}], exist in local store and has checksum [{}]," + " size [{}]", md.name(), md.checksum(), md.length());
                }
                totalSize += md.length();
            }
            List<StoreFileMetaData> phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size());
            phase1Files.addAll(diff.different);
            phase1Files.addAll(diff.missing);
            for (StoreFileMetaData md : phase1Files) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {
                    logger.trace("recovery [phase1]: recovering [{}], exists in local store, but is different: remote [{}], local [{}]", md.name(), request.metadataSnapshot().asMap().get(md.name()), md);
                } else {
                    logger.trace("recovery [phase1]: recovering [{}], does not exist in remote", md.name());
                }
                response.phase1FileNames.add(md.name());
                response.phase1FileSizes.add(md.length());
                totalSize += md.length();
            }
            response.phase1TotalSize = totalSize;
            response.phase1ExistingTotalSize = existingTotalSize;
            logger.trace("recovery [phase1]: recovering_files [{}] with total_size [{}], reusing_files [{}] with total_size [{}]", response.phase1FileNames.size(), new ByteSizeValue(totalSize), response.phase1ExistingFileNames.size(), new ByteSizeValue(existingTotalSize));
            cancellableThreads.execute(() -> recoveryTarget.receiveFileInfo(response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, translogView.totalOperations()));
            // Factory creating a buffered output stream per file; each flushed chunk is sent to the recovery target
            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = md -> new BufferedOutputStream(new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
            sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
            // Send the CLEAN_FILES request, which renames the transferred temporary
            // files to their final names and deletes any target files that are not
            // part of this recovery
            try {
                cancellableThreads.executeIO(() -> recoveryTarget.cleanFiles(translogView.totalOperations(), recoverySourceMetadata));
            } catch (RemoteTransportException | IOException targetException) {
                final IOException corruptIndexException;
                // the target may report corruption while verifying the files it received
                //   - maybe due to old segments without checksums or length only checks
                // so re-check the integrity of the corresponding files on the source first
                if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException)) != null) {
                    try {
                        final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
                        StoreFileMetaData[] metadata = StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(size -> new StoreFileMetaData[size]);
                        ArrayUtil.timSort(metadata, (o1, o2) -> {
                            // check small files first
                            return Long.compare(o1.length(), o2.length());
                        });
                        for (StoreFileMetaData md : metadata) {
                            cancellableThreads.checkForCancel();
                            logger.debug("checking integrity for file {} after remove corruption exception", md);
                            if (store.checkIntegrityNoException(md) == false) {
                                // we are corrupted on the primary -- fail!
                                shard.failShard("recovery", corruptIndexException);
                                logger.warn("Corrupted file detected {} checksum mismatch", md);
                                throw corruptIndexException;
                            }
                        }
                    } catch (IOException ex) {
                        targetException.addSuppressed(ex);
                        throw targetException;
                    }
                    // corruption has happened on the way to replica
                    RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null);
                    exception.addSuppressed(targetException);
                    logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", shard.shardId(), request.targetNode()), corruptIndexException);
                    throw exception;
                } else {
                    throw targetException;
                }
            }
        }
        logger.trace("recovery [phase1]: took [{}]", stopWatch.totalTime());
        response.phase1Time = stopWatch.totalTime().millis();
    } catch (Exception e) {
        throw new RecoverFilesRecoveryException(request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
        store.decRef();
    }
}
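
The core of phase1 is the recovery diff: a segment file is reused only when its name, length, and checksum all match on source and target; everything else is shipped. Below is a minimal, self-contained sketch of that classification, where FileMeta and RecoveryDiff are hypothetical stand-ins for Elasticsearch's StoreFileMetaData and Store.RecoveryDiff (the real recoveryDiff is more conservative, e.g. it groups files per segment and special-cases legacy checksums):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Hypothetical stand-in for StoreFileMetaData: just name, length, checksum.
record FileMeta(String name, long length, String checksum) {}

// Hypothetical stand-in for Store.RecoveryDiff.
record RecoveryDiff(List<FileMeta> identical, List<FileMeta> different, List<FileMeta> missing) {}

final class RecoveryDiffSketch {

    // Classify the source's files against the target's metadata: a file is
    // reusable ("identical") only when name, length, and checksum all match;
    // everything else must be copied over in phase1.
    static RecoveryDiff diff(Map<String, FileMeta> source, Map<String, FileMeta> target) {
        List<FileMeta> identical = new ArrayList<>();
        List<FileMeta> different = new ArrayList<>();
        List<FileMeta> missing = new ArrayList<>();
        for (FileMeta src : source.values()) {
            FileMeta tgt = target.get(src.name());
            if (tgt == null) {
                missing.add(src);      // absent on the target
            } else if (tgt.length() == src.length() && tgt.checksum().equals(src.checksum())) {
                identical.add(src);    // safe to reuse
            } else {
                different.add(src);    // same name, different content
            }
        }
        return new RecoveryDiff(identical, different, missing);
    }

    public static void main(String[] args) {
        Map<String, FileMeta> source = Map.of(
                "_0.cfs", new FileMeta("_0.cfs", 1024, "abc"),
                "_1.cfs", new FileMeta("_1.cfs", 2048, "def"));
        Map<String, FileMeta> target = Map.of(
                "_0.cfs", new FileMeta("_0.cfs", 1024, "abc"));
        RecoveryDiff d = diff(source, target);
        System.out.println("reuse=" + d.identical().size()
                + " copy=" + (d.different().size() + d.missing().size()));
    }
}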
Also used : IndexCommit(org.apache.lucene.index.IndexCommit) CancellableThreads(org.elasticsearch.common.util.CancellableThreads) IndexShardClosedException(org.elasticsearch.index.shard.IndexShardClosedException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) Nullable(org.elasticsearch.common.Nullable) IndexShardRelocatedException(org.elasticsearch.index.shard.IndexShardRelocatedException) RecoveryEngineException(org.elasticsearch.index.engine.RecoveryEngineException) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) Function(java.util.function.Function) Supplier(java.util.function.Supplier) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) BufferedOutputStream(java.io.BufferedOutputStream) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) StoreFileMetaData(org.elasticsearch.index.store.StoreFileMetaData) Settings(org.elasticsearch.common.settings.Settings) SequenceNumbersService(org.elasticsearch.index.seqno.SequenceNumbersService) Store(org.elasticsearch.index.store.Store) LocalCheckpointTracker(org.elasticsearch.index.seqno.LocalCheckpointTracker) Streams(org.elasticsearch.common.io.Streams) StreamSupport(java.util.stream.StreamSupport) IOContext(org.apache.lucene.store.IOContext) Releasable(org.elasticsearch.common.lease.Releasable) Loggers(org.elasticsearch.common.logging.Loggers) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) OutputStream(java.io.OutputStream) ArrayUtil(org.apache.lucene.util.ArrayUtil) IndexShardState(org.elasticsearch.index.shard.IndexShardState) IndexInput(org.apache.lucene.store.IndexInput) IndexShard(org.elasticsearch.index.shard.IndexShard) IOUtils(org.apache.lucene.util.IOUtils) IOException(java.io.IOException) StopWatch(org.elasticsearch.common.StopWatch) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) RemoteTransportException(org.elasticsearch.transport.RemoteTransportException) List(java.util.List) Logger(org.apache.logging.log4j.Logger) InputStreamIndexInput(org.elasticsearch.common.lucene.store.InputStreamIndexInput) Translog(org.elasticsearch.index.translog.Translog) RateLimiter(org.apache.lucene.store.RateLimiter)

Example 2 with CorruptIndexException

Use of org.apache.lucene.index.CorruptIndexException in the elasticsearch project by elastic.

From the class RecoveryTarget, method cleanFiles:

@Override
public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
    state().getTranslog().totalOperations(totalTranslogOps);
    // first, we go and move files that were created with the recovery id suffix to
    // the actual names, it's ok if we have a corrupted index here, since we have replicas
    // to recover from in case of a full cluster shutdown just when this code executes...
    renameAllTempFiles();
    final Store store = store();
    try {
        store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
    } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
        // the files we received failed verification, so the shard contents cannot be
        // trusted: remove the corruption marker and wipe its content on disk if possible.
        try {
            try {
                store.removeCorruptionMarker();
            } finally {
                // clean up and delete all files
                Lucene.cleanLuceneIndex(store.directory());
            }
        } catch (Exception e) {
            logger.debug("Failed to clean lucene index", e);
            ex.addSuppressed(e);
        }
        RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex);
        fail(rfe, true);
        throw rfe;
    } catch (Exception ex) {
        RecoveryFailedException rfe = new RecoveryFailedException(state(), "failed to clean after recovery", ex);
        fail(rfe, true);
        throw rfe;
    }
}
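
The notable pattern here is the cleanup-on-corruption sequence: if the files that were just copied fail verification, the target cannot keep any of them, so the corruption marker is removed and the whole index is wiped before the failure is rethrown. A minimal sketch of that shape using plain Lucene APIs; wipeIndex and Verifier are hypothetical stand-ins (Elasticsearch uses its internal Lucene.cleanLuceneIndex helper here):

import java.io.IOException;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.Directory;

final class CorruptionCleanupSketch {

    @FunctionalInterface
    interface Verifier {
        void verify() throws IOException;
    }

    // Hypothetical helper, roughly what Lucene.cleanLuceneIndex accomplishes
    // above: delete every file so the next recovery attempt starts from scratch.
    static void wipeIndex(Directory dir) throws IOException {
        for (String file : dir.listAll()) {
            dir.deleteFile(file);
        }
    }

    // Mirrors the shape of cleanFiles: on a corruption-type failure, wipe the
    // target and rethrow the original exception, attaching any cleanup failure
    // as a suppressed exception so neither error is lost.
    static void verifyOrWipe(Directory dir, Verifier verifier) throws IOException {
        try {
            verifier.verify();
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            try {
                wipeIndex(dir);
            } catch (Exception cleanupFailure) {
                ex.addSuppressed(cleanupFailure);
            }
            throw ex;
        }
    }
}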
Also used : IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) Store(org.elasticsearch.index.store.Store) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) ElasticsearchException(org.elasticsearch.ElasticsearchException) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException)

Example 3 with CorruptIndexException

Use of org.apache.lucene.index.CorruptIndexException in the elasticsearch project by elastic.

From the class ReplicationOperationTests, method testReplication:

public void testReplication() throws Exception {
    final String index = "test";
    final ShardId shardId = new ShardId(index, "_na_", 0);
    ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
    IndexMetaData indexMetaData = state.getMetaData().index(index);
    final long primaryTerm = indexMetaData.primaryTerm(0);
    final IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId);
    ShardRouting primaryShard = indexShardRoutingTable.primaryShard();
    if (primaryShard.relocating() && randomBoolean()) {
        // simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
        state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
        primaryShard = primaryShard.getTargetRelocatingShard();
    }
    // add a few in-sync allocation ids that don't have corresponding routing entries
    Set<String> staleAllocationIds = Sets.newHashSet(generateRandomStringArray(4, 10, false));
    state = ClusterState.builder(state).metaData(MetaData.builder(state.metaData()).put(IndexMetaData.builder(indexMetaData).putInSyncAllocationIds(0, Sets.union(indexMetaData.inSyncAllocationIds(0), staleAllocationIds)))).build();
    final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, state);
    final Map<ShardRouting, Exception> expectedFailures = new HashMap<>();
    final Set<ShardRouting> expectedFailedShards = new HashSet<>();
    for (ShardRouting replica : expectedReplicas) {
        if (randomBoolean()) {
            Exception t;
            boolean criticalFailure = randomBoolean();
            if (criticalFailure) {
                t = new CorruptIndexException("simulated", (String) null);
            } else {
                t = new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING);
            }
            logger.debug("--> simulating failure on {} with [{}]", replica, t.getClass().getSimpleName());
            expectedFailures.put(replica, t);
            if (criticalFailure) {
                expectedFailedShards.add(replica);
            }
        }
    }
    Request request = new Request(shardId);
    PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
    final ClusterState finalState = state;
    final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures);
    final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm);
    final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, () -> finalState);
    op.execute();
    assertThat(request.primaryTerm(), equalTo(primaryTerm));
    assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
    assertThat(request.processedOnReplicas, equalTo(expectedReplicas));
    assertThat(replicasProxy.failedReplicas, equalTo(expectedFailedShards));
    assertThat(replicasProxy.markedAsStaleCopies, equalTo(staleAllocationIds));
    assertTrue("listener is not marked as done", listener.isDone());
    ShardInfo shardInfo = listener.actionGet().getShardInfo();
    assertThat(shardInfo.getFailed(), equalTo(expectedFailedShards.size()));
    assertThat(shardInfo.getFailures(), arrayWithSize(expectedFailedShards.size()));
    assertThat(shardInfo.getSuccessful(), equalTo(1 + expectedReplicas.size() - expectedFailures.size()));
    final List<ShardRouting> unassignedShards = indexShardRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED);
    final int totalShards = 1 + expectedReplicas.size() + unassignedShards.size();
    assertThat(shardInfo.getTotal(), equalTo(totalShards));
    assertThat(primary.knownLocalCheckpoints.remove(primaryShard.allocationId().getId()), equalTo(primary.localCheckpoint));
    assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints));
}
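
The test draws a line between critical and transient replica failures: a CorruptIndexException must fail the shard copy, while an IndexShardNotStartedException is merely reported. The two-argument constructor used above takes a message and a resource description (the name of the corrupt file, or null when unknown). A small sketch of that distinction, with the hypothetical classifier failsTheShardCopy standing in for the check the test encodes in its criticalFailure flag:

import java.io.IOException;

import org.apache.lucene.index.CorruptIndexException;

final class ReplicaFailureSketch {

    // Hypothetical classifier mirroring the test: corruption is a critical
    // failure that must remove the shard copy; other failures are transient
    // and only reported.
    static boolean failsTheShardCopy(Exception e) {
        return e instanceof CorruptIndexException;
    }

    public static void main(String[] args) {
        // CorruptIndexException(String message, String resourceDescription):
        // the resource description names the corrupt file and may be null
        // when it is unknown, as in the simulated failure above.
        Exception critical = new CorruptIndexException("simulated", (String) null);
        System.out.println(failsTheShardCopy(critical));                 // true
        System.out.println(failsTheShardCopy(new IOException("retry"))); // false
    }
}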
Also used : IndexShardRoutingTable(org.elasticsearch.cluster.routing.IndexShardRoutingTable) HashMap(java.util.HashMap) ShardId(org.elasticsearch.index.shard.ShardId) HashSet(java.util.HashSet) ShardInfo(org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo) ClusterState(org.elasticsearch.cluster.ClusterState) IndexShardNotStartedException(org.elasticsearch.index.shard.IndexShardNotStartedException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) ElasticsearchException(org.elasticsearch.ElasticsearchException) UnavailableShardsException(org.elasticsearch.action.UnavailableShardsException) ExecutionException(java.util.concurrent.ExecutionException) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) PlainActionFuture(org.elasticsearch.action.support.PlainActionFuture) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting)

Example 4 with CorruptIndexException

Use of org.apache.lucene.index.CorruptIndexException in the elasticsearch project by elastic.

From the class ReplicationOperationTests, method testDemotedPrimary:

public void testDemotedPrimary() throws Exception {
    final String index = "test";
    final ShardId shardId = new ShardId(index, "_na_", 0);
    ClusterState state = stateWithActivePrimary(index, true, 1 + randomInt(2), randomInt(2));
    IndexMetaData indexMetaData = state.getMetaData().index(index);
    final long primaryTerm = indexMetaData.primaryTerm(0);
    ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
    if (primaryShard.relocating() && randomBoolean()) {
        // simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
        state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
        primaryShard = primaryShard.getTargetRelocatingShard();
    }
    // add in-sync allocation id that doesn't have a corresponding routing entry
    state = ClusterState.builder(state).metaData(MetaData.builder(state.metaData()).put(IndexMetaData.builder(indexMetaData).putInSyncAllocationIds(0, Sets.union(indexMetaData.inSyncAllocationIds(0), Sets.newHashSet(randomAsciiOfLength(10)))))).build();
    final Set<ShardRouting> expectedReplicas = getExpectedReplicas(shardId, state);
    final Map<ShardRouting, Exception> expectedFailures = new HashMap<>();
    final ShardRouting failedReplica = randomFrom(new ArrayList<>(expectedReplicas));
    expectedFailures.put(failedReplica, new CorruptIndexException("simulated", (String) null));
    Request request = new Request(shardId);
    PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
    final ClusterState finalState = state;
    final boolean testPrimaryDemotedOnStaleShardCopies = randomBoolean();
    final TestReplicaProxy replicasProxy = new TestReplicaProxy(expectedFailures) {

        @Override
        public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
            if (testPrimaryDemotedOnStaleShardCopies) {
                super.failShardIfNeeded(replica, primaryTerm, message, exception, onSuccess, onPrimaryDemoted, onIgnoredFailure);
            } else {
                assertThat(replica, equalTo(failedReplica));
                onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
            }
        }

        @Override
        public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
            if (testPrimaryDemotedOnStaleShardCopies) {
                onPrimaryDemoted.accept(new ElasticsearchException("the king is dead"));
            } else {
                super.markShardCopyAsStaleIfNeeded(shardId, allocationId, primaryTerm, onSuccess, onPrimaryDemoted, onIgnoredFailure);
            }
        }
    };
    AtomicBoolean primaryFailed = new AtomicBoolean();
    final TestPrimary primary = new TestPrimary(primaryShard, primaryTerm) {

        @Override
        public void failShard(String message, Exception exception) {
            assertTrue(primaryFailed.compareAndSet(false, true));
        }
    };
    final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, () -> finalState);
    op.execute();
    assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
    assertTrue("listener is not marked as done", listener.isDone());
    assertTrue(primaryFailed.get());
    assertListenerThrows("should throw exception to trigger retry", listener, ReplicationOperation.RetryOnPrimaryException.class);
}
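
The overridden proxy methods illustrate the callback contract of failShardIfNeeded and markShardCopyAsStaleIfNeeded: the outcome of reporting a failed or stale shard copy comes back through exactly one of three callbacks (success, primary demoted, failure ignored). A minimal sketch of that contract, where MasterDecision and failShard are hypothetical names invented for the illustration:

import java.util.function.Consumer;

final class ShardFailureCallbackSketch {

    // Hypothetical master response for the sketch.
    enum MasterDecision { ACKED, PRIMARY_STALE, UNREACHABLE }

    // Mirrors the three-way callback contract used by failShardIfNeeded:
    // exactly one of the callbacks fires per invocation.
    static void failShard(MasterDecision decision,
                          Runnable onSuccess,
                          Consumer<Exception> onPrimaryDemoted,
                          Consumer<Exception> onIgnoredFailure) {
        switch (decision) {
            case ACKED -> onSuccess.run();
            case PRIMARY_STALE -> onPrimaryDemoted.accept(new IllegalStateException("the king is dead"));
            case UNREACHABLE -> onIgnoredFailure.accept(new IllegalStateException("master unreachable"));
        }
    }

    public static void main(String[] args) {
        failShard(MasterDecision.PRIMARY_STALE,
                () -> System.out.println("replica failed and removed"),
                e -> System.out.println("primary demoted: " + e.getMessage()),
                e -> System.out.println("ignored: " + e.getMessage()));
    }
}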
Also used : HashMap(java.util.HashMap) ElasticsearchException(org.elasticsearch.ElasticsearchException) ShardId(org.elasticsearch.index.shard.ShardId) Consumer(java.util.function.Consumer) ClusterState(org.elasticsearch.cluster.ClusterState) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) IndexShardNotStartedException(org.elasticsearch.index.shard.IndexShardNotStartedException) UnavailableShardsException(org.elasticsearch.action.UnavailableShardsException) ExecutionException(java.util.concurrent.ExecutionException) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) PlainActionFuture(org.elasticsearch.action.support.PlainActionFuture) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting)

Example 5 with CorruptIndexException

Use of org.apache.lucene.index.CorruptIndexException in the elasticsearch project by elastic.

From the class RecoverySourceHandlerTests, method testHandleExceptinoOnSendSendFiles:

public void testHandleExceptinoOnSendSendFiles() throws Throwable {
    Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1).put("indices.recovery.concurrent_small_file_streams", 1).build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    final StartRecoveryRequest request = new StartRecoveryRequest(shardId, new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT), null, randomBoolean(), randomNonNegativeLong(), randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : 0L);
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, () -> 0L, e -> () -> {
    }, recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {

        @Override
        protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
        }
    };
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetaData> metas = new ArrayList<>();
    for (StoreFileMetaData md : metadata) {
        metas.add(md);
    }
    final boolean throwCorruptedIndexException = randomBoolean();
    Store targetStore = newStore(createTempDir(), false);
    try {
        handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), (md) -> {
            if (throwCorruptedIndexException) {
                throw new RuntimeException(new CorruptIndexException("foo", "bar"));
            } else {
                throw new RuntimeException("boom");
            }
        });
        fail("exception index");
    } catch (RuntimeException ex) {
        assertNull(ExceptionsHelper.unwrapCorruption(ex));
        if (throwCorruptedIndexException) {
            assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]");
        } else {
            assertEquals(ex.getMessage(), "boom");
        }
    } catch (CorruptIndexException ex) {
        fail("not expected here");
    }
    assertFalse(failedEngine.get());
    IOUtils.close(store, targetStore);
}
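
The assertion assertNull(ExceptionsHelper.unwrapCorruption(ex)) verifies that no corruption-typed cause escapes the handler once the local checksums have been re-verified: the failure is rethrown under a transport-level type instead. A sketch of what such an unwrap helper does, assuming it simply walks the cause chain for Lucene's corruption-type exceptions (the real helper may include additional safeguards, e.g. against cycles in the cause chain):

import java.io.IOException;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;

final class UnwrapCorruptionSketch {

    // Return the first corruption-type cause in the chain, or null if none.
    static IOException unwrapCorruption(Throwable t) {
        for (Throwable cause = t; cause != null; cause = cause.getCause()) {
            if (cause instanceof CorruptIndexException
                    || cause instanceof IndexFormatTooOldException
                    || cause instanceof IndexFormatTooNewException) {
                return (IOException) cause;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Throwable wrapped = new RuntimeException(new CorruptIndexException("foo", "bar"));
        System.out.println(unwrapCorruption(wrapped) != null);                      // true
        System.out.println(unwrapCorruption(new RuntimeException("boom")) != null); // false
    }
}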
Also used : Path(java.nio.file.Path) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ArrayList(java.util.ArrayList) Store(org.elasticsearch.index.store.Store) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) IOException(java.io.IOException) Document(org.apache.lucene.document.Document) ParsedDocument(org.elasticsearch.index.mapper.ParsedDocument) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) StoreFileMetaData(org.elasticsearch.index.store.StoreFileMetaData) StringField(org.apache.lucene.document.StringField) Settings(org.elasticsearch.common.settings.Settings) IndexSettings(org.elasticsearch.index.IndexSettings) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) RandomIndexWriter(org.apache.lucene.index.RandomIndexWriter) Directory(org.apache.lucene.store.Directory)

Aggregations

CorruptIndexException (org.apache.lucene.index.CorruptIndexException): 91
IndexFormatTooNewException (org.apache.lucene.index.IndexFormatTooNewException): 35
IndexFormatTooOldException (org.apache.lucene.index.IndexFormatTooOldException): 35
Directory (org.apache.lucene.store.Directory): 26
IOException (java.io.IOException): 24
ChecksumIndexInput (org.apache.lucene.store.ChecksumIndexInput): 24
IndexInput (org.apache.lucene.store.IndexInput): 24
IndexOutput (org.apache.lucene.store.IndexOutput): 23
ArrayList (java.util.ArrayList): 16
RAMDirectory (org.apache.lucene.store.RAMDirectory): 15
BytesRef (org.apache.lucene.util.BytesRef): 14
FileNotFoundException (java.io.FileNotFoundException): 12
ShardId (org.elasticsearch.index.shard.ShardId): 12
NoSuchFileException (java.nio.file.NoSuchFileException): 11
IOContext (org.apache.lucene.store.IOContext): 11
EOFException (java.io.EOFException): 10
HashMap (java.util.HashMap): 10
AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException): 10
FilterDirectory (org.apache.lucene.store.FilterDirectory): 10
Document (org.apache.lucene.document.Document): 8