
Example 71 with IndexShard

Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.

From class PeerRecoveryTargetServiceTests, method testResetStartingSeqNoIfLastCommitCorrupted: the test verifies that when the shard's last commit is corrupted, the generated start-recovery request falls back to UNASSIGNED_SEQ_NO and an empty metadata snapshot, forcing a full file-based peer recovery.

@Test
public void testResetStartingSeqNoIfLastCommitCorrupted() throws Exception {
    IndexShard shard = newStartedShard(false);
    populateRandomData(shard);
    DiscoveryNode pNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
    DiscoveryNode rNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
    shard = reinitShard(shard, ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.PeerRecoverySource.INSTANCE));
    shard.markAsRecovering("peer recovery", new RecoveryState(shard.routingEntry(), pNode, rNode));
    shard.prepareForIndexRecovery();
    // recover locally as far as the global checkpoint allows, then simulate a corrupted last commit
    long startingSeqNo = shard.recoverLocallyUpToGlobalCheckpoint();
    shard.store().markStoreCorrupted(new IOException("simulated"));
    RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null);
    StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest(logger, rNode, recoveryTarget, startingSeqNo);
    // with a corrupted store, the request must ignore the locally recovered sequence number
    // and request a full file-based recovery (no usable metadata snapshot)
    assertThat(request.startingSeqNo(), equalTo(UNASSIGNED_SEQ_NO));
    assertThat(request.metadataSnapshot().size(), equalTo(0));
    recoveryTarget.decRef();
    closeShards(shard);
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), IndexShard (org.elasticsearch.index.shard.IndexShard), IOException (java.io.IOException), Test (org.junit.Test)

Example 72 with IndexShard

Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.

From class PeerRecoveryTargetServiceTests, method testWriteFileChunksConcurrently: the test verifies that file chunks written out of order by several concurrent sender threads still produce a target store whose metadata matches the source shard's snapshot.

@Test
public void testWriteFileChunksConcurrently() throws Exception {
    IndexShard sourceShard = newStartedShard(true);
    int numDocs = between(20, 100);
    for (int i = 0; i < numDocs; i++) {
        indexDoc(sourceShard, "_doc", Integer.toString(i));
    }
    sourceShard.flush(new FlushRequest());
    Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata(null);
    List<StoreFileMetadata> mdFiles = new ArrayList<>();
    for (StoreFileMetadata md : sourceSnapshot) {
        mdFiles.add(md);
    }
    final IndexShard targetShard = newShard(false);
    final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId());
    final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId());
    targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode));
    final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null);
    final PlainActionFuture<Void> receiveFileInfoFuture = new PlainActionFuture<>();
    recoveryTarget.receiveFileInfo(mdFiles.stream().map(StoreFileMetadata::name).collect(Collectors.toList()), mdFiles.stream().map(StoreFileMetadata::length).collect(Collectors.toList()), Collections.emptyList(), Collections.emptyList(), 0, receiveFileInfoFuture);
    receiveFileInfoFuture.actionGet(5, TimeUnit.SECONDS);
    // split each source file into randomly sized chunk requests
    List<RecoveryFileChunkRequest> requests = new ArrayList<>();
    for (StoreFileMetadata md : mdFiles) {
        try (IndexInput in = sourceShard.store().directory().openInput(md.name(), IOContext.READONCE)) {
            int pos = 0;
            while (pos < md.length()) {
                int length = between(1, Math.toIntExact(md.length() - pos));
                byte[] buffer = new byte[length];
                in.readBytes(buffer, 0, length);
                requests.add(new RecoveryFileChunkRequest(0, sourceShard.shardId(), md, pos, new BytesArray(buffer), pos + length == md.length(), 1, 1));
                pos += length;
            }
        }
    }
    // shuffle the chunks and hand them to several sender threads so they arrive out of order
    Randomness.shuffle(requests);
    BlockingQueue<RecoveryFileChunkRequest> queue = new ArrayBlockingQueue<>(requests.size());
    queue.addAll(requests);
    Thread[] senders = new Thread[between(1, 4)];
    CyclicBarrier barrier = new CyclicBarrier(senders.length);
    for (int i = 0; i < senders.length; i++) {
        senders[i] = new Thread(() -> {
            try {
                barrier.await();
                RecoveryFileChunkRequest r;
                while ((r = queue.poll()) != null) {
                    recoveryTarget.writeFileChunk(r.metadata(), r.position(), r.content(), r.lastChunk(), r.totalTranslogOps(), ActionListener.wrap(ignored -> {
                    }, e -> {
                        throw new AssertionError(e);
                    }));
                }
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });
        senders[i].start();
    }
    for (Thread sender : senders) {
        sender.join();
    }
    PlainActionFuture<Void> cleanFilesFuture = new PlainActionFuture<>();
    recoveryTarget.cleanFiles(0, Long.parseLong(sourceSnapshot.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)), sourceSnapshot, cleanFilesFuture);
    cleanFilesFuture.actionGet();
    recoveryTarget.decRef();
    // after cleanFiles the target store must be identical to the source snapshot
    Store.MetadataSnapshot targetSnapshot = targetShard.snapshotStoreMetadata();
    Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot);
    assertThat(diff.different, empty());
    closeShards(sourceShard, targetShard);
}
Also used: DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode), ArrayList (java.util.ArrayList), Store (org.elasticsearch.index.store.Store), StoreFileMetadata (org.elasticsearch.index.store.StoreFileMetadata), ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue), FlushRequest (org.elasticsearch.action.admin.indices.flush.FlushRequest), IndexInput (org.apache.lucene.store.IndexInput), BytesArray (org.elasticsearch.common.bytes.BytesArray), IndexShard (org.elasticsearch.index.shard.IndexShard), IOException (java.io.IOException), CyclicBarrier (java.util.concurrent.CyclicBarrier), PlainActionFuture (org.elasticsearch.action.support.PlainActionFuture), Test (org.junit.Test)
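
The example above drives the asynchronous recovery API by passing a PlainActionFuture as the listener and then blocking on it. A minimal, self-contained sketch of that pattern, assuming a hypothetical listener-based method doAsyncStep (a placeholder, not part of the recovery API):

// Minimal sketch of the blocking-on-a-listener pattern used above; not taken from
// the crate sources. doAsyncStep is a hypothetical placeholder for listener-based
// calls such as recoveryTarget.receiveFileInfo(...) or recoveryTarget.cleanFiles(...).
import java.util.concurrent.TimeUnit;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;

class ListenerBlockingSketch {

    static void doAsyncStep(ActionListener<Void> listener) {
        // placeholder: a real call would complete the listener when its work finishes
        listener.onResponse(null);
    }

    static void runStepAndWait() {
        // PlainActionFuture implements ActionListener, so it can be passed directly
        PlainActionFuture<Void> future = new PlainActionFuture<>();
        doAsyncStep(future);
        // block until the listener is completed, giving up after five seconds
        future.actionGet(5, TimeUnit.SECONDS);
    }
}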

Example 73 with IndexShard

Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.

From class BlobStoreRepositoryRestoreTests, method testRestoreSnapshotWithExistingFiles.

/**
 * Restoring a snapshot that contains multiple files must succeed even when
 * some files already exist in the shard's store.
 */
public void testRestoreSnapshotWithExistingFiles() throws IOException {
    final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
    final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
    IndexShard shard = newShard(shardId, true);
    try {
        // index documents in the shards
        final int numDocs = scaledRandomIntBetween(1, 500);
        recoverShardFromStore(shard);
        for (int i = 0; i < numDocs; i++) {
            indexDoc(shard, Integer.toString(i));
            if (rarely()) {
                flushShard(shard, false);
            }
        }
        assertDocCount(shard, numDocs);
        // snapshot the shard
        final Repository repository = createRepository();
        final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid"));
        snapshotShard(shard, snapshot, repository);
        // capture current store files
        final Store.MetadataSnapshot storeFiles = shard.snapshotStoreMetadata();
        assertFalse(storeFiles.asMap().isEmpty());
        // close the shard
        closeShards(shard);
        // delete some random files in the store
        List<String> deletedFiles = randomSubsetOf(randomIntBetween(1, storeFiles.size() - 1), storeFiles.asMap().keySet());
        for (String deletedFile : deletedFiles) {
            Files.delete(shard.shardPath().resolveIndex().resolve(deletedFile));
        }
        // build a new shard using the same store directory as the closed shard
        ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE);
        shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetadata(), null, new InternalEngineFactory(), () -> {
        }, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER);
        // restore the shard
        recoverShardFromSnapshot(shard, snapshot, repository);
        // check that the shard is not corrupted
        TestUtil.checkIndex(shard.store().directory());
        // check that all files have been restored
        final Directory directory = shard.store().directory();
        final List<String> directoryFiles = Arrays.asList(directory.listAll());
        for (StoreFileMetadata storeFile : storeFiles) {
            String fileName = storeFile.name();
            assertTrue("File [" + fileName + "] does not exist in store directory", directoryFiles.contains(fileName));
            assertEquals(storeFile.length(), shard.store().directory().fileLength(fileName));
        }
    } finally {
        if (shard != null && shard.state() != IndexShardState.CLOSED) {
            try {
                shard.close("test", false);
            } finally {
                IOUtils.close(shard.store());
            }
        }
    }
}
Also used: IndexId (org.elasticsearch.repositories.IndexId), IndexShard (org.elasticsearch.index.shard.IndexShard), Store (org.elasticsearch.index.store.Store), Matchers.containsString (org.hamcrest.Matchers.containsString), StoreFileMetadata (org.elasticsearch.index.store.StoreFileMetadata), ShardId (org.elasticsearch.index.shard.ShardId), Snapshot (org.elasticsearch.snapshots.Snapshot), SnapshotId (org.elasticsearch.snapshots.SnapshotId), Repository (org.elasticsearch.repositories.Repository), FsRepository (org.elasticsearch.repositories.fs.FsRepository), InternalEngineFactory (org.elasticsearch.index.engine.InternalEngineFactory), ShardRouting (org.elasticsearch.cluster.routing.ShardRouting), Directory (org.apache.lucene.store.Directory)

Example 74 with IndexShard

Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.

From class BlobStoreRepositoryRestoreTests, method testSnapshotWithConflictingName: the test verifies that snapshotting a shard under an already-used snapshot name fails with an IndexShardSnapshotFailedException reporting a duplicate snapshot name.

public void testSnapshotWithConflictingName() throws IOException {
    final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
    final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
    IndexShard shard = newShard(shardId, true);
    try {
        // index documents in the shards
        final int numDocs = scaledRandomIntBetween(1, 500);
        recoverShardFromStore(shard);
        for (int i = 0; i < numDocs; i++) {
            indexDoc(shard, Integer.toString(i));
            if (rarely()) {
                flushShard(shard, false);
            }
        }
        assertDocCount(shard, numDocs);
        // snapshot the shard
        final Repository repository = createRepository();
        final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid"));
        final String shardGen = snapshotShard(shard, snapshot, repository);
        assertNotNull(shardGen);
        final Snapshot snapshotWithSameName = new Snapshot(repository.getMetadata().name(), new SnapshotId(snapshot.getSnapshotId().getName(), "_uuid2"));
        final PlainActionFuture<SnapshotInfo> future = PlainActionFuture.newFuture();
        repository.finalizeSnapshot(snapshot.getSnapshotId(), ShardGenerations.builder().put(indexId, 0, shardGen).build(), 0L, null, 1, Collections.emptyList(), -1L, false, Metadata.builder().put(shard.indexSettings().getIndexMetadata(), false).build(), true, future);
        future.actionGet();
        IndexShardSnapshotFailedException isfe = expectThrows(IndexShardSnapshotFailedException.class, () -> snapshotShard(shard, snapshotWithSameName, repository));
        assertThat(isfe.getMessage(), containsString("Duplicate snapshot name"));
    } finally {
        if (shard != null && shard.state() != IndexShardState.CLOSED) {
            try {
                shard.close("test", false);
            } finally {
                IOUtils.close(shard.store());
            }
        }
    }
}
Also used: ShardId (org.elasticsearch.index.shard.ShardId), IndexId (org.elasticsearch.repositories.IndexId), Snapshot (org.elasticsearch.snapshots.Snapshot), SnapshotId (org.elasticsearch.snapshots.SnapshotId), Repository (org.elasticsearch.repositories.Repository), FsRepository (org.elasticsearch.repositories.fs.FsRepository), SnapshotInfo (org.elasticsearch.snapshots.SnapshotInfo), IndexShard (org.elasticsearch.index.shard.IndexShard), IndexShardSnapshotFailedException (org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException), Matchers.containsString (org.hamcrest.Matchers.containsString)

Example 75 with IndexShard

Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.

From class PeerRecoverySourceServiceTests, method testDuplicateRecoveries: the test verifies that registering a second recovery for the same target throws DelayRecoveryException until the first recovery has been removed.

@Test
public void testDuplicateRecoveries() throws IOException {
    IndexShard primary = newStartedShard(true);
    PeerRecoverySourceService peerRecoverySourceService = new PeerRecoverySourceService(mock(TransportService.class), mock(IndicesService.class), new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
    StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest(primary.shardId(), randomAlphaOfLength(10), getFakeDiscoNode("source"), getFakeDiscoNode("target"), Store.MetadataSnapshot.EMPTY, randomBoolean(), randomLong(), SequenceNumbers.UNASSIGNED_SEQ_NO);
    peerRecoverySourceService.start();
    RecoverySourceHandler handler = peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary);
    DelayRecoveryException delayRecoveryException = expectThrows(DelayRecoveryException.class, () -> peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary));
    assertThat(delayRecoveryException.getMessage(), containsString("recovery with same target already registered"));
    peerRecoverySourceService.ongoingRecoveries.remove(primary, handler);
    // re-adding after removing previous attempt works
    handler = peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary);
    peerRecoverySourceService.ongoingRecoveries.remove(primary, handler);
    closeShards(primary);
}
Also used: ClusterSettings (org.elasticsearch.common.settings.ClusterSettings), TransportService (org.elasticsearch.transport.TransportService), IndexShard (org.elasticsearch.index.shard.IndexShard), IndicesService (org.elasticsearch.indices.IndicesService), Test (org.junit.Test)

Aggregations

IndexShard (org.elasticsearch.index.shard.IndexShard): 173 uses
IndexService (org.elasticsearch.index.IndexService): 74 uses
ShardId (org.elasticsearch.index.shard.ShardId): 49 uses
IndicesService (org.elasticsearch.indices.IndicesService): 47 uses
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting): 36 uses
Test (org.junit.Test): 35 uses
IOException (java.io.IOException): 29 uses
Engine (org.elasticsearch.index.engine.Engine): 26 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 21 uses
ElasticsearchException (org.elasticsearch.ElasticsearchException): 19 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 18 uses
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 18 uses
Settings (org.elasticsearch.common.settings.Settings): 18 uses
ArrayList (java.util.ArrayList): 16 uses
Translog (org.elasticsearch.index.translog.Translog): 16 uses
HashMap (java.util.HashMap): 15 uses
Index (org.elasticsearch.index.Index): 15 uses
IndexMetaData (org.elasticsearch.cluster.metadata.IndexMetaData): 13 uses
PlainActionFuture (org.elasticsearch.action.support.PlainActionFuture): 12 uses
List (java.util.List): 11 uses
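
The aggregation counts above show which classes most often appear alongside IndexShard in these tests. A minimal sketch of the recurring pattern, assuming a test class that extends the same IndexShardTestCase base the examples above rely on; the helper names (newStartedShard, between, indexDoc, rarely, flushShard, assertDocCount, closeShards) are taken from those examples, not invented here:

// Minimal sketch, not from the crate project: it only combines the helper calls
// that appear in the examples above, assuming a subclass of IndexShardTestCase.
@Test
public void testIndexFlushAndCount() throws Exception {
    // start a primary shard backed by a fresh store
    IndexShard shard = newStartedShard(true);
    int numDocs = between(1, 50);
    for (int i = 0; i < numDocs; i++) {
        indexDoc(shard, "_doc", Integer.toString(i));
        if (rarely()) {
            // occasionally flush mid-way, as the snapshot/restore examples do
            flushShard(shard, false);
        }
    }
    // force a final commit and verify every document is accounted for
    shard.flush(new FlushRequest());
    assertDocCount(shard, numDocs);
    // release shard resources
    closeShards(shard);
}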