Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class PeerRecoveryTargetServiceTests, method testResetStartingSeqNoIfLastCommitCorrupted.
@Test
public void testResetStartingSeqNoIfLastCommitCorrupted() throws Exception {
    IndexShard shard = newStartedShard(false);
    populateRandomData(shard);
    DiscoveryNode pNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
    DiscoveryNode rNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
    shard = reinitShard(shard, ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.PeerRecoverySource.INSTANCE));
    shard.markAsRecovering("peer recovery", new RecoveryState(shard.routingEntry(), pNode, rNode));
    shard.prepareForIndexRecovery();
    long startingSeqNo = shard.recoverLocallyUpToGlobalCheckpoint();
    // corrupt the store after local recovery so the last commit can no longer be trusted
    shard.store().markStoreCorrupted(new IOException("simulated"));
    RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null);
    StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest(logger, rNode, recoveryTarget, startingSeqNo);
    // with a corrupted store the request must fall back to a full file-based recovery:
    // no starting sequence number and an empty metadata snapshot
    assertThat(request.startingSeqNo(), equalTo(UNASSIGNED_SEQ_NO));
    assertThat(request.metadataSnapshot().size(), equalTo(0));
    recoveryTarget.decRef();
    closeShards(shard);
}
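The behavior this test pins down is the fallback: when the last commit is corrupted, the start-recovery request must not trust any local state. Below is a minimal, JDK-only sketch of that decision rule; StartRequestSketch, Request, and buildStartRequest are hypothetical stand-ins for illustration, not the CrateDB/Elasticsearch API (only the -2 value mirrors SequenceNumbers.UNASSIGNED_SEQ_NO).

// Hypothetical sketch, not the real PeerRecoveryTargetService code.
final class StartRequestSketch {

    static final long UNASSIGNED_SEQ_NO = -2; // same value as SequenceNumbers.UNASSIGNED_SEQ_NO

    static final class Request {
        final long startingSeqNo;
        final int metadataFileCount;

        Request(long startingSeqNo, int metadataFileCount) {
            this.startingSeqNo = startingSeqNo;
            this.metadataFileCount = metadataFileCount;
        }
    }

    // A corrupted local store means neither the locally recovered sequence number
    // nor the local file listing can be trusted, so the source must send everything.
    static Request buildStartRequest(boolean localStoreCorrupted, long locallyRecoveredSeqNo, int localFileCount) {
        return localStoreCorrupted
            ? new Request(UNASSIGNED_SEQ_NO, 0)
            : new Request(locallyRecoveredSeqNo, localFileCount);
    }
}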
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class PeerRecoveryTargetServiceTests, method testWriteFileChunksConcurrently.
@Test
public void testWriteFileChunksConcurrently() throws Exception {
    IndexShard sourceShard = newStartedShard(true);
    int numDocs = between(20, 100);
    for (int i = 0; i < numDocs; i++) {
        indexDoc(sourceShard, "_doc", Integer.toString(i));
    }
    sourceShard.flush(new FlushRequest());
    Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata(null);
    List<StoreFileMetadata> mdFiles = new ArrayList<>();
    for (StoreFileMetadata md : sourceSnapshot) {
        mdFiles.add(md);
    }
    final IndexShard targetShard = newShard(false);
    final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId());
    final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId());
    targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode));
    final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null);
    final PlainActionFuture<Void> receiveFileInfoFuture = new PlainActionFuture<>();
    recoveryTarget.receiveFileInfo(
        mdFiles.stream().map(StoreFileMetadata::name).collect(Collectors.toList()),
        mdFiles.stream().map(StoreFileMetadata::length).collect(Collectors.toList()),
        Collections.emptyList(), Collections.emptyList(), 0, receiveFileInfoFuture);
    receiveFileInfoFuture.actionGet(5, TimeUnit.SECONDS);
    // split every source file into randomly sized chunks
    List<RecoveryFileChunkRequest> requests = new ArrayList<>();
    for (StoreFileMetadata md : mdFiles) {
        try (IndexInput in = sourceShard.store().directory().openInput(md.name(), IOContext.READONCE)) {
            int pos = 0;
            while (pos < md.length()) {
                int length = between(1, Math.toIntExact(md.length() - pos));
                byte[] buffer = new byte[length];
                in.readBytes(buffer, 0, length);
                requests.add(new RecoveryFileChunkRequest(0, sourceShard.shardId(), md, pos, new BytesArray(buffer), pos + length == md.length(), 1, 1));
                pos += length;
            }
        }
    }
    // deliver the chunks out of order, from several concurrent sender threads
    Randomness.shuffle(requests);
    BlockingQueue<RecoveryFileChunkRequest> queue = new ArrayBlockingQueue<>(requests.size());
    queue.addAll(requests);
    Thread[] senders = new Thread[between(1, 4)];
    CyclicBarrier barrier = new CyclicBarrier(senders.length);
    for (int i = 0; i < senders.length; i++) {
        senders[i] = new Thread(() -> {
            try {
                barrier.await();
                RecoveryFileChunkRequest r;
                while ((r = queue.poll()) != null) {
                    recoveryTarget.writeFileChunk(r.metadata(), r.position(), r.content(), r.lastChunk(), r.totalTranslogOps(), ActionListener.wrap(ignored -> {
                    }, e -> {
                        throw new AssertionError(e);
                    }));
                }
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });
        senders[i].start();
    }
    for (Thread sender : senders) {
        sender.join();
    }
    PlainActionFuture<Void> cleanFilesFuture = new PlainActionFuture<>();
    recoveryTarget.cleanFiles(0, Long.parseLong(sourceSnapshot.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)), sourceSnapshot, cleanFilesFuture);
    cleanFilesFuture.actionGet();
    recoveryTarget.decRef();
    // the reassembled target store must not differ from the source store
    Store.MetadataSnapshot targetSnapshot = targetShard.snapshotStoreMetadata();
    Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot);
    assertThat(diff.different, empty());
    closeShards(sourceShard, targetShard);
}
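The concurrency core of this test is a simple drain-a-shared-queue pattern: pre-fill a BlockingQueue, release all sender threads together through a CyclicBarrier, and let each thread poll until the queue is empty, so every chunk is consumed exactly once in arbitrary order. A self-contained, JDK-only sketch of that pattern follows; process is a hypothetical stand-in for recoveryTarget.writeFileChunk.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CyclicBarrier;

final class ConcurrentDrainSketch {
    public static void main(String[] args) throws Exception {
        BlockingQueue<Integer> queue = new ArrayBlockingQueue<>(100);
        for (int i = 0; i < 100; i++) {
            queue.add(i); // stand-ins for the shuffled file-chunk requests
        }
        Thread[] senders = new Thread[4];
        CyclicBarrier barrier = new CyclicBarrier(senders.length); // release all threads at once
        for (int i = 0; i < senders.length; i++) {
            senders[i] = new Thread(() -> {
                try {
                    barrier.await();
                    Integer chunk;
                    while ((chunk = queue.poll()) != null) { // each chunk is taken by exactly one thread
                        process(chunk);
                    }
                } catch (Exception e) {
                    throw new AssertionError(e);
                }
            });
            senders[i].start();
        }
        for (Thread sender : senders) {
            sender.join(); // once all senders finish, the queue is drained
        }
    }

    static void process(int chunk) {
        // hypothetical stand-in for recoveryTarget.writeFileChunk(...)
    }
}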
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class BlobStoreRepositoryRestoreTests, method testRestoreSnapshotWithExistingFiles.
/**
* Restoring a snapshot that contains multiple files must succeed even when
* some files already exist in the shard's store.
*/
public void testRestoreSnapshotWithExistingFiles() throws IOException {
    final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
    final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
    IndexShard shard = newShard(shardId, true);
    try {
        // index documents in the shard
        final int numDocs = scaledRandomIntBetween(1, 500);
        recoverShardFromStore(shard);
        for (int i = 0; i < numDocs; i++) {
            indexDoc(shard, Integer.toString(i));
            if (rarely()) {
                flushShard(shard, false);
            }
        }
        assertDocCount(shard, numDocs);
        // snapshot the shard
        final Repository repository = createRepository();
        final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid"));
        snapshotShard(shard, snapshot, repository);
        // capture current store files
        final Store.MetadataSnapshot storeFiles = shard.snapshotStoreMetadata();
        assertFalse(storeFiles.asMap().isEmpty());
        // close the shard
        closeShards(shard);
        // delete some random files in the store
        List<String> deletedFiles = randomSubsetOf(randomIntBetween(1, storeFiles.size() - 1), storeFiles.asMap().keySet());
        for (String deletedFile : deletedFiles) {
            Files.delete(shard.shardPath().resolveIndex().resolve(deletedFile));
        }
        // build a new shard using the same store directory as the closed shard
        ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE);
        shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetadata(), null, new InternalEngineFactory(), () -> {
        }, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER);
        // restore the shard
        recoverShardFromSnapshot(shard, snapshot, repository);
        // check that the shard is not corrupted
        TestUtil.checkIndex(shard.store().directory());
        // check that all files have been restored
        final Directory directory = shard.store().directory();
        final List<String> directoryFiles = Arrays.asList(directory.listAll());
        for (StoreFileMetadata storeFile : storeFiles) {
            String fileName = storeFile.name();
            assertTrue("File [" + fileName + "] does not exist in store directory", directoryFiles.contains(fileName));
            assertEquals(storeFile.length(), shard.store().directory().fileLength(fileName));
        }
    } finally {
        if (shard != null && shard.state() != IndexShardState.CLOSED) {
            try {
                shard.close("test", false);
            } finally {
                IOUtils.close(shard.store());
            }
        }
    }
}
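The "existing files" precondition is set up by deleting a random strict subset of the store files, so at least one file survives and at least one must be restored. A JDK-only sketch of such a subset pick, mirroring randomSubsetOf(randomIntBetween(1, storeFiles.size() - 1), ...) above; the class and method names are hypothetical.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;

final class RandomSubsetSketch {
    // Requires files.size() >= 2, as guaranteed by the assertFalse(...isEmpty())
    // check plus the "size - 1" upper bound in the test above.
    static List<String> randomStrictSubset(List<String> files, Random random) {
        List<String> shuffled = new ArrayList<>(files);
        Collections.shuffle(shuffled, random);
        // pick between 1 and size - 1 entries: at least one file is deleted, at least one survives
        int count = 1 + random.nextInt(files.size() - 1);
        return new ArrayList<>(shuffled.subList(0, count));
    }
}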
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class BlobStoreRepositoryRestoreTests, method testSnapshotWithConflictingName.
public void testSnapshotWithConflictingName() throws IOException {
    final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
    final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
    IndexShard shard = newShard(shardId, true);
    try {
        // index documents in the shard
        final int numDocs = scaledRandomIntBetween(1, 500);
        recoverShardFromStore(shard);
        for (int i = 0; i < numDocs; i++) {
            indexDoc(shard, Integer.toString(i));
            if (rarely()) {
                flushShard(shard, false);
            }
        }
        assertDocCount(shard, numDocs);
        // snapshot the shard
        final Repository repository = createRepository();
        final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid"));
        final String shardGen = snapshotShard(shard, snapshot, repository);
        assertNotNull(shardGen);
        // a second snapshot that reuses the name but carries a different UUID must be rejected
        final Snapshot snapshotWithSameName = new Snapshot(repository.getMetadata().name(), new SnapshotId(snapshot.getSnapshotId().getName(), "_uuid2"));
        final PlainActionFuture<SnapshotInfo> future = PlainActionFuture.newFuture();
        repository.finalizeSnapshot(snapshot.getSnapshotId(), ShardGenerations.builder().put(indexId, 0, shardGen).build(), 0L, null, 1, Collections.emptyList(), -1L, false, Metadata.builder().put(shard.indexSettings().getIndexMetadata(), false).build(), true, future);
        future.actionGet();
        IndexShardSnapshotFailedException isfe = expectThrows(IndexShardSnapshotFailedException.class, () -> snapshotShard(shard, snapshotWithSameName, repository));
        assertThat(isfe.getMessage(), containsString("Duplicate snapshot name"));
    } finally {
        if (shard != null && shard.state() != IndexShardState.CLOSED) {
            try {
                shard.close("test", false);
            } finally {
                IOUtils.close(shard.store());
            }
        }
    }
}
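Note that the conflict here is on the snapshot name, not the UUID: "_uuid2" differs, yet the repository still refuses the second snapshot. A hypothetical, JDK-only sketch of such a keyed-by-name rule follows; this is an illustration of the invariant the test asserts, not the BlobStoreRepository implementation (which throws IndexShardSnapshotFailedException rather than IllegalStateException).

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class SnapshotNameRegistrySketch {
    private final Map<String, String> nameToUuid = new ConcurrentHashMap<>();

    // Snapshots are keyed by name: a second registration under the same name
    // with a different UUID is rejected, no matter how the UUIDs differ.
    void register(String name, String uuid) {
        String existing = nameToUuid.putIfAbsent(name, uuid);
        if (existing != null && !existing.equals(uuid)) {
            throw new IllegalStateException("Duplicate snapshot name [" + name + "]");
        }
    }
}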
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class PeerRecoverySourceServiceTests, method testDuplicateRecoveries.
@Test
public void testDuplicateRecoveries() throws IOException {
    IndexShard primary = newStartedShard(true);
    PeerRecoverySourceService peerRecoverySourceService = new PeerRecoverySourceService(mock(TransportService.class), mock(IndicesService.class), new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
    StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest(primary.shardId(), randomAlphaOfLength(10), getFakeDiscoNode("source"), getFakeDiscoNode("target"), Store.MetadataSnapshot.EMPTY, randomBoolean(), randomLong(), SequenceNumbers.UNASSIGNED_SEQ_NO);
    peerRecoverySourceService.start();
    RecoverySourceHandler handler = peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary);
    // a second recovery for the same target must be rejected while the first is still registered
    DelayRecoveryException delayRecoveryException = expectThrows(DelayRecoveryException.class, () -> peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary));
    assertThat(delayRecoveryException.getMessage(), containsString("recovery with same target already registered"));
    peerRecoverySourceService.ongoingRecoveries.remove(primary, handler);
    // re-adding after removing the previous attempt works
    handler = peerRecoverySourceService.ongoingRecoveries.addNewRecovery(startRecoveryRequest, primary);
    peerRecoverySourceService.ongoingRecoveries.remove(primary, handler);
    closeShards(primary);
}
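The rule this test pins down is one ongoing recovery per target at a time, with re-registration allowed once the previous attempt is removed. A JDK-only sketch of such a registry follows; the class, key shape, and method names are hypothetical, not the PeerRecoverySourceService internals (which throw DelayRecoveryException rather than IllegalStateException).

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class OngoingRecoveriesSketch {
    private final Map<String, Object> ongoing = new ConcurrentHashMap<>();

    // Rejects a second registration for the same (shard, target) key while the
    // first handler is still present.
    Object addNewRecovery(String shardAndTargetKey, Object handler) {
        if (ongoing.putIfAbsent(shardAndTargetKey, handler) != null) {
            throw new IllegalStateException("recovery with same target already registered");
        }
        return handler;
    }

    // Removing the registered handler frees the key, so a retry can register again.
    void remove(String shardAndTargetKey, Object handler) {
        ongoing.remove(shardAndTargetKey, handler);
    }
}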