Use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
The class RecoverySourceHandlerTests, method generateFiles.
private List<StoreFileMetadata> generateFiles(Store store, int numFiles, IntSupplier fileSizeSupplier) throws IOException {
    List<StoreFileMetadata> files = new ArrayList<>();
    for (int i = 0; i < numFiles; i++) {
        byte[] buffer = randomByteArrayOfLength(fileSizeSupplier.getAsInt());
        CRC32 digest = new CRC32();
        digest.update(buffer, 0, buffer.length);
        // The length is buffer.length + 8: the payload plus the 8-byte checksum footer written below.
        StoreFileMetadata md = new StoreFileMetadata("test-" + i, buffer.length + 8,
            Store.digestToString(digest.getValue()), org.apache.lucene.util.Version.LATEST);
        try (OutputStream out = new IndexOutputOutputStream(store.createVerifyingOutput(md.name(), md, IOContext.DEFAULT))) {
            out.write(buffer);
            out.write(Numbers.longToBytes(digest.getValue()));
        }
        store.directory().sync(Collections.singleton(md.name()));
        files.add(md);
    }
    return files;
}
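For illustration, a minimal caller might look like the following sketch (newStore, createTempDir, and between are helpers assumed from the surrounding test class, and the 1 KiB size ceiling is arbitrary):

Store store = newStore(createTempDir(), false);
// Each generated file carries an 8-byte CRC32 footer, so md.length() is payload + 8.
List<StoreFileMetadata> files = generateFiles(store, between(1, 5), () -> between(1, 1024));
store.close();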
Use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
The class RecoverySourceHandlerTests, method testHandleExceptionOnSendFiles.
@Test
public void testHandleExceptionOnSendFiles() throws Throwable {
    final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
    final StartRecoveryRequest request = getStartRecoveryRequest();
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetadata> metas = new ArrayList<>();
    for (StoreFileMetadata md : metadata) {
        metas.add(md);
    }
    final boolean throwCorruptedIndexException = randomBoolean();
    // The target fails every chunk, either with a wrapped CorruptIndexException or a plain RuntimeException.
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
        @Override
        public void writeFileChunk(StoreFileMetadata md, long position, BytesReference content,
                                   boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
            if (throwCorruptedIndexException) {
                listener.onFailure(new RuntimeException(new CorruptIndexException("foo", "bar")));
            } else {
                listener.onFailure(new RuntimeException("boom"));
            }
        }
    };
    RecoverySourceHandler handler = new RecoverySourceHandler(null, new AsyncRecoveryTarget(target, recoveryExecutor),
        threadPool, request, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 10), between(1, 4)) {
        @Override
        protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
        }
    };
    PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>();
    handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture);
    Exception ex = expectThrows(Exception.class, sendFilesFuture::actionGet);
    final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex);
    if (throwCorruptedIndexException) {
        assertNotNull(unwrappedCorruption);
        assertEquals("[File corruption occurred on recovery but checksums are ok]", ex.getMessage());
    } else {
        assertNull(unwrappedCorruption);
        assertEquals("boom", ex.getMessage());
    }
    // The engine must not be failed: the corruption happened on the wire, not in the local store.
    assertFalse(failedEngine.get());
    IOUtils.close(store);
}
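The assertion branch hinges on ExceptionsHelper.unwrapCorruption walking the cause chain. A minimal illustration of the distinction the test relies on (the messages are made-up placeholders):

Exception corrupt = new RuntimeException(new CorruptIndexException("foo", "bar"));
Exception plain = new RuntimeException("boom");
assertNotNull(ExceptionsHelper.unwrapCorruption(corrupt)); // a corruption cause is found and returned
assertNull(ExceptionsHelper.unwrapCorruption(plain));      // no corruption anywhere in the chain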
Use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
The class RecoverySourceHandlerTests, method testSendFileChunksConcurrently.
@Test
public void testSendFileChunksConcurrently() throws Exception {
    final IndexShard shard = mock(IndexShard.class);
    when(shard.state()).thenReturn(IndexShardState.STARTED);
    final List<FileChunkResponse> unrepliedChunks = new CopyOnWriteArrayList<>();
    final AtomicInteger sentChunks = new AtomicInteger();
    // Capture every chunk's reply listener instead of answering it, so the test controls the ack order.
    final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {
        final AtomicLong chunkNumberGenerator = new AtomicLong();

        @Override
        public void writeFileChunk(StoreFileMetadata md, long position, BytesReference content,
                                   boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
            final long chunkNumber = chunkNumberGenerator.getAndIncrement();
            logger.info("--> write chunk name={} seq={}, position={}", md.name(), chunkNumber, position);
            unrepliedChunks.add(new FileChunkResponse(chunkNumber, listener));
            sentChunks.incrementAndGet();
        }
    };
    final int maxConcurrentChunks = between(1, 8);
    final int chunkSize = between(1, 32);
    final RecoverySourceHandler handler = new RecoverySourceHandler(shard, recoveryTarget, threadPool,
        getStartRecoveryRequest(), chunkSize, maxConcurrentChunks, between(1, 10));
    Store store = newStore(createTempDir(), false);
    List<StoreFileMetadata> files = generateFiles(store, between(1, 10), () -> between(1, chunkSize * 20));
    // Ceiling division: each file contributes length/chunkSize chunks, rounded up.
    int totalChunks = files.stream().mapToInt(md -> ((int) md.length() + chunkSize - 1) / chunkSize).sum();
    PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>();
    handler.sendFiles(store, files.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture);
    assertBusy(() -> {
        assertThat(sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks)));
        assertThat(unrepliedChunks, hasSize(sentChunks.get()));
    });
    List<FileChunkResponse> ackedChunks = new ArrayList<>();
    while (sentChunks.get() < totalChunks || unrepliedChunks.isEmpty() == false) {
        List<FileChunkResponse> chunksToAck = randomSubsetOf(between(1, unrepliedChunks.size()), unrepliedChunks);
        unrepliedChunks.removeAll(chunksToAck);
        ackedChunks.addAll(chunksToAck);
        ackedChunks.sort(Comparator.comparing(c -> c.chunkNumber));
        // The checkpoint is the highest chunk number below which every chunk has been acknowledged.
        int checkpoint = -1;
        for (int i = 0; i < ackedChunks.size(); i++) {
            if (i != ackedChunks.get(i).chunkNumber) {
                break;
            } else {
                checkpoint = i;
            }
        }
        int chunksToSend = Math.min(
            totalChunks - sentChunks.get(),                             // limited by the remaining chunks
            maxConcurrentChunks - (sentChunks.get() - 1 - checkpoint)); // limited by the buffering chunks
        int expectedSentChunks = sentChunks.get() + chunksToSend;
        int expectedUnrepliedChunks = unrepliedChunks.size() + chunksToSend;
        chunksToAck.forEach(c -> c.listener.onResponse(null));
        assertBusy(() -> {
            assertThat(sentChunks.get(), equalTo(expectedSentChunks));
            assertThat(unrepliedChunks, hasSize(expectedUnrepliedChunks));
        });
    }
    sendFilesFuture.actionGet();
    store.close();
}
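FileChunkResponse is not defined in this excerpt. A minimal definition consistent with its use above, pairing a chunk number with the reply listener held back until the test acknowledges it, would be:

private static class FileChunkResponse {
    final long chunkNumber;               // send order, assigned by the test's chunkNumberGenerator
    final ActionListener<Void> listener;  // reply channel captured by writeFileChunk

    FileChunkResponse(long chunkNumber, ActionListener<Void> listener) {
        this.chunkNumber = chunkNumber;
        this.listener = listener;
    }
}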
Use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
The class RecoverySourceHandlerTests, method testSendFiles.
@Test
public void testSendFiles() throws Throwable {
    final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
    final StartRecoveryRequest request = getStartRecoveryRequest();
    Store store = newStore(createTempDir());
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetadata> metas = new ArrayList<>();
    for (StoreFileMetadata md : metadata) {
        metas.add(md);
    }
    Store targetStore = newStore(createTempDir());
    MultiFileWriter multiFileWriter = new MultiFileWriter(targetStore, mock(RecoveryState.Index.class), "", logger, () -> {
    });
    // The target writes each received chunk straight into the target store.
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
        @Override
        public void writeFileChunk(StoreFileMetadata md, long position, BytesReference content,
                                   boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
            ActionListener.completeWith(listener, () -> {
                multiFileWriter.writeFileChunk(md, position, content, lastChunk);
                return null;
            });
        }
    };
    RecoverySourceHandler handler = new RecoverySourceHandler(null, new AsyncRecoveryTarget(target, recoveryExecutor),
        threadPool, request, Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 5), between(1, 5));
    PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>();
    handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture);
    sendFilesFuture.actionGet(5, TimeUnit.SECONDS);
    // After sending, source and target must agree file-for-file.
    Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null);
    Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
    assertEquals(metas.size(), recoveryDiff.identical.size());
    assertEquals(0, recoveryDiff.different.size());
    assertEquals(0, recoveryDiff.missing.size());
    IndexReader reader = DirectoryReader.open(targetStore.directory());
    assertEquals(numDocs, reader.maxDoc());
    IOUtils.close(reader, store, multiFileWriter, targetStore);
}
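Every test above subclasses TestRecoveryTargetHandler, which this excerpt also omits. A plausible skeleton is a RecoveryTargetHandler whose callbacks are all no-ops, so each test overrides only the hooks it exercises:

class TestRecoveryTargetHandler implements RecoveryTargetHandler {

    @Override
    public void writeFileChunk(StoreFileMetadata md, long position, BytesReference content,
                               boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
        // no-op by default; individual tests override this
    }

    // ...the remaining RecoveryTargetHandler callbacks are stubbed the same way
}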
Use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
The class RecoverySourceHandlerTests, method testCancelRecoveryDuringPhase1.
@Test
public void testCancelRecoveryDuringPhase1() throws Exception {
    Store store = newStore(createTempDir("source"), false);
    IndexShard shard = mock(IndexShard.class);
    when(shard.store()).thenReturn(store);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    AtomicBoolean wasCancelled = new AtomicBoolean();
    SetOnce<Runnable> cancelRecovery = new SetOnce<>();
    // Each target callback replies normally, then sometimes cancels the recovery mid-flight.
    final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {
        @Override
        public void receiveFileInfo(List<String> phase1FileNames, List<Long> phase1FileSizes,
                                    List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes,
                                    int totalTranslogOps, ActionListener<Void> listener) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (randomBoolean()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }

        @Override
        public void writeFileChunk(StoreFileMetadata md, long position, BytesReference content,
                                   boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (rarely()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }

        @Override
        public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetadata,
                               ActionListener<Void> listener) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (randomBoolean()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }
    };
    StartRecoveryRequest startRecoveryRequest = getStartRecoveryRequest();
    final RecoverySourceHandler handler = new RecoverySourceHandler(shard, recoveryTarget, threadPool,
        startRecoveryRequest, between(1, 16), between(1, 4), between(1, 4)) {
        @Override
        void createRetentionLease(long startingSeqNo, ActionListener<RetentionLease> listener) {
            final String leaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(startRecoveryRequest.targetNode().getId());
            listener.onResponse(new RetentionLease(leaseId, startingSeqNo, threadPool.absoluteTimeInMillis(),
                ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE));
        }
    };
    cancelRecovery.set(() -> handler.cancel("test"));
    final StepListener<RecoverySourceHandler.SendFileResult> phase1Listener = new StepListener<>();
    try {
        final CountDownLatch latch = new CountDownLatch(1);
        handler.phase1(DirectoryReader.listCommits(dir).get(0), 0, () -> 0, new LatchedActionListener<>(phase1Listener, latch));
        latch.await();
        phase1Listener.result();
    } catch (Exception e) {
        // If phase1 failed, it must have failed because this test cancelled it.
        assertTrue(wasCancelled.get());
        assertNotNull(ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class));
    }
    store.close();
}
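The SetOnce<Runnable> indirection exists because the cancel action references the handler, which can only be constructed after the target (whose callbacks invoke the cancel action) already exists. In isolation, the wiring looks like this sketch:

SetOnce<Runnable> cancelRecovery = new SetOnce<>();
// ...construct the target whose callbacks call cancelRecovery.get().run(),
// then the handler that uses that target...
cancelRecovery.set(() -> handler.cancel("test")); // SetOnce permits exactly one assignment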