Use of org.elasticsearch.action.LatchedActionListener in project crate by crate.
From the class RecoverySourceHandlerTests, method testCancelRecoveryDuringPhase1:
@Test
public void testCancelRecoveryDuringPhase1() throws Exception {
    Store store = newStore(createTempDir("source"), false);
    IndexShard shard = mock(IndexShard.class);
    when(shard.store()).thenReturn(store);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    AtomicBoolean wasCancelled = new AtomicBoolean();
    SetOnce<Runnable> cancelRecovery = new SetOnce<>();
    // Target handler that randomly cancels the recovery from within each phase-1 callback.
    final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {

        @Override
        public void receiveFileInfo(List<String> phase1FileNames,
                                    List<Long> phase1FileSizes,
                                    List<String> phase1ExistingFileNames,
                                    List<Long> phase1ExistingFileSizes,
                                    int totalTranslogOps,
                                    ActionListener<Void> listener) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (randomBoolean()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }

        @Override
        public void writeFileChunk(StoreFileMetadata md,
                                   long position,
                                   BytesReference content,
                                   boolean lastChunk,
                                   int totalTranslogOps,
                                   ActionListener<Void> listener) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (rarely()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }

        @Override
        public void cleanFiles(int totalTranslogOps,
                               long globalCheckpoint,
                               Store.MetadataSnapshot sourceMetadata,
                               ActionListener<Void> listener) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (randomBoolean()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }
    };
    StartRecoveryRequest startRecoveryRequest = getStartRecoveryRequest();
    final RecoverySourceHandler handler = new RecoverySourceHandler(
            shard, recoveryTarget, threadPool, startRecoveryRequest,
            between(1, 16), between(1, 4), between(1, 4)) {

        @Override
        void createRetentionLease(long startingSeqNo, ActionListener<RetentionLease> listener) {
            final String leaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(
                    startRecoveryRequest.targetNode().getId());
            listener.onResponse(new RetentionLease(leaseId, startingSeqNo,
                    threadPool.absoluteTimeInMillis(),
                    ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE));
        }
    };
    cancelRecovery.set(() -> handler.cancel("test"));
    final StepListener<RecoverySourceHandler.SendFileResult> phase1Listener = new StepListener<>();
    try {
        final CountDownLatch latch = new CountDownLatch(1);
        handler.phase1(DirectoryReader.listCommits(dir).get(0), 0, () -> 0,
                new LatchedActionListener<>(phase1Listener, latch));
        latch.await();
        phase1Listener.result();
    } catch (Exception e) {
        // Phase 1 may only fail because it was cancelled above.
        assertTrue(wasCancelled.get());
        assertNotNull(ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class));
    }
    store.close();
}
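The pattern that makes this test work is worth isolating: phase1 completes on another thread, so the test wraps its StepListener in a LatchedActionListener, which delegates to the wrapped listener and then counts the latch down, letting the test thread block until the phase has finished. A minimal sketch of that pattern, with a plain Thread standing in for the recovery executor (the listener names and printed messages here are illustrative, not from the test):

import java.util.concurrent.CountDownLatch;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;

public class LatchedListenerSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        ActionListener<String> inner = ActionListener.wrap(
            r -> System.out.println("completed: " + r),
            e -> System.err.println("failed: " + e));
        // Delegates onResponse/onFailure to `inner`, then counts the latch down.
        ActionListener<String> listener = new LatchedActionListener<>(inner, latch);
        // Stand-in for the async recovery phase completing on another thread.
        new Thread(() -> listener.onResponse("ok")).start();
        latch.await(); // blocks until the callback has run, as the test does
    }
}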
Use of org.elasticsearch.action.LatchedActionListener in project crate by crate.
From the class RecoverySourceHandlerTests, method testSendFileChunksStopOnError:
@Test
public void testSendFileChunksStopOnError() throws Exception {
    final List<FileChunkResponse> unrepliedChunks = new CopyOnWriteArrayList<>();
    final AtomicInteger sentChunks = new AtomicInteger();
    // Target handler that records each chunk without replying, so the test controls
    // exactly when and how every outstanding chunk request completes.
    final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {

        final AtomicLong chunkNumberGenerator = new AtomicLong();

        @Override
        public void writeFileChunk(StoreFileMetadata md,
                                   long position,
                                   BytesReference content,
                                   boolean lastChunk,
                                   int totalTranslogOps,
                                   ActionListener<Void> listener) {
            final long chunkNumber = chunkNumberGenerator.getAndIncrement();
            logger.info("--> write chunk name={} seq={} position={}", md.name(), chunkNumber, position);
            unrepliedChunks.add(new FileChunkResponse(chunkNumber, listener));
            sentChunks.incrementAndGet();
        }
    };
    final int maxConcurrentChunks = between(1, 4);
    final int chunkSize = between(1, 16);
    final RecoverySourceHandler handler = new RecoverySourceHandler(
            null, new AsyncRecoveryTarget(recoveryTarget, recoveryExecutor), threadPool,
            getStartRecoveryRequest(), chunkSize, maxConcurrentChunks, between(1, 5));
    Store store = newStore(createTempDir(), false);
    List<StoreFileMetadata> files = generateFiles(store, between(1, 10), () -> between(1, chunkSize * 20));
    int totalChunks = files.stream().mapToInt(md -> ((int) md.length() + chunkSize - 1) / chunkSize).sum();
    SetOnce<Exception> sendFilesError = new SetOnce<>();
    CountDownLatch sendFilesLatch = new CountDownLatch(1);
    handler.sendFiles(store, files.toArray(new StoreFileMetadata[0]), () -> 0,
            new LatchedActionListener<>(
                    ActionListener.wrap(r -> sendFilesError.set(null), e -> sendFilesError.set(e)),
                    sendFilesLatch));
    // Flow control caps the number of in-flight chunks at maxConcurrentChunks.
    assertBusy(() -> assertThat(sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks))));
    List<FileChunkResponse> failedChunks = randomSubsetOf(between(1, unrepliedChunks.size()), unrepliedChunks);
    CountDownLatch replyLatch = new CountDownLatch(failedChunks.size());
    failedChunks.forEach(c -> {
        c.listener.onFailure(new IllegalStateException("test chunk exception"));
        replyLatch.countDown();
    });
    replyLatch.await();
    unrepliedChunks.removeAll(failedChunks);
    unrepliedChunks.forEach(c -> {
        if (randomBoolean()) {
            c.listener.onFailure(new RuntimeException("test"));
        } else {
            c.listener.onResponse(null);
        }
    });
    sendFilesLatch.await();
    // The first failure is reported, and no further chunks are sent after it.
    assertThat(sendFilesError.get(), instanceOf(IllegalStateException.class));
    assertThat(sendFilesError.get().getMessage(), containsString("test chunk exception"));
    assertThat("no more chunks should be sent", sentChunks.get(), equalTo(Math.min(totalChunks, maxConcurrentChunks)));
    store.close();
}
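The totalChunks computation above is a ceiling division: a file of length L yields ceil(L / chunkSize) chunks. A quick sanity check with illustrative numbers (not taken from the test, which randomizes both values):

int length = 33;   // hypothetical file length
int chunkSize = 16;
int chunks = (length + chunkSize - 1) / chunkSize;
// 32 bytes fill two chunks and the remaining byte needs a third: (33 + 15) / 16 == 3
assert chunks == 3;

This is also why both assertions compare sentChunks against Math.min(totalChunks, maxConcurrentChunks): the handler's flow control never keeps more than maxConcurrentChunks requests in flight, and after a failure it stops issuing new ones.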
Use of org.elasticsearch.action.LatchedActionListener in project crate by crate.
From the class RecoverySourceHandlerTests, method testHandleCorruptedIndexOnSendSendFiles:
@Test
public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
    Settings settings = Settings.builder()
            .put("indices.recovery.concurrent_streams", 1)
            .put("indices.recovery.concurrent_small_file_streams", 1)
            .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    final StartRecoveryRequest request = getStartRecoveryRequest();
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetadata> metas = new ArrayList<>();
    for (StoreFileMetadata md : metadata) {
        metas.add(md);
    }
    // Corrupt one of the index files; the write lock and "extra" files are excluded.
    CorruptionUtils.corruptFile(random(), FileSystemUtils.files(tempDir,
            (p) -> (p.getFileName().toString().equals("write.lock")
                    || p.getFileName().toString().startsWith("extra")) == false));
    Store targetStore = newStore(createTempDir(), false);
    MultiFileWriter multiFileWriter = new MultiFileWriter(targetStore, mock(RecoveryState.Index.class), "", logger, () -> {
    });
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {

        @Override
        public void writeFileChunk(StoreFileMetadata md,
                                   long position,
                                   BytesReference content,
                                   boolean lastChunk,
                                   int totalTranslogOps,
                                   ActionListener<Void> listener) {
            ActionListener.completeWith(listener, () -> {
                multiFileWriter.writeFileChunk(md, position, content, lastChunk);
                return null;
            });
        }
    };
    RecoverySourceHandler handler = new RecoverySourceHandler(
            null, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, request,
            Math.toIntExact(recoverySettings.getChunkSize().getBytes()), between(1, 8), between(1, 8)) {

        @Override
        protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
        }
    };
    SetOnce<Exception> sendFilesError = new SetOnce<>();
    CountDownLatch latch = new CountDownLatch(1);
    handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0,
            new LatchedActionListener<>(
                    ActionListener.wrap(r -> sendFilesError.set(null), e -> sendFilesError.set(e)),
                    latch));
    latch.await();
    assertThat(sendFilesError.get(), instanceOf(IOException.class));
    assertNotNull(ExceptionsHelper.unwrapCorruption(sendFilesError.get()));
    assertTrue(failedEngine.get());
    // Ensure all chunk requests have been completed; otherwise some files on the target are left open.
    IOUtils.close(() -> terminate(threadPool), () -> threadPool = null);
    IOUtils.close(store, multiFileWriter, targetStore);
}
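The writeFileChunk override above leans on ActionListener.completeWith, which runs a checked supplier and routes its return value to onResponse and any thrown exception to onFailure; that is how a corruption error raised inside MultiFileWriter.writeFileChunk ends up on the chunk listener instead of escaping the executor thread. A minimal sketch of the helper's contract (the method name and the empty-chunk check are illustrative, not from the test):

import org.elasticsearch.action.ActionListener;

public class CompleteWithSketch {
    static void writeChunk(byte[] chunk, ActionListener<Void> listener) {
        ActionListener.completeWith(listener, () -> {
            if (chunk.length == 0) {
                // any exception thrown here is delivered via listener.onFailure(...)
                throw new IllegalArgumentException("empty chunk");
            }
            // ... write the chunk to its destination here ...
            return null; // delivered via listener.onResponse(null)
        });
    }
}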