Use of org.opensearch.index.store.StoreFileMetadata in project OpenSearch by opensearch-project: class RecoverySourceHandlerTests, method testHandleExceptionOnSendFiles.
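This test stubs writeFileChunk so every chunk fails, then checks how sendFiles surfaces the error: a failure wrapping a CorruptIndexException must unwrap to a corruption error (reported as "[File corruption occurred on recovery but checksums are ok]" once the source files check out clean), while a plain RuntimeException must come back verbatim. In both cases the engine must not be failed, because the local copy is intact.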
public void testHandleExceptionOnSendFiles() throws Throwable {
    final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
    final StartRecoveryRequest request = getStartRecoveryRequest();
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetadata> metas = new ArrayList<>();
    for (StoreFileMetadata md : metadata) {
        metas.add(md);
    }
    final boolean throwCorruptedIndexException = randomBoolean();
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
        @Override
        public void writeFileChunk(
            StoreFileMetadata md,
            long position,
            BytesReference content,
            boolean lastChunk,
            int totalTranslogOps,
            ActionListener<Void> listener
        ) {
            if (throwCorruptedIndexException) {
                listener.onFailure(new RuntimeException(new CorruptIndexException("foo", "bar")));
            } else {
                listener.onFailure(new RuntimeException("boom"));
            }
        }
    };
    RecoverySourceHandler handler = new RecoverySourceHandler(
        null,
        new AsyncRecoveryTarget(target, recoveryExecutor),
        threadPool,
        request,
        Math.toIntExact(recoverySettings.getChunkSize().getBytes()),
        between(1, 10),
        between(1, 4)
    ) {
        @Override
        protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
        }
    };
    PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>();
    handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture);
    Exception ex = expectThrows(Exception.class, sendFilesFuture::actionGet);
    final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex);
    if (throwCorruptedIndexException) {
        assertNotNull(unwrappedCorruption);
        assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]");
    } else {
        assertNull(unwrappedCorruption);
        assertEquals(ex.getMessage(), "boom");
    }
    assertFalse(failedEngine.get());
    IOUtils.close(store);
}
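The distinction above rests on walking the exception's cause chain looking for a corruption-type cause. Below is a minimal standalone sketch of the idea behind ExceptionsHelper.unwrapCorruption, not the real implementation (which also inspects suppressed exceptions and further corruption types); the class name UnwrapDemo is invented:

import org.apache.lucene.index.CorruptIndexException;

public final class UnwrapDemo {

    // Walk the cause chain and return the first corruption exception, or null.
    static CorruptIndexException unwrapCorruption(Throwable t) {
        for (Throwable cause = t; cause != null; cause = cause.getCause()) {
            if (cause instanceof CorruptIndexException) {
                return (CorruptIndexException) cause;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Exception corrupted = new RuntimeException(new CorruptIndexException("foo", "bar"));
        Exception plain = new RuntimeException("boom");
        System.out.println(unwrapCorruption(corrupted) != null); // true: corruption found in the cause chain
        System.out.println(unwrapCorruption(plain) != null);     // false: nothing to unwrap
    }
}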
Use of org.opensearch.index.store.StoreFileMetadata in project OpenSearch by opensearch-project: class RecoverySourceHandlerTests, method testHandleCorruptedIndexOnSendSendFiles.
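Unlike the previous test, the corruption here is real: one of the on-disk source files is damaged before sending (skipping write.lock and extra files), so the transfer must fail with an unwrappable corruption error and, because the source itself is bad, failEngine must fire.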
public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
    Settings settings = Settings.builder()
        .put("indices.recovery.concurrent_streams", 1)
        .put("indices.recovery.concurrent_small_file_streams", 1)
        .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    final StartRecoveryRequest request = getStartRecoveryRequest();
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetadata> metas = new ArrayList<>();
    for (StoreFileMetadata md : metadata) {
        metas.add(md);
    }
    CorruptionUtils.corruptFile(
        random(),
        FileSystemUtils.files(
            tempDir,
            (p) -> (p.getFileName().toString().equals("write.lock") || p.getFileName().toString().startsWith("extra")) == false
        )
    );
    Store targetStore = newStore(createTempDir(), false);
    MultiFileWriter multiFileWriter = new MultiFileWriter(targetStore, mock(RecoveryState.Index.class), "", logger, () -> {});
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
        @Override
        public void writeFileChunk(
            StoreFileMetadata md,
            long position,
            BytesReference content,
            boolean lastChunk,
            int totalTranslogOps,
            ActionListener<Void> listener
        ) {
            ActionListener.completeWith(listener, () -> {
                multiFileWriter.writeFileChunk(md, position, content, lastChunk);
                return null;
            });
        }
    };
    RecoverySourceHandler handler = new RecoverySourceHandler(
        null,
        new AsyncRecoveryTarget(target, recoveryExecutor),
        threadPool,
        request,
        Math.toIntExact(recoverySettings.getChunkSize().getBytes()),
        between(1, 8),
        between(1, 8)
    ) {
        @Override
        protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
        }
    };
    SetOnce<Exception> sendFilesError = new SetOnce<>();
    CountDownLatch latch = new CountDownLatch(1);
    handler.sendFiles(
        store,
        metas.toArray(new StoreFileMetadata[0]),
        () -> 0,
        new LatchedActionListener<>(ActionListener.wrap(r -> sendFilesError.set(null), e -> sendFilesError.set(e)), latch)
    );
    latch.await();
    assertThat(sendFilesError.get(), instanceOf(IOException.class));
    assertNotNull(ExceptionsHelper.unwrapCorruption(sendFilesError.get()));
    assertTrue(failedEngine.get());
    // ensure all chunk requests have been completed; otherwise some files on the target are left open.
    IOUtils.close(() -> terminate(threadPool), () -> threadPool = null);
    IOUtils.close(store, multiFileWriter, targetStore);
}
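CorruptionUtils.corruptFile damages one of the candidate files so that checksum verification trips. A standalone sketch of the core idea, assuming nothing beyond java.nio; the class and method names are invented for illustration:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Random;

public final class CorruptOneByteDemo {

    // Flip every bit of one byte at a random offset: the file's CRC32 footer
    // no longer matches its content, so verifying readers fail.
    // Assumes a non-empty file.
    static void corruptOneByte(Path file, Random random) throws IOException {
        try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            long offset = random.nextInt(Math.toIntExact(channel.size()));
            ByteBuffer one = ByteBuffer.allocate(1);
            channel.read(one, offset);
            one.put(0, (byte) (one.get(0) ^ 0xFF)); // invert the byte in place
            one.rewind();
            channel.write(one, offset);
        }
    }
}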
Use of org.opensearch.index.store.StoreFileMetadata in project OpenSearch by opensearch-project: class RecoverySourceHandlerTests, method testCancelRecoveryDuringPhase1.
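All three phase-1 callbacks (receiveFileInfo, writeFileChunk, cleanFiles) complete their listener asynchronously but may also trigger cancellation, so the handler can be cancelled at any stage of phase 1; whenever that happens, the resulting failure must unwrap to a CancellableThreads.ExecutionCancelledException.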
public void testCancelRecoveryDuringPhase1() throws Exception {
    Store store = newStore(createTempDir("source"), false);
    IndexShard shard = mock(IndexShard.class);
    when(shard.store()).thenReturn(store);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    AtomicBoolean wasCancelled = new AtomicBoolean();
    SetOnce<Runnable> cancelRecovery = new SetOnce<>();
    final TestRecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {
        @Override
        public void receiveFileInfo(
            List<String> phase1FileNames,
            List<Long> phase1FileSizes,
            List<String> phase1ExistingFileNames,
            List<Long> phase1ExistingFileSizes,
            int totalTranslogOps,
            ActionListener<Void> listener
        ) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (randomBoolean()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }

        @Override
        public void writeFileChunk(
            StoreFileMetadata md,
            long position,
            BytesReference content,
            boolean lastChunk,
            int totalTranslogOps,
            ActionListener<Void> listener
        ) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (rarely()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }

        @Override
        public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetadata, ActionListener<Void> listener) {
            recoveryExecutor.execute(() -> listener.onResponse(null));
            if (randomBoolean()) {
                wasCancelled.set(true);
                cancelRecovery.get().run();
            }
        }
    };
    final StartRecoveryRequest startRecoveryRequest = getStartRecoveryRequest();
    final RecoverySourceHandler handler = new RecoverySourceHandler(
        shard,
        recoveryTarget,
        threadPool,
        startRecoveryRequest,
        between(1, 16),
        between(1, 4),
        between(1, 4)
    ) {
        @Override
        void createRetentionLease(long startingSeqNo, ActionListener<RetentionLease> listener) {
            final String leaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(startRecoveryRequest.targetNode().getId());
            listener.onResponse(
                new RetentionLease(leaseId, startingSeqNo, threadPool.absoluteTimeInMillis(), ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE)
            );
        }
    };
    cancelRecovery.set(() -> handler.cancel("test"));
    final StepListener<RecoverySourceHandler.SendFileResult> phase1Listener = new StepListener<>();
    try {
        final CountDownLatch latch = new CountDownLatch(1);
        handler.phase1(DirectoryReader.listCommits(dir).get(0), 0, () -> 0, new LatchedActionListener<>(phase1Listener, latch));
        latch.await();
        phase1Listener.result();
    } catch (Exception e) {
        assertTrue(wasCancelled.get());
        assertNotNull(ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class));
    }
    store.close();
}
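Note that if none of the random cancel points fire, phase1 completes normally and the catch block is skipped; the assertions only run when an exception actually escapes. Note also the late-bound wiring: the target handler needs to cancel a handler that does not exist yet, so it captures a SetOnce<Runnable> that is filled in after construction. A minimal standalone sketch of the same pattern; LateBoundCancelDemo is an invented name:

import org.apache.lucene.util.SetOnce;

public final class LateBoundCancelDemo {
    public static void main(String[] args) {
        // The callback captures the hook before the cancellable object exists.
        SetOnce<Runnable> cancelHook = new SetOnce<>();
        Runnable target = () -> cancelHook.get().run();

        // Once the real object is available, fill in the hook exactly once.
        cancelHook.set(() -> System.out.println("cancel requested"));
        target.run(); // prints "cancel requested"
    }
}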
Use of org.opensearch.index.store.StoreFileMetadata in project OpenSearch by opensearch-project: class RecoverySourceHandlerTests, method generateFiles.
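A helper that fabricates numFiles synthetic files: each file's metadata advertises a CRC32 checksum and a length of payload plus footer, and the bytes written end in the matching 8-byte checksum, so the files pass the store's verifying output.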
private List<StoreFileMetadata> generateFiles(Store store, int numFiles, IntSupplier fileSizeSupplier) throws IOException {
    List<StoreFileMetadata> files = new ArrayList<>();
    for (int i = 0; i < numFiles; i++) {
        byte[] buffer = randomByteArrayOfLength(fileSizeSupplier.getAsInt());
        CRC32 digest = new CRC32();
        digest.update(buffer, 0, buffer.length);
        StoreFileMetadata md = new StoreFileMetadata(
            "test-" + i,
            buffer.length + 8,
            Store.digestToString(digest.getValue()),
            org.apache.lucene.util.Version.LATEST
        );
        try (OutputStream out = new IndexOutputOutputStream(store.createVerifyingOutput(md.name(), md, IOContext.DEFAULT))) {
            out.write(buffer);
            out.write(Numbers.longToBytes(digest.getValue()));
        }
        store.directory().sync(Collections.singleton(md.name()));
        files.add(md);
    }
    return files;
}
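The declared length buffer.length + 8 accounts for the 8-byte checksum footer: Numbers.longToBytes appends the CRC32 value after the payload as eight big-endian bytes. A standalone illustration of that layout; FooterLayoutDemo is an invented name:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public final class FooterLayoutDemo {
    public static void main(String[] args) {
        byte[] payload = { 1, 2, 3, 4 };
        CRC32 digest = new CRC32();
        digest.update(payload, 0, payload.length);

        // File layout: payload bytes, then the CRC32 value as 8 big-endian bytes
        // (ByteBuffer's default byte order), matching what generateFiles writes.
        ByteBuffer file = ByteBuffer.allocate(payload.length + Long.BYTES);
        file.put(payload);
        file.putLong(digest.getValue());
        System.out.println(file.array().length); // payload.length + 8
    }
}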
Use of org.opensearch.index.store.StoreFileMetadata in project OpenSearch by opensearch-project: class RecoverySourceHandlerTests, method testSendFiles.
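The happy path: every segment file is streamed chunk by chunk into a MultiFileWriter backed by a fresh target store, then the copy is validated both structurally (recovery diff) and logically (reopening the index and counting documents).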
public void testSendFiles() throws Throwable {
    final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, service);
    final StartRecoveryRequest request = getStartRecoveryRequest();
    Store store = newStore(createTempDir());
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetadata> metas = new ArrayList<>();
    for (StoreFileMetadata md : metadata) {
        metas.add(md);
    }
    Store targetStore = newStore(createTempDir());
    MultiFileWriter multiFileWriter = new MultiFileWriter(targetStore, mock(RecoveryState.Index.class), "", logger, () -> {});
    RecoveryTargetHandler target = new TestRecoveryTargetHandler() {
        @Override
        public void writeFileChunk(
            StoreFileMetadata md,
            long position,
            BytesReference content,
            boolean lastChunk,
            int totalTranslogOps,
            ActionListener<Void> listener
        ) {
            ActionListener.completeWith(listener, () -> {
                multiFileWriter.writeFileChunk(md, position, content, lastChunk);
                return null;
            });
        }
    };
    RecoverySourceHandler handler = new RecoverySourceHandler(
        null,
        new AsyncRecoveryTarget(target, recoveryExecutor),
        threadPool,
        request,
        Math.toIntExact(recoverySettings.getChunkSize().getBytes()),
        between(1, 5),
        between(1, 5)
    );
    PlainActionFuture<Void> sendFilesFuture = new PlainActionFuture<>();
    handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture);
    sendFilesFuture.actionGet();
    Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null);
    Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
    assertEquals(metas.size(), recoveryDiff.identical.size());
    assertEquals(0, recoveryDiff.different.size());
    assertEquals(0, recoveryDiff.missing.size());
    IndexReader reader = DirectoryReader.open(targetStore.directory());
    assertEquals(numDocs, reader.maxDoc());
    IOUtils.close(reader, store, multiFileWriter, targetStore);
}
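Store.RecoveryDiff buckets files by comparing the two metadata snapshots (name, length, checksum): a faithful copy must put every file in identical and leave different and missing empty; opening the target directory then confirms all numDocs documents arrived.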