Use of org.elasticsearch.index.store.StoreFileMetaData in project elasticsearch by elastic.
In class FileInfoTests, method testToFromXContent:
public void testToFromXContent() throws IOException {
    final int iters = scaledRandomIntBetween(1, 10);
    for (int iter = 0; iter < iters; iter++) {
        final BytesRef hash = new BytesRef(scaledRandomIntBetween(0, 1024 * 1024));
        hash.length = hash.bytes.length;
        for (int i = 0; i < hash.length; i++) {
            hash.bytes[i] = randomByte();
        }
        StoreFileMetaData meta = new StoreFileMetaData("foobar", Math.abs(randomLong()), randomAsciiOfLengthBetween(1, 10), Version.LATEST, hash);
        ByteSizeValue size = new ByteSizeValue(Math.abs(randomLong()));
        BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("_foobar", meta, size);
        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
        BlobStoreIndexShardSnapshot.FileInfo.toXContent(info, builder, ToXContent.EMPTY_PARAMS);
        byte[] xcontent = BytesReference.toBytes(shuffleXContent(builder).bytes());
        final BlobStoreIndexShardSnapshot.FileInfo parsedInfo;
        try (XContentParser parser = createParser(JsonXContent.jsonXContent, xcontent)) {
            parser.nextToken();
            parsedInfo = BlobStoreIndexShardSnapshot.FileInfo.fromXContent(parser);
        }
        assertThat(info.name(), equalTo(parsedInfo.name()));
        assertThat(info.physicalName(), equalTo(parsedInfo.physicalName()));
        assertThat(info.length(), equalTo(parsedInfo.length()));
        assertThat(info.checksum(), equalTo(parsedInfo.checksum()));
        assertThat(info.partSize(), equalTo(parsedInfo.partSize()));
        assertThat(parsedInfo.metadata().hash().length, equalTo(hash.length));
        assertThat(parsedInfo.metadata().hash(), equalTo(hash));
        assertThat(parsedInfo.metadata().writtenBy(), equalTo(Version.LATEST));
        assertThat(parsedInfo.isSame(info.metadata()), is(true));
    }
}
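In short, the test round-trips a randomly populated FileInfo through JSON: it serializes with toXContent, shuffles the field order via shuffleXContent so parsing cannot depend on field position, parses it back, and asserts that name, physical name, length, checksum, part size, hash, and the writtenBy version all survive the round trip.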
Use of org.elasticsearch.index.store.StoreFileMetaData in project elasticsearch by elastic.
In class RecoverySourceHandlerTests, method testHandleCorruptedIndexOnSendSendFiles:
public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
    Settings settings = Settings.builder()
        .put("indices.recovery.concurrent_streams", 1)
        .put("indices.recovery.concurrent_small_file_streams", 1)
        .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    final StartRecoveryRequest request = new StartRecoveryRequest(shardId,
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        null, randomBoolean(), randomNonNegativeLong(),
        randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : 0L);
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, () -> 0L, e -> () -> {
    }, recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) {
        @Override
        protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
        }
    };
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetaData> metas = new ArrayList<>();
    for (StoreFileMetaData md : metadata) {
        metas.add(md);
    }
    CorruptionUtils.corruptFile(random(), FileSystemUtils.files(tempDir,
        (p) -> (p.getFileName().toString().equals("write.lock") || p.getFileName().toString().startsWith("extra")) == false));
    Store targetStore = newStore(createTempDir(), false);
    try {
        handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), (md) -> {
            try {
                return new IndexOutputOutputStream(targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) {
                    @Override
                    public void close() throws IOException {
                        super.close();
                        // sync otherwise MDW will mess with it
                        store.directory().sync(Collections.singleton(md.name()));
                    }
                };
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        fail("corrupted index");
    } catch (IOException ex) {
        assertNotNull(ExceptionsHelper.unwrapCorruption(ex));
    }
    assertTrue(failedEngine.get());
    IOUtils.close(store, targetStore);
}
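The test commits a small index, corrupts one of the resulting files on disk (skipping write.lock and any extra files), and then verifies two things: sendFiles must fail with an exception that ExceptionsHelper.unwrapCorruption recognizes as a corruption error, and the overridden failEngine hook must fire exactly once, tracked via the failedEngine flag.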
Use of org.elasticsearch.index.store.StoreFileMetaData in project elasticsearch by elastic.
In class RecoverySourceHandlerTests, method testSendFiles:
public void testSendFiles() throws Throwable {
    Settings settings = Settings.builder()
        .put("indices.recovery.concurrent_streams", 1)
        .put("indices.recovery.concurrent_small_file_streams", 1)
        .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    final StartRecoveryRequest request = new StartRecoveryRequest(shardId,
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
        null, randomBoolean(), randomNonNegativeLong(),
        randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong());
    Store store = newStore(createTempDir());
    RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, () -> 0L, e -> () -> {
    }, recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY);
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
        Document document = new Document();
        document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
        document.add(newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
        writer.addDocument(document);
    }
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata(null);
    List<StoreFileMetaData> metas = new ArrayList<>();
    for (StoreFileMetaData md : metadata) {
        metas.add(md);
    }
    Store targetStore = newStore(createTempDir());
    handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), (md) -> {
        try {
            return new IndexOutputOutputStream(targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) {
                @Override
                public void close() throws IOException {
                    super.close();
                    // sync otherwise MDW will mess with it
                    targetStore.directory().sync(Collections.singleton(md.name()));
                }
            };
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });
    Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null);
    Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
    assertEquals(metas.size(), recoveryDiff.identical.size());
    assertEquals(0, recoveryDiff.different.size());
    assertEquals(0, recoveryDiff.missing.size());
    IndexReader reader = DirectoryReader.open(targetStore.directory());
    assertEquals(numDocs, reader.maxDoc());
    IOUtils.close(reader, store, targetStore);
}
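This is the happy-path counterpart of the previous test: after every file is copied through the verifying outputs, Store.MetadataSnapshot.recoveryDiff buckets files into identical, different, and missing, and a clean transfer is expected to leave all files in the identical bucket; reopening the target directory then confirms all numDocs documents arrived intact.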
Use of org.elasticsearch.index.store.StoreFileMetaData in project elasticsearch by elastic.
In class ReplicaShardAllocator, method computeMatchingBytes:
private static long computeMatchingBytes(TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
                                         TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData) {
    String primarySyncId = primaryStore.syncId();
    String replicaSyncId = storeFilesMetaData.syncId();
    // see if we have a sync id we can make use of
    if (replicaSyncId != null && replicaSyncId.equals(primarySyncId)) {
        return Long.MAX_VALUE;
    } else {
        long sizeMatched = 0;
        for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
            String metaDataFileName = storeFileMetaData.name();
            if (primaryStore.fileExists(metaDataFileName) && primaryStore.file(metaDataFileName).isSame(storeFileMetaData)) {
                sizeMatched += storeFileMetaData.length();
            }
        }
        return sizeMatched;
    }
}
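The contract here: a matching sync id means the replica already holds a byte-identical copy of the primary's store, so the method returns Long.MAX_VALUE to outrank any file-by-file comparison; otherwise it sums the lengths of files that exist on both sides and compare as the same. The caller can then rank candidate nodes by this value, a sync-id match dominating any length-based score. A minimal standalone sketch of the same decision, assuming plain maps of file name to length in place of the StoreFilesMetaData types (the real isSame also compares checksums, which this simplification omits):

import java.util.Map;

final class MatchingBytesSketch {
    // Hypothetical simplification: the real code iterates StoreFileMetaData
    // entries and uses isSame(), which checks length, checksum, and hash.
    static long matchingBytes(String primarySyncId, String replicaSyncId,
                              Map<String, Long> primaryFiles, Map<String, Long> replicaFiles) {
        if (replicaSyncId != null && replicaSyncId.equals(primarySyncId)) {
            return Long.MAX_VALUE; // identical stores: no file comparison needed
        }
        long sizeMatched = 0;
        for (Map.Entry<String, Long> file : replicaFiles.entrySet()) {
            Long primaryLength = primaryFiles.get(file.getKey());
            if (primaryLength != null && primaryLength.equals(file.getValue())) {
                sizeMatched += file.getValue(); // count bytes already present on the replica
            }
        }
        return sizeMatched;
    }
}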
Use of org.elasticsearch.index.store.StoreFileMetaData in project crate by crate (spelled StoreFileMetadata in this fork).
In class RecoverySourceHandler, method sendFiles:
void sendFiles(Store store, StoreFileMetadata[] files, IntSupplier translogOps, ActionListener<Void> listener) {
    // send smallest first
    ArrayUtil.timSort(files, Comparator.comparingLong(StoreFileMetadata::length));
    final MultiChunkTransfer<StoreFileMetadata, FileChunk> multiFileSender =
        new MultiChunkTransfer<StoreFileMetadata, FileChunk>(logger, listener, maxConcurrentFileChunks, Arrays.asList(files)) {

            final Deque<byte[]> buffers = new ConcurrentLinkedDeque<>();
            InputStreamIndexInput currentInput = null;
            long offset = 0;

            @Override
            protected void onNewResource(StoreFileMetadata md) throws IOException {
                offset = 0;
                IOUtils.close(currentInput, () -> currentInput = null);
                final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE);
                currentInput = new InputStreamIndexInput(indexInput, md.length()) {
                    @Override
                    public void close() throws IOException {
                        // InputStreamIndexInput's close is a noop
                        IOUtils.close(indexInput, super::close);
                    }
                };
            }

            private byte[] acquireBuffer() {
                final byte[] buffer = buffers.pollFirst();
                if (buffer != null) {
                    return buffer;
                }
                return new byte[chunkSizeInBytes];
            }

            @Override
            protected FileChunk nextChunkRequest(StoreFileMetadata md) throws IOException {
                assert Transports.assertNotTransportThread("read file chunk");
                cancellableThreads.checkForCancel();
                final byte[] buffer = acquireBuffer();
                final int bytesRead = currentInput.read(buffer);
                if (bytesRead == -1) {
                    throw new CorruptIndexException("file truncated; length=" + md.length() + " offset=" + offset, md.name());
                }
                final boolean lastChunk = offset + bytesRead == md.length();
                final FileChunk chunk = new FileChunk(md, new BytesArray(buffer, 0, bytesRead), offset, lastChunk, () -> buffers.addFirst(buffer));
                offset += bytesRead;
                return chunk;
            }

            @Override
            protected void executeChunkRequest(FileChunk request, ActionListener<Void> listener) {
                cancellableThreads.checkForCancel();
                recoveryTarget.writeFileChunk(request.md, request.position, request.content, request.lastChunk, translogOps.getAsInt(), ActionListener.runBefore(listener, request::close));
            }

            @Override
            protected void handleError(StoreFileMetadata md, Exception e) throws Exception {
                handleErrorOnSendFiles(store, e, new StoreFileMetadata[] { md });
            }

            @Override
            public void close() throws IOException {
                IOUtils.close(currentInput, () -> currentInput = null);
            }
        };
    resources.add(multiFileSender);
    multiFileSender.start();
}
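Two design points stand out: files are timSorted smallest-first so the many small segment files finish before large ones monopolize the transfer, and chunk buffers are recycled through a ConcurrentLinkedDeque rather than reallocated per chunk, each FileChunk returning its buffer to the deque once written (via the request::close callback). A minimal sketch of that recycling idiom in isolation, with an illustrative 8 KB chunk size rather than the handler's configured chunkSizeInBytes:

import java.util.Deque;
import java.util.concurrent.ConcurrentLinkedDeque;

final class ChunkBufferPool {
    private static final int CHUNK_SIZE = 8192; // illustrative size only
    private final Deque<byte[]> buffers = new ConcurrentLinkedDeque<>();

    byte[] acquire() {
        final byte[] buffer = buffers.pollFirst();
        return buffer != null ? buffer : new byte[CHUNK_SIZE]; // allocate only when the pool is empty
    }

    void release(byte[] buffer) {
        buffers.addFirst(buffer); // LIFO reuse keeps recently used buffers warm in cache
    }
}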