use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
In the class PeerRecoveryTargetServiceTests, the method testWriteFileChunksConcurrently:
@Test
public void testWriteFileChunksConcurrently() throws Exception {
    // index and flush some documents on the source shard
    IndexShard sourceShard = newStartedShard(true);
    int numDocs = between(20, 100);
    for (int i = 0; i < numDocs; i++) {
        indexDoc(sourceShard, "_doc", Integer.toString(i));
    }
    sourceShard.flush(new FlushRequest());
    Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata(null);
    List<StoreFileMetadata> mdFiles = new ArrayList<>();
    for (StoreFileMetadata md : sourceSnapshot) {
        mdFiles.add(md);
    }
    final IndexShard targetShard = newShard(false);
    final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId());
    final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId());
    targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode));
    final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null);
    // announce the file list to the recovery target
    final PlainActionFuture<Void> receiveFileInfoFuture = new PlainActionFuture<>();
    recoveryTarget.receiveFileInfo(
        mdFiles.stream().map(StoreFileMetadata::name).collect(Collectors.toList()),
        mdFiles.stream().map(StoreFileMetadata::length).collect(Collectors.toList()),
        Collections.emptyList(),
        Collections.emptyList(),
        0,
        receiveFileInfoFuture);
    receiveFileInfoFuture.actionGet(5, TimeUnit.SECONDS);
    // split every source file into randomly sized chunk requests
    List<RecoveryFileChunkRequest> requests = new ArrayList<>();
    for (StoreFileMetadata md : mdFiles) {
        try (IndexInput in = sourceShard.store().directory().openInput(md.name(), IOContext.READONCE)) {
            int pos = 0;
            while (pos < md.length()) {
                int length = between(1, Math.toIntExact(md.length() - pos));
                byte[] buffer = new byte[length];
                in.readBytes(buffer, 0, length);
                requests.add(new RecoveryFileChunkRequest(0, sourceShard.shardId(), md, pos, new BytesArray(buffer), pos + length == md.length(), 1, 1));
                pos += length;
            }
        }
    }
    // replay the chunks out of order from several concurrent sender threads
    Randomness.shuffle(requests);
    BlockingQueue<RecoveryFileChunkRequest> queue = new ArrayBlockingQueue<>(requests.size());
    queue.addAll(requests);
    Thread[] senders = new Thread[between(1, 4)];
    CyclicBarrier barrier = new CyclicBarrier(senders.length);
    for (int i = 0; i < senders.length; i++) {
        senders[i] = new Thread(() -> {
            try {
                barrier.await();
                RecoveryFileChunkRequest r;
                while ((r = queue.poll()) != null) {
                    recoveryTarget.writeFileChunk(r.metadata(), r.position(), r.content(), r.lastChunk(), r.totalTranslogOps(),
                        ActionListener.wrap(ignored -> {
                        }, e -> {
                            throw new AssertionError(e);
                        }));
                }
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });
        senders[i].start();
    }
    for (Thread sender : senders) {
        sender.join();
    }
    // finalize the recovery and verify that the target store matches the source
    PlainActionFuture<Void> cleanFilesFuture = new PlainActionFuture<>();
    recoveryTarget.cleanFiles(0, Long.parseLong(sourceSnapshot.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO)), sourceSnapshot, cleanFilesFuture);
    cleanFilesFuture.actionGet();
    recoveryTarget.decRef();
    Store.MetadataSnapshot targetSnapshot = targetShard.snapshotStoreMetadata();
    Store.RecoveryDiff diff = sourceSnapshot.recoveryDiff(targetSnapshot);
    assertThat(diff.different, empty());
    closeShards(sourceShard, targetShard);
}
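The essence of the test above is that each chunk carries its absolute file position, so chunks can be applied concurrently and in any order and the file still reassembles correctly. Below is a minimal, self-contained sketch of that idea in plain Java; it is not CrateDB or Elasticsearch code, and all names in it are illustrative only.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class PositionedChunkSketch {

    // A chunk is just (position, payload); a stand-in for RecoveryFileChunkRequest.
    record Chunk(int position, byte[] payload) {}

    public static void main(String[] args) {
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        byte[] source = new byte[rnd.nextInt(512, 4096)];
        rnd.nextBytes(source);

        // Split into randomly sized chunks, mirroring the between(1, remaining) loop above.
        List<Chunk> chunks = new ArrayList<>();
        int pos = 0;
        while (pos < source.length) {
            int length = rnd.nextInt(1, source.length - pos + 1);
            byte[] payload = new byte[length];
            System.arraycopy(source, pos, payload, 0, length);
            chunks.add(new Chunk(pos, payload));
            pos += length;
        }

        // Deliver the chunks out of order; position-based writes make the order irrelevant.
        Collections.shuffle(chunks);
        byte[] target = new byte[source.length];
        for (Chunk c : chunks) {
            System.arraycopy(c.payload(), 0, target, c.position(), c.payload().length);
        }

        if (!java.util.Arrays.equals(source, target)) {
            throw new AssertionError("reassembled file differs from source");
        }
        System.out.println("reassembled " + chunks.size() + " shuffled chunks correctly");
    }
}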
use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
In the class BlobStoreRepositoryRestoreTests, the method testRestoreSnapshotWithExistingFiles:
/**
* Restoring a snapshot that contains multiple files must succeed even when
* some files already exist in the shard's store.
*/
public void testRestoreSnapshotWithExistingFiles() throws IOException {
    final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
    final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
    IndexShard shard = newShard(shardId, true);
    try {
        // index documents in the shard
        final int numDocs = scaledRandomIntBetween(1, 500);
        recoverShardFromStore(shard);
        for (int i = 0; i < numDocs; i++) {
            indexDoc(shard, Integer.toString(i));
            if (rarely()) {
                flushShard(shard, false);
            }
        }
        assertDocCount(shard, numDocs);
        // snapshot the shard
        final Repository repository = createRepository();
        final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid"));
        snapshotShard(shard, snapshot, repository);
        // capture current store files
        final Store.MetadataSnapshot storeFiles = shard.snapshotStoreMetadata();
        assertFalse(storeFiles.asMap().isEmpty());
        // close the shard
        closeShards(shard);
        // delete some random files in the store
        List<String> deletedFiles = randomSubsetOf(randomIntBetween(1, storeFiles.size() - 1), storeFiles.asMap().keySet());
        for (String deletedFile : deletedFiles) {
            Files.delete(shard.shardPath().resolveIndex().resolve(deletedFile));
        }
        // build a new shard using the same store directory as the closed shard
        ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.ExistingStoreRecoverySource.INSTANCE);
        shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetadata(), null, new InternalEngineFactory(), () -> {
        }, RetentionLeaseSyncer.EMPTY, EMPTY_EVENT_LISTENER);
        // restore the shard
        recoverShardFromSnapshot(shard, snapshot, repository);
        // check that the shard is not corrupted
        TestUtil.checkIndex(shard.store().directory());
        // check that all files have been restored
        final Directory directory = shard.store().directory();
        final List<String> directoryFiles = Arrays.asList(directory.listAll());
        for (StoreFileMetadata storeFile : storeFiles) {
            String fileName = storeFile.name();
            assertTrue("File [" + fileName + "] does not exist in store directory", directoryFiles.contains(fileName));
            assertEquals(storeFile.length(), shard.store().directory().fileLength(fileName));
        }
    } finally {
        if (shard != null && shard.state() != IndexShardState.CLOSED) {
            try {
                shard.close("test", false);
            } finally {
                IOUtils.close(shard.store());
            }
        }
    }
}
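The verification loop at the end of the test checks, for every file recorded in the pre-restore metadata snapshot, that a file with the same name and length exists after the restore. Here is a hedged, stand-alone sketch of the same check expressed over an ordinary file-system directory with plain java.nio, not the Store API; the expectedFiles map (file name to length) is a stand-in for Store.MetadataSnapshot.asMap().

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;

public final class RestoreVerificationSketch {

    static void assertAllFilesRestored(Path dir, Map<String, Long> expectedFiles) throws IOException {
        for (Map.Entry<String, Long> e : expectedFiles.entrySet()) {
            Path file = dir.resolve(e.getKey());
            // every expected file must exist under the store directory...
            if (!Files.exists(file)) {
                throw new AssertionError("File [" + e.getKey() + "] does not exist in store directory");
            }
            // ...and must have the length recorded in the snapshot metadata
            long actual = Files.size(file);
            if (actual != e.getValue()) {
                throw new AssertionError("File [" + e.getKey() + "] has length " + actual + ", expected " + e.getValue());
            }
        }
    }
}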
use of org.elasticsearch.index.store.StoreFileMetadata in project crate by crate.
In the class RemoteRecoveryTargetHandler, the method writeFileChunk:
@Override
public void writeFileChunk(StoreFileMetadata fileMetadata, long position, BytesReference content, boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
    // Pause using the rate limiter, if desired, to throttle the recovery
    final long throttleTimeInNanos;
    // always fetch the rate limiter - it might be updated in real-time on the recovery settings
    final RateLimiter rl = recoverySettings.rateLimiter();
    if (rl != null) {
        long bytes = bytesSinceLastPause.addAndGet(content.length());
        if (bytes > rl.getMinPauseCheckBytes()) {
            // Time to pause
            bytesSinceLastPause.addAndGet(-bytes);
            try {
                throttleTimeInNanos = rl.pause(bytes);
                onSourceThrottle.accept(throttleTimeInNanos);
            } catch (IOException e) {
                throw new ElasticsearchException("failed to pause recovery", e);
            }
        } else {
            throttleTimeInNanos = 0;
        }
    } else {
        throttleTimeInNanos = 0;
    }
    final String action = PeerRecoveryTargetService.Actions.FILE_CHUNK;
    /* We send estimateTotalOperations with every request since we collect stats on the target, and that way we can
     * see how many translog ops accumulate while copying files across the network. A future optimization
     * would be to restart the file copy (sending new deltas) if too many translog ops pile up.
     */
    final RecoveryFileChunkRequest request = new RecoveryFileChunkRequest(recoveryId, shardId, fileMetadata, position, content, lastChunk, totalTranslogOps, throttleTimeInNanos);
    final Writeable.Reader<TransportResponse.Empty> reader = in -> TransportResponse.Empty.INSTANCE;
    executeRetryableAction(action, request, fileChunkRequestOptions, ActionListener.map(listener, r -> null), reader);
}
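The throttling logic above accumulates chunk sizes in bytesSinceLastPause and only pauses once the running total crosses the rate limiter's minimum pause-check threshold, so small chunks do not pay the pause overhead on every call. Below is a simplified sketch of just that pattern; the Throttle interface is a hypothetical stand-in for Lucene's RateLimiter, not the real API.

import java.util.concurrent.atomic.AtomicLong;

class ChunkThrottleSketch {

    /** Hypothetical stand-in for org.apache.lucene.store.RateLimiter. */
    interface Throttle {
        long minPauseCheckBytes();
        long pauseNanos(long bytes) throws InterruptedException;
    }

    private final AtomicLong bytesSinceLastPause = new AtomicLong();

    /** Returns the nanoseconds actually paused for this chunk (often 0). */
    long maybePause(Throttle throttle, int chunkLength) throws InterruptedException {
        long bytes = bytesSinceLastPause.addAndGet(chunkLength);
        if (bytes <= throttle.minPauseCheckBytes()) {
            return 0; // not enough accumulated bytes to be worth a pause yet
        }
        bytesSinceLastPause.addAndGet(-bytes); // reset the accumulator before pausing
        return throttle.pauseNanos(bytes);
    }
}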
use of org.elasticsearch.index.store.StoreFileMetadata in project elasticsearch by elastic.
In the class RecoveryFileChunkRequest, the method readFrom:
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    // fields must be read in exactly the order writeTo wrote them
    recoveryId = in.readLong();
    shardId = ShardId.readShardId(in);
    String name = in.readString();
    position = in.readVLong();
    long length = in.readVLong();
    String checksum = in.readString();
    content = in.readBytesReference();
    Version writtenBy = Lucene.parseVersionLenient(in.readString(), null);
    assert writtenBy != null;
    metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
    lastChunk = in.readBoolean();
    totalTranslogOps = in.readVInt();
    sourceThrottleTimeInNanos = in.readLong();
}
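Stream serialization in Elasticsearch is symmetric: writeTo must emit fields in exactly the order readFrom consumes them. The following sketch of the mirrored writeTo is derived from the read order above rather than copied from the actual source, so treat the exact calls as assumptions.

@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    // mirrors readFrom field-for-field (sketch inferred from the read order above)
    out.writeLong(recoveryId);
    shardId.writeTo(out);
    out.writeString(metaData.name());
    out.writeVLong(position);
    out.writeVLong(metaData.length());
    out.writeString(metaData.checksum());
    out.writeBytesReference(content);
    out.writeString(metaData.writtenBy().toString());
    out.writeBoolean(lastChunk);
    out.writeVInt(totalTranslogOps);
    out.writeLong(sourceThrottleTimeInNanos);
}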
use of org.elasticsearch.index.store.StoreFileMetadata in project elasticsearch by elastic.
In the class ReplicaShardAllocatorTests, the method testNoDataForReplicaOnAnyNode:
/**
 * Verifies that when there is primary data, but no data at all on other nodes, the shard
 * remains unassigned so that it can be allocated later.
 */
public void testNoDataForReplicaOnAnyNode() {
    RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
    testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
    testAllocator.allocateUnassigned(allocation);
    assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
    assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
}
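The allocator in this test weighs replica placement by comparing the files each candidate node reports against the primary's, matching per file; here node1 holds only the primary copy and no other node reports any files, so nothing matches and the replica stays unassigned. Below is a hedged sketch of that matching idea; FileRecord is a hypothetical stand-in for StoreFileMetaData, and this is illustrative code, not the real allocator.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

class MatchingBytesSketch {

    /** Hypothetical stand-in for StoreFileMetaData: name, length, checksum. */
    record FileRecord(String name, long length, String checksum) {}

    /** Sum of bytes the candidate node already holds identical copies of. */
    static long matchingBytes(List<FileRecord> primary, List<FileRecord> candidate) {
        Map<String, FileRecord> byName = candidate.stream()
            .collect(Collectors.toMap(FileRecord::name, f -> f));
        long matched = 0;
        for (FileRecord p : primary) {
            FileRecord c = byName.get(p.name());
            // a file counts as matched only if name, checksum, and length all agree
            if (c != null && c.checksum().equals(p.checksum()) && c.length() == p.length()) {
                matched += p.length();
            }
        }
        return matched;
    }
}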