Use of org.elasticsearch.indices.recovery.RecoveryTarget in project elasticsearch by elastic.
From the class RecoveryDuringReplicationTests, method testWaitForPendingSeqNo. The test verifies that peer recovery of a replica waits for in-flight (pending) indexing operations on the primary to complete before moving on to the translog replay phase.
@TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE,"
    + "org.elasticsearch.discovery:TRACE,"
    + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE,"
    + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE,"
    + "org.elasticsearch.index.seqno:TRACE")
public void testWaitForPendingSeqNo() throws Exception {
    IndexMetaData metaData = buildIndexMetaData(1);
    final int pendingDocs = randomIntBetween(1, 5);
    final AtomicReference<Semaphore> blockIndexingOnPrimary = new AtomicReference<>();
    final CountDownLatch blockedIndexers = new CountDownLatch(pendingDocs);
    try (ReplicationGroup shards = new ReplicationGroup(metaData) {
        @Override
        protected EngineFactory getEngineFactory(ShardRouting routing) {
            if (routing.primary()) {
                // Wrap the primary's IndexWriter so indexing can be blocked on demand via the semaphore.
                return new EngineFactory() {
                    @Override
                    public Engine newReadWriteEngine(EngineConfig config) {
                        return InternalEngineTests.createInternalEngine(
                            (directory, writerConfig) -> new IndexWriter(directory, writerConfig) {
                                @Override
                                public long addDocument(Iterable<? extends IndexableField> doc) throws IOException {
                                    Semaphore block = blockIndexingOnPrimary.get();
                                    if (block != null) {
                                        blockedIndexers.countDown();
                                        try {
                                            block.acquire();
                                        } catch (InterruptedException e) {
                                            throw new AssertionError("unexpectedly interrupted", e);
                                        }
                                    }
                                    return super.addDocument(doc);
                                }
                            }, null, config);
                    }

                    @Override
                    public Engine newReadOnlyEngine(EngineConfig config) {
                        throw new UnsupportedOperationException();
                    }
                };
            } else {
                return null;
            }
        }
    }) {
        shards.startAll();
        int docs = shards.indexDocs(randomIntBetween(1, 10));

        // Detach a replica so it can later be re-added and recovered from the primary.
        IndexShard replica = shards.getReplicas().get(0);
        shards.removeReplica(replica);
        closeShards(replica);

        // The pending (blocked) docs will eventually be indexed as well.
        docs += pendingDocs;
        final Semaphore pendingDocsSemaphore = new Semaphore(pendingDocs);
        blockIndexingOnPrimary.set(pendingDocsSemaphore);
        blockIndexingOnPrimary.get().acquire(pendingDocs);
        CountDownLatch pendingDocsDone = new CountDownLatch(pendingDocs);

        // Kick off indexing operations that will block inside the primary's wrapped IndexWriter.
        for (int i = 0; i < pendingDocs; i++) {
            final String id = "pending_" + i;
            threadPool.generic().submit(() -> {
                try {
                    shards.index(new IndexRequest(index.getName(), "type", id).source("{}", XContentType.JSON));
                } catch (Exception e) {
                    throw new AssertionError(e);
                } finally {
                    pendingDocsDone.countDown();
                }
            });
        }

        // wait for the pending ops to "hang"
        blockedIndexers.await();
        blockIndexingOnPrimary.set(null);

        // index some more
        docs += shards.indexDocs(randomInt(5));

        // Re-add the replica on its existing path and start recovering it from the primary.
        IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId());
        CountDownLatch recoveryStart = new CountDownLatch(1);
        AtomicBoolean preparedForTranslog = new AtomicBoolean(false);
        final Future<Void> recoveryFuture = shards.asyncRecoverReplica(newReplica, (indexShard, node) -> {
            recoveryStart.countDown();
            return new RecoveryTarget(indexShard, node, recoveryListener, l -> {}) {
                @Override
                public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
                    // Record that recovery has reached the translog replay phase.
                    preparedForTranslog.set(true);
                    super.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp);
                }
            };
        });

        recoveryStart.await();

        // Release the blocked operations one at a time; while any remain pending,
        // recovery must not yet have prepared for translog replay.
        for (int i = 0; i < pendingDocs; i++) {
            assertFalse((pendingDocs - i) + " pending operations, recovery should wait", preparedForTranslog.get());
            pendingDocsSemaphore.release();
        }
        pendingDocsDone.await();

        // now recovery can finish
        recoveryFuture.get();
        assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty());
        assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(docs));
        shards.assertAllEqual(docs);
    }
}
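The part of the test that actually exercises RecoveryTarget is the anonymous subclass handed to shards.asyncRecoverReplica. Distilled from the code above into a minimal sketch (indexShard, node, recoveryListener, and preparedForTranslog are all identifiers from the test's own scope, and the trailing l -> {} lambda simply fills the last constructor parameter with a no-op callback the test does not need), the hook looks roughly like this:

// Sketch only: overriding a RecoveryTarget phase callback to observe recovery progress,
// using the same constructor arguments and method signature as the test above.
return new RecoveryTarget(indexShard, node, recoveryListener, l -> {}) {
    @Override
    public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
        // Flip a flag when the recovery source moves on to replaying translog operations,
        // so the test can assert this does not happen while writes are still pending.
        preparedForTranslog.set(true);
        super.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp);
    }
};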