Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
Class PeerRecoveryTargetServiceTests, method testGetStartingSeqNo.
public void testGetStartingSeqNo() throws Exception {
    IndexShard replica = newShard(false);
    RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null);
    try {
        recoveryEmptyReplica(replica);
        int docs = randomIntBetween(1, 10);
        final String index = replica.shardId().getIndexName();
        long seqNo = 0;
        for (int i = 0; i < docs; i++) {
            Engine.Index indexOp = replica.prepareIndexOnReplica(
                SourceToParse.source(SourceToParse.Origin.REPLICA, index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON),
                seqNo++, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
            replica.index(indexOp);
            if (rarely()) {
                // insert a gap in the sequence numbers
                seqNo++;
            }
        }
        final long maxSeqNo = replica.seqNoStats().getMaxSeqNo();
        final long localCheckpoint = replica.getLocalCheckpoint();
        // no global checkpoint has been established yet, so an operations-based recovery cannot start
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));
        replica.updateGlobalCheckpointOnReplica(maxSeqNo - 1);
        replica.getTranslog().sync();
        // the commit is good enough: the global checkpoint is at or above the max seq no it contains, which is NO_OPS_PERFORMED
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(0L));
        replica.flush(new FlushRequest());
        // the fresh commit is not good enough: the global checkpoint is still below the max seq no it contains
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(SequenceNumbersService.UNASSIGNED_SEQ_NO));
        replica.updateGlobalCheckpointOnReplica(maxSeqNo);
        replica.getTranslog().sync();
        // the commit is good enough: the global checkpoint has caught up with the max seq no it contains
        assertThat(PeerRecoveryTargetService.getStartingSeqNo(recoveryTarget), equalTo(localCheckpoint + 1));
    } finally {
        closeShards(replica);
        recoveryTarget.decRef();
    }
}
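The assertions above all follow one rule: an operations-based recovery can only start from the last commit if the global checkpoint has reached the highest sequence number contained in that commit; otherwise the starting sequence number stays unassigned and a file-based recovery is required. A minimal standalone sketch of that rule (a hypothetical helper for illustration, not the actual PeerRecoveryTargetService code):

// Hypothetical illustration of the decision exercised by testGetStartingSeqNo.
// UNASSIGNED_SEQ_NO and NO_OPS_PERFORMED mirror the Elasticsearch constants (-2 and -1).
final class StartingSeqNoSketch {
    static final long UNASSIGNED_SEQ_NO = -2L;
    static final long NO_OPS_PERFORMED = -1L;

    // maxSeqNoInCommit / localCheckpointInCommit stand in for the values stored in the last commit's user data
    static long startingSeqNo(long globalCheckpoint, long maxSeqNoInCommit, long localCheckpointInCommit) {
        if (globalCheckpoint >= maxSeqNoInCommit) {
            // every operation in the commit is known to be safe; replay from the commit's local checkpoint + 1
            return localCheckpointInCommit + 1;
        }
        // the commit may contain operations that are not yet globally safe; fall back to file-based recovery
        return UNASSIGNED_SEQ_NO;
    }

    public static void main(String[] args) {
        // empty commit (as after recoveryEmptyReplica): usable once any global checkpoint >= NO_OPS_PERFORMED exists
        System.out.println(startingSeqNo(5, NO_OPS_PERFORMED, NO_OPS_PERFORMED)); // 0
        // commit containing ops up to 9 while the global checkpoint is 8: not usable
        System.out.println(startingSeqNo(8, 9, 9));                               // -2
    }
}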
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
Class RecoveryStatusTests, method testRenameTempFiles.
public void testRenameTempFiles() throws IOException {
    IndexService service = createIndex("foo");
    IndexShard indexShard = service.getShardOrNull(0);
    DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
    RecoveryTarget status = new RecoveryTarget(indexShard, node, new PeerRecoveryTargetService.RecoveryListener() {
        @Override
        public void onRecoveryDone(RecoveryState state) {
        }

        @Override
        public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
        }
    }, version -> {
    });
    try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar",
            new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store())) {
        indexOutput.writeInt(1);
        IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar");
        assertSame(openIndexOutput, indexOutput);
        openIndexOutput.writeInt(1);
        CodecUtil.writeFooter(indexOutput);
    }
    try {
        status.openAndPutIndexOutput("foo.bar",
            new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store());
        fail("file foo.bar is already opened and registered");
    } catch (IllegalStateException ex) {
        assertEquals("output for file [foo.bar] has already been created", ex.getMessage());
        // all is well: the output is already registered
    }
    status.removeOpenIndexOutputs("foo.bar");
    Set<String> strings = Sets.newHashSet(status.store().directory().listAll());
    String expectedFile = null;
    for (String file : strings) {
        if (Pattern.compile("recovery[.][\\w-]+[.]foo[.]bar").matcher(file).matches()) {
            expectedFile = file;
            break;
        }
    }
    assertNotNull(expectedFile);
    // we have to close the shard here, otherwise the rename fails because write.lock is still held by the engine
    indexShard.close("foo", false);
    status.renameAllTempFiles();
    strings = Sets.newHashSet(status.store().directory().listAll());
    assertTrue(strings.toString(), strings.contains("foo.bar"));
    assertFalse(strings.toString(), strings.contains(expectedFile));
    // we must fail the recovery: marking it as done would try to move the shard to POST_RECOVERY,
    // which fails because the shard is already started
    status.fail(new RecoveryFailedException(status.state(), "end of test. OK.", null), false);
}
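The regex in the test documents the temp-file naming scheme the recovery target relies on: while files are being copied they live under a name of the form recovery.<id>.<original name>, and renameAllTempFiles strips that prefix again. A small standalone sketch (a hypothetical helper, not the RecoveryTarget implementation) of mapping a temp name back to its original name:

import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper mirroring the pattern asserted in testRenameTempFiles.
final class TempFileNames {
    // recovery.<id>.<original file name>, where <id> is a UUID-like token
    private static final Pattern TEMP_FILE = Pattern.compile("recovery[.]([\\w-]+)[.](.+)");

    static Optional<String> originalName(String fileName) {
        Matcher m = TEMP_FILE.matcher(fileName);
        return m.matches() ? Optional.of(m.group(2)) : Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(originalName("recovery.abc-123.foo.bar")); // Optional[foo.bar]
        System.out.println(originalName("foo.bar"));                  // Optional.empty
    }
}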
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
Class SyncedFlushSingleNodeTests, method testFailAfterIntermediateCommit.
public void testFailAfterIntermediateCommit() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    final ShardId shardId = shard.shardId();
    final ClusterState state = getInstanceFromNode(ClusterService.class).state();
    final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
    final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
    assertEquals("exactly one active shard", 1, activeShards.size());
    Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    assertEquals("exactly one commit id", 1, commitIds.size());
    if (randomBoolean()) {
        client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get();
    }
    // force a new commit so the commit id captured by the pre-sync request becomes stale
    client().admin().indices().prepareFlush("test").setForce(true).get();
    String syncId = UUIDs.base64UUID();
    final SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    ShardsSyncedFlushResult syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
Class SyncedFlushSingleNodeTests, method testFailWhenCommitIsMissing.
public void testFailWhenCommitIsMissing() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    final ShardId shardId = shard.shardId();
    final ClusterState state = getInstanceFromNode(ClusterService.class).state();
    final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
    final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
    assertEquals("exactly one active shard", 1, activeShards.size());
    Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    assertEquals("exactly one commit id", 1, commitIds.size());
    // wipe the commit ids so the sync request carries no commit id for the shard
    commitIds.clear();
    String syncId = UUIDs.base64UUID();
    SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    ShardsSyncedFlushResult syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
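The two SyncedFlushSingleNodeTests above probe the two guard conditions on a shard-level sync request: the sync is rejected when the pre-sync flush produced no commit id for the shard, and when the shard's current commit no longer matches the captured one. A minimal sketch of that guard (hypothetical, not the actual SyncedFlushService code; only the failure strings are taken from the assertions above):

// Hypothetical guard illustrating the two failure reasons asserted above.
final class SyncedFlushGuard {
    // returns a failure reason, or null when the sync id may be written into the current commit
    static String checkCommit(byte[] expectedCommitId, byte[] currentCommitId) {
        if (expectedCommitId == null) {
            return "no commit id from pre-sync flush";   // testFailWhenCommitIsMissing
        }
        if (java.util.Arrays.equals(expectedCommitId, currentCommitId) == false) {
            return "commit has changed";                 // testFailAfterIntermediateCommit
        }
        return null;
    }
}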
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
Class RecoveriesCollectionTests, method testResetRecovery.
public void testResetRecovery() throws Exception {
    try (ReplicationGroup shards = createGroup(0)) {
        shards.startAll();
        int numDocs = randomIntBetween(1, 15);
        shards.indexDocs(numDocs);
        final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool, v -> {
        });
        IndexShard shard = shards.addReplica();
        final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard);
        RecoveryTarget recoveryTarget = collection.getRecoveryTarget(recoveryId);
        final int currentAsTarget = shard.recoveryStats().currentAsTarget();
        final int referencesToStore = recoveryTarget.store().refCount();
        IndexShard indexShard = recoveryTarget.indexShard();
        Store store = recoveryTarget.store();
        String tempFileName = recoveryTarget.getTempNameForFile("foobar");
        RecoveryTarget resetRecovery = collection.resetRecovery(recoveryId, TimeValue.timeValueMinutes(60));
        final long resetRecoveryId = resetRecovery.recoveryId();
        assertNotSame(recoveryTarget, resetRecovery);
        assertNotSame(recoveryTarget.cancellableThreads(), resetRecovery.cancellableThreads());
        assertSame(indexShard, resetRecovery.indexShard());
        assertSame(store, resetRecovery.store());
        assertEquals(referencesToStore, resetRecovery.store().refCount());
        assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget());
        // the old recovery target has been fully released and must no longer hand out its resources
        assertEquals(0, recoveryTarget.refCount());
        expectThrows(ElasticsearchException.class, () -> recoveryTarget.store());
        expectThrows(ElasticsearchException.class, () -> recoveryTarget.indexShard());
        String resetTempFileName = resetRecovery.getTempNameForFile("foobar");
        assertNotEquals(tempFileName, resetTempFileName);
        assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget());
        try (RecoveriesCollection.RecoveryRef newRecoveryRef = collection.getRecovery(resetRecoveryId)) {
            shards.recoverReplica(shard, (s, n) -> {
                assertSame(s, newRecoveryRef.target().indexShard());
                return newRecoveryRef.target();
            }, false);
        }
        shards.assertAllEqual(numDocs);
        assertNull("recovery is done", collection.getRecovery(recoveryId));
    }
}
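testResetRecovery leans on the reference-counting contract of the recovery target: resetting hands the store reference over to the new target, drops the old target's refCount to zero, and any later store() or indexShard() call on the old target must throw. A self-contained sketch of that contract (a simplified stand-in using AtomicInteger, not the Elasticsearch RecoveryTarget or AbstractRefCounted code):

import java.util.concurrent.atomic.AtomicInteger;

// Simplified stand-in for the ref-counting behavior asserted in testResetRecovery.
final class RefCountedResource {
    private final AtomicInteger refCount = new AtomicInteger(1); // created with one reference

    void incRef() {
        if (refCount.getAndIncrement() <= 0) {
            refCount.decrementAndGet();
            throw new IllegalStateException("already closed");
        }
    }

    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            // last reference gone: release underlying resources (store, open index outputs, ...)
        }
    }

    int refCount() {
        return refCount.get();
    }

    // accessors such as store() or indexShard() must refuse to hand out the resource once closed
    void ensureOpen() {
        if (refCount.get() <= 0) {
            throw new IllegalStateException("resource is closed, refCount is zero");
        }
    }
}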