Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class GlobalCheckpointSyncIT, method testPersistLocalCheckpoint.
public void testPersistLocalCheckpoint() {
    internalCluster().ensureAtLeastNumDataNodes(2);
    execute("create table test(id integer) clustered into 1 shards with" +
            "(\"global_checkpoint_sync.interval\" = ?, \"translog.durability\" = ?, number_of_replicas = ?)",
        new Object[] { "10ms", Translog.Durability.REQUEST.toString(), randomIntBetween(0, 1) });
    var indexName = getFqn("test");
    ensureGreen(indexName);
    int numDocs = randomIntBetween(1, 20);
    logger.info("numDocs {}", numDocs);
    long maxSeqNo = 0;
    for (int i = 0; i < numDocs; i++) {
        maxSeqNo = (long) execute("insert into test(id) values(?) returning _seq_no", new Object[] { i }).rows()[0][0];
        logger.info("got {}", maxSeqNo);
    }
    for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) {
        for (IndexService indexService : indicesService) {
            for (IndexShard shard : indexService) {
                final SeqNoStats seqNoStats = shard.seqNoStats();
                assertThat(maxSeqNo, equalTo(seqNoStats.getMaxSeqNo()));
                assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
            }
        }
    }
}
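The per-shard assertions above assume that the 10ms global checkpoint sync has already persisted the local checkpoint by the time the loop runs. A minimal sketch of a more tolerant variant (an addition, not part of the listed test), assuming the standard assertBusy helper inherited from ESTestCase is available; the enclosing test method would then need to declare throws Exception:

// Hedged sketch: retry the per-shard checkpoint assertions until the background
// global checkpoint sync (10ms interval configured above) has caught up.
final long expectedSeqNo = maxSeqNo;
assertBusy(() -> {
    for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) {
        for (IndexService indexService : indicesService) {
            for (IndexShard shard : indexService) {
                SeqNoStats stats = shard.seqNoStats();
                assertThat(stats.getMaxSeqNo(), equalTo(expectedSeqNo));
                assertThat(stats.getLocalCheckpoint(), equalTo(expectedSeqNo));
            }
        }
    }
});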
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class RetentionLeaseBackgroundSyncActionTests, method testRetentionLeaseBackgroundSyncActionOnReplica.
@Test
public void testRetentionLeaseBackgroundSyncActionOnReplica() throws WriteStateException {
    final IndicesService indicesService = mock(IndicesService.class);
    final Index index = new Index("index", "uuid");
    final IndexService indexService = mock(IndexService.class);
    when(indicesService.indexServiceSafe(index)).thenReturn(indexService);
    final int id = randomIntBetween(0, 4);
    final IndexShard indexShard = mock(IndexShard.class);
    when(indexService.getShard(id)).thenReturn(indexShard);
    final ShardId shardId = new ShardId(index, id);
    when(indexShard.shardId()).thenReturn(shardId);
    final RetentionLeaseBackgroundSyncAction action = new RetentionLeaseBackgroundSyncAction(
        transportService, clusterService, indicesService, threadPool, shardStateAction);
    final RetentionLeases retentionLeases = mock(RetentionLeases.class);
    final RetentionLeaseBackgroundSyncAction.Request request =
        new RetentionLeaseBackgroundSyncAction.Request(indexShard.shardId(), retentionLeases);
    final TransportReplicationAction.ReplicaResult result = action.shardOperationOnReplica(request, indexShard);
    // the retention leases on the shard should be updated
    verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases);
    // the retention leases on the shard should be persisted
    verify(indexShard).persistRetentionLeases();
    // the result should indicate success
    final AtomicBoolean success = new AtomicBoolean();
    result.runPostReplicaActions(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString())));
    assertTrue(success.get());
}
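The two verify calls above check that the replica both updates and persists the leases, but not the order in which this happens. A hedged addition (not part of the listed test) using Mockito's InOrder API, if one also wanted to pin down that the in-memory update precedes the persist; assumes the usual static imports from org.mockito.Mockito plus org.mockito.InOrder:

// Hedged addition: on the replica the leases should be updated in memory
// before they are persisted to disk, so verify the two calls in order.
final InOrder inOrder = inOrder(indexShard);
inOrder.verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases);
inOrder.verify(indexShard).persistRetentionLeases();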
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class RetentionLeaseBackgroundSyncActionTests, method testBlocks.
@Test
public void testBlocks() {
    final IndicesService indicesService = mock(IndicesService.class);
    final Index index = new Index("index", "uuid");
    final IndexService indexService = mock(IndexService.class);
    when(indicesService.indexServiceSafe(index)).thenReturn(indexService);
    final int id = randomIntBetween(0, 4);
    final IndexShard indexShard = mock(IndexShard.class);
    when(indexService.getShard(id)).thenReturn(indexShard);
    final ShardId shardId = new ShardId(index, id);
    when(indexShard.shardId()).thenReturn(shardId);
    final RetentionLeaseBackgroundSyncAction action = new RetentionLeaseBackgroundSyncAction(
        transportService, clusterService, indicesService, threadPool, shardStateAction);
    assertNull(action.indexBlockLevel());
}
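The IndicesService -> IndexService -> IndexShard mock wiring is repeated verbatim in the two tests above. A small refactoring sketch (the helper name mockShardChain is hypothetical) that extracts it, using only the Mockito calls already shown:

// Hedged refactoring sketch: build the mocked service/shard chain in one place
// and return the mocked shard for further stubbing and verification.
private static IndexShard mockShardChain(IndicesService indicesService, Index index, int shardId) {
    final IndexService indexService = mock(IndexService.class);
    when(indicesService.indexServiceSafe(index)).thenReturn(indexService);
    final IndexShard indexShard = mock(IndexShard.class);
    when(indexService.getShard(shardId)).thenReturn(indexShard);
    when(indexShard.shardId()).thenReturn(new ShardId(index, shardId));
    return indexShard;
}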
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class RetentionLeaseIT, method testRetentionLeaseSyncedOnRemove.
@Test
public void testRetentionLeaseSyncedOnRemove() throws Exception {
    final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
    internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
    execute("create table doc.tbl (x int) clustered into 1 shards with (number_of_replicas = ?)",
        new Object[] { numberOfReplicas });
    ensureGreen("tbl");
    final String primaryShardNodeId = clusterService().state().routingTable().index("tbl").shard(0).primaryShard().currentNodeId();
    final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
    final IndexShard primary = internalCluster()
        .getInstance(IndicesService.class, primaryShardNodeName)
        .getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
    final int length = randomIntBetween(1, 8);
    final Map<String, RetentionLease> currentRetentionLeases = new LinkedHashMap<>();
    for (int i = 0; i < length; i++) {
        final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8));
        final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
        final String source = randomAlphaOfLength(8);
        final CountDownLatch latch = new CountDownLatch(1);
        final ActionListener<ReplicationResponse> listener = countDownLatchListener(latch);
        // simulate a peer recovery which locks the soft deletes policy on the primary
        final Closeable retentionLock = randomBoolean()
            ? primary.acquireHistoryRetentionLock(Engine.HistorySource.INDEX)
            : () -> {};
        currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener));
        latch.await();
        retentionLock.close();
    }
    for (int i = 0; i < length; i++) {
        final String id = randomFrom(currentRetentionLeases.keySet());
        final CountDownLatch latch = new CountDownLatch(1);
        primary.removeRetentionLease(id, countDownLatchListener(latch));
        // simulate a peer recovery which locks the soft deletes policy on the primary
        final Closeable retentionLock = randomBoolean()
            ? primary.acquireHistoryRetentionLock(Engine.HistorySource.INDEX)
            : () -> {};
        currentRetentionLeases.remove(id);
        latch.await();
        retentionLock.close();
        // check retention leases have been written on the primary
        assertThat(currentRetentionLeases, equalTo(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(primary.loadRetentionLeases())));
        // check current retention leases have been synced to all replicas
        for (final ShardRouting replicaShard : clusterService().state().routingTable().index("tbl").shard(0).replicaShards()) {
            final String replicaShardNodeId = replicaShard.currentNodeId();
            final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
            final IndexShard replica = internalCluster()
                .getInstance(IndicesService.class, replicaShardNodeName)
                .getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
            final Map<String, RetentionLease> retentionLeasesOnReplica = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.getRetentionLeases());
            assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases));
            // check retention leases have been written on the replica
            assertThat(currentRetentionLeases, equalTo(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.loadRetentionLeases())));
        }
    }
}
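The loops above pass listeners created by a countDownLatchListener helper that is not shown in this excerpt. A minimal sketch of such a helper, assuming it simply counts the latch down on success and fails the test on error (mirroring the ActionListener.wrap pattern used in runWaitForShardsTest below):

// Hedged sketch of the countDownLatchListener helper referenced above.
private static ActionListener<ReplicationResponse> countDownLatchListener(CountDownLatch latch) {
    return ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()));
}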
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
From the class RetentionLeaseIT, method runWaitForShardsTest.
private void runWaitForShardsTest(final String idForInitialRetentionLease,
                                  final long initialRetainingSequenceNumber,
                                  final BiConsumer<IndexShard, ActionListener<ReplicationResponse>> primaryConsumer,
                                  final Consumer<IndexShard> afterSync) throws InterruptedException {
    final int numDataNodes = internalCluster().numDataNodes();
    execute("create table doc.tbl (x int) clustered into 1 shards " +
            "with (" +
            " number_of_replicas = ?, " +
            " \"soft_deletes.enabled\" = true," +
            " \"soft_deletes.retention_lease.sync_interval\" = ?)",
        new Object[] { numDataNodes == 1 ? 0 : numDataNodes - 1, TimeValue.timeValueSeconds(1).getStringRep() });
    ensureYellowAndNoInitializingShards("tbl");
    assertFalse(client().admin().cluster().prepareHealth("tbl").setWaitForActiveShards(numDataNodes).get().isTimedOut());
    final String primaryShardNodeId = clusterService().state().routingTable().index("tbl").shard(0).primaryShard().currentNodeId();
    final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
    final IndexShard primary = internalCluster()
        .getInstance(IndicesService.class, primaryShardNodeName)
        .getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
    final String source = randomAlphaOfLength(8);
    final CountDownLatch latch = new CountDownLatch(1);
    final ActionListener<ReplicationResponse> listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()));
    primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener);
    latch.await();
    final String waitForActiveValue = randomBoolean() ? "all" : Integer.toString(numDataNodes);
    execute("alter table doc.tbl set (\"write.wait_for_active_shards\" = ?)", new Object[] { waitForActiveValue });
    final CountDownLatch actionLatch = new CountDownLatch(1);
    final AtomicBoolean success = new AtomicBoolean();
    primaryConsumer.accept(primary, new ActionListener<ReplicationResponse>() {

        @Override
        public void onResponse(final ReplicationResponse replicationResponse) {
            success.set(true);
            actionLatch.countDown();
        }

        @Override
        public void onFailure(final Exception e) {
            fail(e.toString());
        }
    });
    actionLatch.await();
    assertTrue(success.get());
    afterSync.accept(primary);
}
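runWaitForShardsTest is a shared driver: it creates the table, adds an initial retention lease, switches write.wait_for_active_shards, runs the supplied primary operation, and hands the primary shard to afterSync for follow-up assertions. A hypothetical caller (test name and assertion invented for illustration) that drives it with removeRetentionLease, the same call used in testRetentionLeaseSyncedOnRemove above:

// Hedged usage sketch: remove the initial lease while wait_for_active_shards is set,
// then assert it is gone from the primary (peer recovery leases excluded).
@Test
public void testRetentionLeaseRemovedWhileWaitingForShards() throws InterruptedException {
    final String id = randomAlphaOfLength(8);
    final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
    runWaitForShardsTest(
        id,
        retainingSequenceNumber,
        (primary, listener) -> primary.removeRetentionLease(id, listener),
        primary -> assertFalse(
            RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(primary.getRetentionLeases()).containsKey(id)));
}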