Example usage of org.elasticsearch.index.shard.IndexShard in the crate/crate project:
class IndexServiceTests, method testAsyncTranslogTrimActuallyWorks.
public void testAsyncTranslogTrimActuallyWorks() throws Exception {
    // Single-shard table with a short sync interval so the async trim task fires quickly.
    execute("create table test(x int, data string) clustered into 1 shards with (\"translog.sync_interval\" = '100ms')");
    IndexService service = getIndexService("test");
    ensureGreen(service.index().getName());
    // The translog trim task must be scheduled for this index.
    assertTrue(service.getTrimTranslogTask().mustReschedule());
    execute("insert into test (x, data) values (1, 'foo')");
    final IndexShard primaryShard = service.getShard(0);
    flushShard(primaryShard, true);
    // After a forced flush, the trim task should eventually leave the translog with no operations.
    assertBusy(() -> assertThat(EngineTestCase.getTranslog(getEngine(primaryShard)).totalOperations(), equalTo(0)));
}
Example usage of org.elasticsearch.index.shard.IndexShard in the crate/crate project:
class IndexServiceTests, method testRescheduleAsyncFsync.
public void testRescheduleAsyncFsync() throws Exception {
    // With REQUEST durability there is no background fsync task to schedule.
    execute("create table test(x int, data string) clustered into 1 shards with (\"translog.sync_interval\" = '100ms', \"translog.durability\" = 'REQUEST')");
    IndexService service = getIndexService("test");
    ensureGreen(service.index().getName());
    assertNull(service.getFsyncTask());
    // Switching to ASYNC durability must create and schedule the fsync task.
    execute("alter table test set (\"translog.durability\" = 'ASYNC')");
    assertNotNull(service.getFsyncTask());
    assertTrue(service.getFsyncTask().mustReschedule());
    execute("insert into test (x, data) values (1, 'foo')");
    assertNotNull(service.getFsyncTask());
    final IndexShard primaryShard = service.getShard(0);
    // The background fsync eventually syncs the shard's translog.
    assertBusy(() -> assertFalse(primaryShard.isSyncNeeded()));
    // Toggling durability back and forth removes and re-creates the task.
    execute("alter table test set (\"translog.durability\" = 'REQUEST')");
    assertNull(service.getFsyncTask());
    execute("alter table test set (\"translog.durability\" = 'ASYNC')");
    assertNotNull(service.getFsyncTask());
}
Example usage of org.elasticsearch.index.shard.IndexShard in the crate/crate project:
class IndexServiceTests, method testAsyncFsyncActuallyWorks.
public void testAsyncFsyncActuallyWorks() throws Exception {
    // ASYNC durability with a short sync interval: the background fsync task does the syncing.
    execute("create table test(x int, data string) clustered into 1 shards with (\"translog.sync_interval\" = '100ms', " + "\"translog.durability\" = 'ASYNC')");
    IndexService indexService = getIndexService("test");
    var indexName = indexService.index().getName();
    ensureGreen(indexName);
    // Fixed: this test is about the async fsync task, not the refresh task —
    // with ASYNC durability the fsync task exists and must be rescheduled.
    assertNotNull(indexService.getFsyncTask());
    assertTrue(indexService.getFsyncTask().mustReschedule());
    execute("insert into test (x, data) values (1, 'foo')");
    IndexShard shard = indexService.getShard(0);
    // The background task eventually fsyncs the translog so no sync remains pending.
    assertBusy(() -> assertFalse(shard.isSyncNeeded()));
}
Example usage of org.elasticsearch.index.shard.IndexShard in the crate/crate project:
class RetentionLeaseBackgroundSyncIT, method testBackgroundRetentionLeaseSync.
@Test
public void testBackgroundRetentionLeaseSync() throws Exception {
    // Randomly pick 0..2 replicas and make sure enough data nodes exist to host all copies.
    final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
    internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
    execute("create table doc.tbl (x int) clustered into 1 shards " + "with (" + " \"soft_deletes.retention_lease.sync_interval\" = '1s', " + " \"soft_deletes.enabled\" = true, " + " number_of_replicas = ?)", new Object[] { numberOfReplicas });
    ensureGreen("tbl");
    // Locate the primary shard instance through the node that hosts it.
    final String primaryNodeId = clusterService().state().routingTable().index("tbl").shard(0).primaryShard().currentNodeId();
    final String primaryNodeName = clusterService().state().nodes().get(primaryNodeId).getName();
    final IndexShard primary = internalCluster().getInstance(IndicesService.class, primaryNodeName).getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
    // Add several retention leases one at a time; after each round every replica must converge.
    final int length = randomIntBetween(1, 8);
    final Map<String, RetentionLease> leasesById = new HashMap<>(length);
    final List<String> ids = new ArrayList<>(length);
    for (int i = 0; i < length; i++) {
        final String id = randomValueOtherThanMany(leasesById.keySet()::contains, () -> randomAlphaOfLength(8));
        ids.add(id);
        final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
        final String source = randomAlphaOfLength(8);
        final CountDownLatch latch = new CountDownLatch(1);
        // Add a fresh lease and wait until the primary acknowledges it.
        leasesById.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, ActionListener.wrap(latch::countDown)));
        latch.await();
        // Renew every lease added so far; the background sync must propagate the renewals.
        for (int j = 0; j <= i; j++) {
            final String existingId = ids.get(j);
            final long renewedSequenceNumber = randomLongBetween(leasesById.get(existingId).retainingSequenceNumber(), Long.MAX_VALUE);
            leasesById.put(existingId, primary.renewRetentionLease(existingId, renewedSequenceNumber, source));
        }
        assertBusy(() -> {
            // Every replica must eventually report the same lease set as the primary.
            for (final ShardRouting replicaRouting : clusterService().state().routingTable().index("tbl").shard(0).replicaShards()) {
                final String replicaNodeName = clusterService().state().nodes().get(replicaRouting.currentNodeId()).getName();
                final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaNodeName).getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
                assertThat(replica.getRetentionLeases(), equalTo(primary.getRetentionLeases()));
            }
        });
    }
}
Example usage of org.elasticsearch.index.shard.IndexShard in the crate/crate project:
class TransportUpgradeAction, method shardOperation.
@Override
protected ShardUpgradeResult shardOperation(UpgradeRequest request, ShardRouting shardRouting) throws IOException {
    // Resolve the local shard instance for this routing entry and run the Lucene upgrade on it.
    final ShardId shardId = shardRouting.shardId();
    final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
    final org.apache.lucene.util.Version oldestLuceneSegment = indexShard.upgrade(request);
    // Report the current Elasticsearch version as the upgrade version, because the
    // mapping is updated to match the current version during the upgrade.
    return new ShardUpgradeResult(shardId, indexShard.routingEntry().primary(), Version.CURRENT, oldestLuceneSegment);
}
Aggregations