Use of org.elasticsearch.action.admin.indices.stats.ShardStats in project crate by crate.
From class MasterDisruptionIT, method testMappingNewFieldsTimeoutDoesntAffectCheckpoints:
@Test
public void testMappingNewFieldsTimeoutDoesntAffectCheckpoints() throws Exception {
    InternalTestCluster internalCluster = internalCluster();
    // a 1ms dynamic-mapping timeout means mapping updates for new fields are practically guaranteed to time out
    internalCluster.startNodes(3,
        Settings.builder()
            .put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), "1ms")
            .build());
    ensureStableCluster(3);
    logger.info("creating table t with 1 shard and 1 replica");
    execute("create table t (id int primary key, x object(dynamic)) clustered into 1 shards with " +
            "(number_of_replicas = 1, \"routing.allocation.exclude._name\" = '" +
            internalCluster().getMasterName() +
            "', \"write.wait_for_active_shards\" = 1)");
    ensureGreen();
    execute("insert into t values (?, ?)", new Object[] { 1, Map.of("first field", "first value") });
    // block the master service so that the dynamic mapping updates triggered below cannot complete
    ServiceDisruptionScheme disruption = new BlockMasterServiceOnMaster(random());
    setDisruptionScheme(disruption);
    disruption.startDisrupting();
    try {
        execute("insert into t values (?, ?), (?, ?), (?, ?)",
                new Object[] {
                    2, Map.of("2nd field", "2nd value"),
                    3, Map.of("3rd field", "3rd value"),
                    4, Map.of("4th field", "4th value") });
    } catch (Exception e) {
        // failure is acceptable
    }
    disruption.stopDisrupting();
    String indexName = toIndexName(sqlExecutor.getCurrentSchema(), "t", null);
    // even if the inserts failed, the global checkpoint on every shard copy must catch up
    // with its local checkpoint once the disruption is over
    assertBusy(() -> {
        IndicesStatsResponse stats = client().admin().indices().prepareStats(indexName).clear().get();
        for (ShardStats shardStats : stats.getShards()) {
            assertThat(shardStats.getShardRouting().toString(),
                       shardStats.getSeqNoStats().getGlobalCheckpoint(),
                       equalTo(shardStats.getSeqNoStats().getLocalCheckpoint()));
        }
    }, 1, TimeUnit.MINUTES);
}
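The closing assertBusy is where ShardStats does the work in this test. Below is a minimal sketch of that check factored into a reusable helper; the helper name assertCheckpointsInSync is illustrative rather than CrateDB API, and SeqNoStats is org.elasticsearch.index.seqno.SeqNoStats.

private static void assertCheckpointsInSync(IndicesStatsResponse stats) {
    for (ShardStats shardStats : stats.getShards()) {
        SeqNoStats seqNoStats = shardStats.getSeqNoStats();
        // the global checkpoint trails the local checkpoint until every in-sync copy
        // has acknowledged the operations; equality means replication has fully caught up
        assertThat(shardStats.getShardRouting().toString(),
                   seqNoStats.getGlobalCheckpoint(),
                   equalTo(seqNoStats.getLocalCheckpoint()));
    }
}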
Use of org.elasticsearch.action.admin.indices.stats.ShardStats in project crate by crate.
From class IndexRecoveryIT, method testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonable:
@Test
public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonable() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    String indexName = "test";
    var settings = new ArrayList<String>();
    settings.add("number_of_replicas = 1");
    settings.add("\"unassigned.node_left.delayed_timeout\"='12h'");
    settings.add("\"soft_deletes.enabled\"=true");
    settings.add("\"soft_deletes.retention_lease.sync_interval\"='100ms'");
    final double reasonableOperationsBasedRecoveryProportion;
    if (randomBoolean()) {
        reasonableOperationsBasedRecoveryProportion = biasedDoubleBetween(0.05, 0.99);
        settings.add("\"recovery.file_based_threshold\"=" + reasonableOperationsBasedRecoveryProportion);
    } else {
        reasonableOperationsBasedRecoveryProportion = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(Settings.EMPTY);
    }
    logger.info("--> performing ops-based recoveries up to [{}%] of docs", reasonableOperationsBasedRecoveryProportion * 100.0);
    execute("CREATE TABLE doc.test (num INT)" +
            " CLUSTERED INTO 1 SHARDS" +
            " WITH (" + String.join(",", settings) + ")");
    int numDocs = randomIntBetween(1, 100);
    var args = new Object[numDocs][];
    for (int i = 0; i < numDocs; i++) {
        args[i] = new Object[] { i };
    }
    execute("INSERT INTO doc.test (num) VALUES (?)", args);
    ensureGreen(indexName);
    execute("OPTIMIZE TABLE doc.test");
    // wait for all history to be discarded
    assertBusy(() -> {
        for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getShards()) {
            final long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo();
            assertTrue(shardStats.getRetentionLeaseStats().leases() + " should discard history up to " + maxSeqNo,
                       shardStats.getRetentionLeaseStats().leases().leases().stream()
                           .allMatch(l -> l.retainingSequenceNumber() == maxSeqNo + 1));
        }
    });
    // ensure that all operations are in the safe commit
    execute("OPTIMIZE TABLE doc.test");
    final ShardStats shardStats = client().admin().indices().prepareStats(indexName).get().getShards()[0];
    final long docCount = shardStats.getStats().docs.getCount();
    assertThat(shardStats.getStats().docs.getDeleted(), equalTo(0L));
    assertThat(shardStats.getSeqNoStats().getMaxSeqNo() + 1, equalTo(docCount));
    final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
    final DiscoveryNodes discoveryNodes = clusterService().state().nodes();
    final IndexShardRoutingTable indexShardRoutingTable = clusterService().state().routingTable().shardRoutingTable(shardId);
    final ShardRouting replicaShardRouting = indexShardRoutingTable.replicaShards().get(0);
    assertTrue("should have lease for " + replicaShardRouting,
               client().admin().indices().prepareStats(indexName).get().getShards()[0]
                   .getRetentionLeaseStats().leases()
                   .contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(replicaShardRouting)));
    internalCluster().restartNode(
        discoveryNodes.get(replicaShardRouting.currentNodeId()).getName(),
        new InternalTestCluster.RestartCallback() {

            @Override
            public Settings onNodeStopped(String nodeName) throws Exception {
                assertFalse(client().admin().cluster().prepareHealth()
                                .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1))
                                .setWaitForEvents(Priority.LANGUID)
                                .get()
                                .isTimedOut());
                final int newDocCount = Math.toIntExact(Math.round(Math.ceil(
                    (1 + Math.ceil(docCount * reasonableOperationsBasedRecoveryProportion))
                        / (1 - reasonableOperationsBasedRecoveryProportion))));
                /*
                 * newDocCount >= (ceil(docCount * p) + 1) / (1 - p)
                 *
                 * ==> 0 <= newDocCount * (1 - p) - ceil(docCount * p) - 1
                 *        = newDocCount - (newDocCount * p + ceil(docCount * p) + 1)
                 *        < newDocCount - (ceil(newDocCount * p) + ceil(docCount * p))
                 *       <= newDocCount - ceil(newDocCount * p + docCount * p)
                 *
                 * ==> docCount < newDocCount + docCount - ceil((newDocCount + docCount) * p)
                 *             == localCheckpoint + 1 - ceil((newDocCount + docCount) * p)
                 *             == firstReasonableSeqNo
                 *
                 * The replica has docCount docs, i.e. has operations with seqnos [0..docCount-1], so a
                 * seqno-based recovery will start from docCount < firstReasonableSeqNo
                 *
                 * ==> it is unreasonable to recover the replica using a seqno-based recovery
                 */
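                // A concrete instance of the bound above (the numbers are chosen for illustration only,
                // they are not fixed by the test): with docCount = 100 and p = 0.25,
                //   newDocCount = ceil((1 + ceil(100 * 0.25)) / (1 - 0.25)) = ceil(26 / 0.75) = 35.
                // After indexing those 35 docs the local checkpoint is 134, so
                //   firstReasonableSeqNo = 135 - ceil(135 * 0.25) = 135 - 34 = 101,
                // and a seqno-based recovery of the replica would have to start at seqno 100 < 101,
                // i.e. below the threshold, forcing the file-based path the test asserts on.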
                var args = new Object[newDocCount][];
                for (int i = 0; i < newDocCount; i++) {
                    args[i] = new Object[] { i };
                }
                execute("INSERT INTO doc.test (num) VALUES (?)", args);
                execute("OPTIMIZE TABLE doc.test");
                // the primary should now have discarded the peer-recovery retention lease of the stopped replica
                assertBusy(() -> assertFalse("should no longer have lease for " + replicaShardRouting,
                                             client().admin().indices().prepareStats(indexName).get().getShards()[0]
                                                 .getRetentionLeaseStats().leases()
                                                 .contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(replicaShardRouting))));
                return super.onNodeStopped(nodeName);
            }
        });
    ensureGreen(indexName);
    // noinspection OptionalGetWithoutIsPresent because it fails the test if absent
    final var recoveryState = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).get()
        .shardRecoveryStates().get(indexName).stream()
        .filter(rs -> rs.getPrimary() == false)
        .findFirst()
        .get();
    // an ops-based recovery records no file details, so a positive file count proves the recovery was file-based
    assertThat(recoveryState.getIndex().totalFileCount(), greaterThan(0));
}
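Both lease assertions in this test revolve around ReplicationTracker.getPeerRecoveryRetentionLeaseId. A minimal sketch of that check in isolation; the helper name hasPeerRecoveryLease is hypothetical, while the stats and lease calls are the same ones the test itself uses.

private boolean hasPeerRecoveryLease(String indexName, ShardRouting shardRouting) {
    // the test reads leases via getShards()[0]; leases are maintained on the primary
    // and periodically replicated to the other copies
    ShardStats stats = client().admin().indices().prepareStats(indexName).get().getShards()[0];
    // every tracked in-sync copy is covered by a retention lease whose id is
    // derived from that copy's allocation id
    String leaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRouting);
    return stats.getRetentionLeaseStats().leases().contains(leaseId);
}

Once the stopped replica has missed enough new operations that an ops-based recovery would exceed the configured threshold, the primary discards its lease, which is exactly what the assertBusy inside onNodeStopped waits for.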
Use of org.elasticsearch.action.admin.indices.stats.ShardStats in project crate by crate.
From class IndexRecoveryIT, method testRecoveryFlushReplica:
@Test
public void testRecoveryFlushReplica() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(3);
    String indexName = "test";
    execute("CREATE TABLE doc.test (num INT)" +
            " CLUSTERED INTO 1 SHARDS" +
            " WITH (number_of_replicas = 0)");
    int numDocs = randomIntBetween(1, 10);
    var args = new Object[numDocs][];
    for (int i = 0; i < numDocs; i++) {
        args[i] = new Object[] { i };
    }
    execute("INSERT INTO doc.test (num) VALUES (?)", args);
    execute("ALTER TABLE doc.test SET (number_of_replicas = 1)");
    ensureGreen();
    ShardId shardId = null;
    for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) {
        shardId = shardStats.getShardRouting().shardId();
        if (shardStats.getShardRouting().primary() == false) {
            // recovery must have flushed the replica: its Lucene commit has to contain all docs,
            // with local checkpoint and max seqno in the commit user data matching the live stats
            assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs));
            SequenceNumbers.CommitInfo commitInfo =
                SequenceNumbers.loadSeqNoInfoFromLuceneCommit(shardStats.getCommitStats().getUserData().entrySet());
            assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint()));
            assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo()));
        }
    }
    SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
    assertBusy(() -> assertThat(client().execute(SyncedFlushAction.INSTANCE, new SyncedFlushRequest(indexName)).get().failedShards(),
                                equalTo(0)));
    execute("ALTER TABLE doc.test SET (number_of_replicas = 2)");
    ensureGreen(indexName);
    // recovery should keep the syncId if there was no indexing activity on the primary after the synced flush
    Set<String> syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards())
        .map(shardStats -> shardStats.getCommitStats().syncId())
        .collect(Collectors.toSet());
    assertThat(syncIds, hasSize(1));
}
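The final assertion works because CommitStats exposes the synced-flush sync id stored in each copy's Lucene commit. A stand-alone sketch of the same invariant; the helper name assertAllCopiesShareSyncId is an assumption, while syncId() is the real CommitStats accessor.

private void assertAllCopiesShareSyncId(String indexName) {
    ShardStats[] shards = client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards();
    Set<String> syncIds = Stream.of(shards)
        .map(stats -> stats.getCommitStats().syncId())
        .collect(Collectors.toSet());
    // a single distinct value across primary and replicas means recovery reused the synced commit;
    // a null in the set would mean some copy was never synced-flushed
    assertThat(syncIds, hasSize(1));
}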