Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class ExplainAnalyzeIntegrationTest, method testExplainSelectWithoutJobExecutionContexts.
@Test
public void testExplainSelectWithoutJobExecutionContexts() {
    execute("explain analyze select 1");
    Map<String, Object> analysis = (Map<String, Object>) response.rows()[0][0];
    Map<String, Object> executeAnalysis = (Map<String, Object>) analysis.get("Execute");
    assertTrue(executeAnalysis.keySet().contains("Total"));
    DiscoveryNodes nodes = clusterService().state().nodes();
    List<Matcher<String>> nodeIds = new ArrayList<>(nodes.getSize());
    for (DiscoveryNode discoveryNode : nodes) {
        nodeIds.add(is(discoveryNode.getId()));
    }
    assertThat(executeAnalysis.keySet(), hasItems(is("Total"), anyOf(nodeIds.toArray(new Matcher[]{}))));
}
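The test above reads the cluster's DiscoveryNodes from the applied ClusterState and iterates it to collect node ids. A minimal sketch (not from the crate sources) of the DiscoveryNodes accessors the snippets on this page rely on; the `state` variable and the "some-node-id" lookup key are placeholders:

DiscoveryNodes nodes = state.nodes();
int size = nodes.getSize();                     // number of nodes in this cluster state
DiscoveryNode master = nodes.getMasterNode();   // null while no master is elected
DiscoveryNode byId = nodes.get("some-node-id"); // lookup by node id, null if unknown
for (DiscoveryNode node : nodes) {              // DiscoveryNodes is Iterable<DiscoveryNode>
    String name = node.getName();
    String id = node.getId();
}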
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class IndexRecoveryIT, method testUsesFileBasedRecoveryIfRetentionLeaseMissing.
@Test
public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    String indexName = "test";
    execute("CREATE TABLE doc.test (num INT)" +
            " CLUSTERED INTO 1 SHARDS" +
            " WITH (" +
            " number_of_replicas = 1," +
            " \"unassigned.node_left.delayed_timeout\"='12h'," +
            " \"soft_deletes.enabled\"=true" +
            " )");
    int numDocs = randomIntBetween(1, 100);
    var args = new Object[numDocs][];
    for (int i = 0; i < numDocs; i++) {
        args[i] = new Object[] { i };
    }
    execute("INSERT INTO doc.test (num) VALUES (?)", args);
    ensureGreen(indexName);
    final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
    final DiscoveryNodes discoveryNodes = clusterService().state().nodes();
    final IndexShardRoutingTable indexShardRoutingTable = clusterService().state().routingTable().shardRoutingTable(shardId);
    final IndexShard primary = internalCluster().getInstance(
        IndicesService.class,
        discoveryNodes.get(indexShardRoutingTable.primaryShard().currentNodeId()).getName()
    ).getShardOrNull(shardId);
    final ShardRouting replicaShardRouting = indexShardRoutingTable.replicaShards().get(0);
    internalCluster().restartNode(discoveryNodes.get(replicaShardRouting.currentNodeId()).getName(), new InternalTestCluster.RestartCallback() {

        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            assertFalse(client().admin().cluster().prepareHealth()
                .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1))
                .setWaitForEvents(Priority.LANGUID)
                .get()
                .isTimedOut());
            // Remove the replica's peer recovery retention lease while its node is down,
            // so the subsequent recovery cannot be operations-based.
            final PlainActionFuture<ReplicationResponse> future = new PlainActionFuture<>();
            primary.removeRetentionLease(ReplicationTracker.getPeerRecoveryRetentionLeaseId(replicaShardRouting), future);
            future.get();
            return super.onNodeStopped(nodeName);
        }
    });
    ensureGreen(indexName);
    // noinspection OptionalGetWithoutIsPresent because it fails the test if absent
    final var recoveryState = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).get()
        .shardRecoveryStates().get(indexName).stream()
        .filter(rs -> rs.getPrimary() == false)
        .findFirst()
        .get();
    // File-based recovery copies segment files, so the file count must be positive.
    assertThat(recoveryState.getIndex().totalFileCount(), greaterThan(0));
}
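The recovery check at the end of this test (and the two that follow) is a dense one-liner. Broken apart into a sketch that uses the same test scaffolding (client(), indexName) and the same recovery API calls, it fetches the recovery states for the index and picks the replica's:

RecoveryResponse recoveryResponse = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).get();
List<RecoveryState> shardRecoveries = recoveryResponse.shardRecoveryStates().get(indexName);
RecoveryState replicaRecovery = shardRecoveries.stream()
    .filter(rs -> rs.getPrimary() == false)   // skip the primary; we want the rebuilt replica
    .findFirst()
    .orElseThrow();                           // equivalent to the bare get() used above
// A positive file count means segment files were copied, i.e. the recovery was file-based.
assertThat(replicaRecovery.getIndex().totalFileCount(), greaterThan(0));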
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class IndexRecoveryIT, method testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint.
@Test
public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    String indexName = "test";
    execute("CREATE TABLE doc.test (num INT)" +
            " CLUSTERED INTO 1 SHARDS" +
            " WITH (" +
            " number_of_replicas = 1," +
            " \"unassigned.node_left.delayed_timeout\"='12h'," +
            " \"soft_deletes.enabled\"=true" +
            " )");
    int numDocs = randomIntBetween(1, 100);
    var args = new Object[numDocs][];
    for (int i = 0; i < numDocs; i++) {
        args[i] = new Object[] { i };
    }
    execute("INSERT INTO doc.test (num) VALUES (?)", args);
    ensureGreen(indexName);
    final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
    final DiscoveryNodes discoveryNodes = clusterService().state().nodes();
    final IndexShardRoutingTable indexShardRoutingTable = clusterService().state().routingTable().shardRoutingTable(shardId);
    final IndexShard primary = internalCluster().getInstance(
        IndicesService.class,
        discoveryNodes.get(indexShardRoutingTable.primaryShard().currentNodeId()).getName()
    ).getShardOrNull(shardId);
    final ShardRouting replicaShardRouting = indexShardRoutingTable.replicaShards().get(0);
    internalCluster().restartNode(discoveryNodes.get(replicaShardRouting.currentNodeId()).getName(), new InternalTestCluster.RestartCallback() {

        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            assertFalse(client().admin().cluster().prepareHealth()
                .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1))
                .setWaitForEvents(Priority.LANGUID)
                .get()
                .isTimedOut());
            // Index more documents while the replica is down so its local copy falls behind.
            execute("INSERT INTO doc.test (num) VALUES (?)", args);
            // We do not guarantee that the replica can recover locally all the way to its own global checkpoint before starting
            // to recover from the primary, so we must be careful not to perform an operations-based recovery if this would require
            // some operations that are not being retained. Emulate this by advancing the lease ahead of the replica's GCP:
            primary.renewRetentionLease(
                ReplicationTracker.getPeerRecoveryRetentionLeaseId(replicaShardRouting),
                primary.seqNoStats().getMaxSeqNo() + 1,
                ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE);
            return super.onNodeStopped(nodeName);
        }
    });
    ensureGreen(indexName);
    // noinspection OptionalGetWithoutIsPresent because it fails the test if absent
    final var recoveryState = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).get()
        .shardRecoveryStates().get(indexName).stream()
        .filter(rs -> rs.getPrimary() == false)
        .findFirst()
        .get();
    assertThat(recoveryState.getIndex().totalFileCount(), greaterThan(0));
}
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class IndexRecoveryIT, method testDoesNotCopyOperationsInSafeCommit.
@Test
public void testDoesNotCopyOperationsInSafeCommit() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    String indexName = "test";
    execute("CREATE TABLE doc.test (num INT)" +
            " CLUSTERED INTO 1 SHARDS" +
            " WITH (" +
            " number_of_replicas = 0," +
            " \"soft_deletes.enabled\"=true" +
            " )");
    int numDocs = randomIntBetween(1, 100);
    var args = new Object[numDocs][];
    for (int i = 0; i < numDocs; i++) {
        args[i] = new Object[] { i };
    }
    execute("INSERT INTO doc.test (num) VALUES (?)", args);
    final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
    final DiscoveryNodes discoveryNodes = clusterService().state().nodes();
    final IndexShardRoutingTable indexShardRoutingTable = clusterService().state().routingTable().shardRoutingTable(shardId);
    final IndexShard primary = internalCluster().getInstance(
        IndicesService.class,
        discoveryNodes.get(indexShardRoutingTable.primaryShard().currentNodeId()).getName()
    ).getShardOrNull(shardId);
    final long maxSeqNoBeforeRecovery = primary.seqNoStats().getMaxSeqNo();
    assertBusy(() -> assertThat(primary.getLastSyncedGlobalCheckpoint(), equalTo(maxSeqNoBeforeRecovery)));
    // makes a safe commit
    execute("OPTIMIZE TABLE doc.test");
    execute("INSERT INTO doc.test (num) VALUES (?)", args);
    execute("ALTER TABLE doc.test SET (number_of_replicas = 1)");
    ensureGreen(indexName);
    final long maxSeqNoAfterRecovery = primary.seqNoStats().getMaxSeqNo();
    // noinspection OptionalGetWithoutIsPresent because it fails the test if absent
    final var recoveryState = client().execute(RecoveryAction.INSTANCE, new RecoveryRequest()).get()
        .shardRecoveryStates().get(indexName).stream()
        .filter(rs -> rs.getPrimary() == false)
        .findFirst()
        .get();
    // Only the operations indexed after the safe commit should be replayed from the translog;
    // everything contained in the safe commit is shipped as files instead.
    assertThat((long) recoveryState.getTranslog().recoveredOperations(), lessThanOrEqualTo(maxSeqNoAfterRecovery - maxSeqNoBeforeRecovery));
}
Use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.
From the class AbstractDisruptionTestCase, method assertNoMaster.
void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception {
    assertBusy(() -> {
        ClusterState state = getNodeClusterState(node);
        final DiscoveryNodes nodes = state.nodes();
        assertNull("node [" + node + "] still has [" + nodes.getMasterNode() + "] as master", nodes.getMasterNode());
        if (expectedBlocks != null) {
            for (ClusterBlockLevel level : expectedBlocks.levels()) {
                assertTrue("node [" + node + "] does not have level [" + level + "] in its blocks",
                    state.getBlocks().hasGlobalBlockWithLevel(level));
            }
        }
    }, maxWaitTime.getMillis(), TimeUnit.MILLISECONDS);
}
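A hypothetical call site (not from the crate sources) would wait for an isolated node to drop its master and to apply the no-master write block; the `isolatedNode` name and the use of NoMasterBlockService.NO_MASTER_BLOCK_WRITES are assumptions for illustration:

// Wait up to 30 seconds for the isolated node to lose its master and report the write block.
assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30));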