Use of org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse in project elasticsearch by elastic.
In class SearchPreferenceIT, method testNodesOnlyRandom:
public void testNodesOnlyRandom() throws Exception {
    // this test needs at least one replica to make sure two consecutive searches go to two different copies of the same data
    assertAcked(prepareCreate("test").setSettings(
        Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))));
    ensureGreen();
    client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
    refresh();
    final Client client = internalCluster().smartClient();
    // multiple wildcards to cover the multi-parameter use case
    SearchRequestBuilder request = client.prepareSearch("test").setQuery(matchAllQuery())
        .setPreference("_only_nodes:*,nodes*");
    assertSearchOnRandomNodes(request);
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference("_only_nodes:*");
    assertSearchOnRandomNodes(request);
    ArrayList<String> allNodeIds = new ArrayList<>();
    ArrayList<String> allNodeNames = new ArrayList<>();
    ArrayList<String> allNodeHosts = new ArrayList<>();
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet();
    for (NodeStats node : nodeStats.getNodes()) {
        allNodeIds.add(node.getNode().getId());
        allNodeNames.add(node.getNode().getName());
        allNodeHosts.add(node.getHostname());
    }
    String node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeIds.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeNames.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    // Mix of valid and invalid nodes
    node_expr = "_only_nodes:*,invalidnode";
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
}
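The assertSearchOnRandomNodes helper is not included in this excerpt. A minimal sketch of what such a check could look like, using the standard SearchResponse and SearchShardTarget accessors, is shown below; it is an illustration of the idea, not the project's actual helper.

// Hypothetical sketch, not the original helper: run the same request twice and
// verify the hits were served by more than one node, via SearchHit#getShard().
private void assertSearchOnRandomNodes(SearchRequestBuilder request) {
    Set<String> hitNodes = new HashSet<>();
    for (int i = 0; i < 2; i++) {
        SearchResponse searchResponse = request.execute().actionGet();
        assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
        hitNodes.add(searchResponse.getHits().getAt(0).getShard().getNodeId());
    }
    assertThat(hitNodes.size(), greaterThan(1));
}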
Use of org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse in project vertexium by visallo.
In class Elasticsearch5SearchIndexTest, method getNumQueries:
private long getNumQueries() {
    Client client = elasticsearchResource.getRunner().client();
    NodesStatsResponse nodeStats = NodesStatsAction.INSTANCE.newRequestBuilder(client).get();
    List<NodeStats> nodes = nodeStats.getNodes();
    assertEquals(1, nodes.size());
    SearchStats searchStats = nodes.get(0).getIndices().getSearch();
    return searchStats.getTotal().getQueryCount();
}
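This example builds the request through NodesStatsAction.INSTANCE.newRequestBuilder(client) rather than the cluster admin client used elsewhere on this page. A rough equivalent using prepareNodesStats(), restricted to the indices section, might look like the following sketch; like the assertion above, it assumes a single-node test cluster.

// Sketch of an equivalent lookup via the cluster admin client (assumes one node):
NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().setIndices(true).get();
SearchStats.Stats total = stats.getNodes().get(0).getIndices().getSearch().getTotal();
long queryCount = total.getQueryCount();           // cumulative query phases executed
long queryMillis = total.getQueryTimeInMillis();   // time spent in the query phase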
Use of org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse in project elasticsearch by elastic.
In class NodeServiceTests, method testHttpServerDisabled:
public void testHttpServerDisabled() {
    // test for a bug where, if HTTP stats were requested but HTTP was disabled, NodeService would hit a NullPointerException
    NodesStatsResponse response = client().admin().cluster().nodesStats(new NodesStatsRequest().http(true)).actionGet();
    assertThat(response.getNodes(), hasSize(1));
}
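The boolean flags on NodesStatsRequest control which sections are collected, and a section that was not collected (or, as here, a disabled HTTP server) can come back as null on NodeStats. A hedged sketch of the builder-style form of the same request, with the HTTP section null-checked before use:

// Sketch only: builder form of the request above; NodeStats#getHttp() may be null,
// so it is checked before reading the open-connection count.
NodesStatsResponse rsp = client().admin().cluster().prepareNodesStats().setHttp(true).get();
for (NodeStats node : rsp.getNodes()) {
    HttpStats http = node.getHttp();
    if (http != null) {
        logger.info("node [{}] open HTTP connections: {}", node.getNode().getName(), http.getServerOpen());
    }
}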
Use of org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse in project elasticsearch by elastic.
In class CorruptedFileIT, method testCorruptionOnNetworkLayerFinalizingRecovery:
/**
 * This test triggers a corrupt index exception during finalization: if an empty commit point is transferred
 * during recovery, we don't know the version of the segments_N file because it has no segments we can take it from.
 * This simulates recoveries from old indices, or even indices without checksums, and makes sure that if we fail
 * during finalization we also check whether the primary is OK. Without the relevant checks this test fails with a RED cluster.
 */
public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionException, InterruptedException, IOException {
    internalCluster().ensureAtLeastNumDataNodes(2);
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
        if (stat.getNode().isDataNode()) {
            dataNodeStats.add(stat);
        }
    }
    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put("index.routing.allocation.include._name", primariesNode.getNode().getName())
        .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
        .put("index.allocation.max_retries", Integer.MAX_VALUE))); // keep on retrying
    // allocated with empty commit
    ensureGreen();
    final AtomicBoolean corrupt = new AtomicBoolean(true);
    final CountDownLatch hasCorrupted = new CountDownLatch(1);
    for (NodeStats dataNode : dataNodeStats) {
        MockTransportService mockTransportService =
            (MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName());
        mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {
                @Override
                protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request,
                                           TransportRequestOptions options) throws IOException {
                    if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                        RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                        byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes;
                        int i = randomIntBetween(0, req.content().length() - 1);
                        // flip one byte in the content
                        array[i] = (byte) ~array[i];
                        hasCorrupted.countDown();
                    }
                    super.sendRequest(connection, requestId, action, request, options);
                }
            });
    }
    Settings build = Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
        .put("index.routing.allocation.include._name", primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName())
        .build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    hasCorrupted.await();
    corrupt.set(false);
    ensureGreen();
}
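The same prepareNodesStats() response that is used above to find data nodes also carries per-node sections such as filesystem statistics when they are requested. As a small illustrative sketch (not part of the test), the pattern could be reused to log available disk space per data node; setFs(true) is assumed here to make sure the fs section is populated.

// Sketch only: reuse the node-stats pattern to report free disk space per data node.
NodesStatsResponse fsStats = client().admin().cluster().prepareNodesStats().setFs(true).get();
for (NodeStats stat : fsStats.getNodes()) {
    if (stat.getNode().isDataNode() && stat.getFs() != null) {
        logger.info("data node [{}] has [{}] available", stat.getNode().getName(), stat.getFs().getTotal().getAvailable());
    }
}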
Use of org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse in project elasticsearch by elastic.
In class CorruptedFileIT, method testCorruptionOnNetworkLayer:
/**
 * Tests corruption that happens on the network layer, and that the primary does not get affected by corruption that
 * happens on the way to the replica. The file on disk stays uncorrupted.
 */
public void testCorruptionOnNetworkLayer() throws ExecutionException, InterruptedException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    if (cluster().numDataNodes() < 3) {
        internalCluster().startNode(Settings.builder()
            .put(Node.NODE_DATA_SETTING.getKey(), true)
            .put(Node.NODE_MASTER_SETTING.getKey(), false));
    }
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
        if (stat.getNode().isDataNode()) {
            dataNodeStats.add(stat);
        }
    }
    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
        // don't go crazy here, it must recover fast
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 4))
        .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
        .put("index.routing.allocation.include._name", primariesNode.getNode().getName())
        .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    // we have to flush at least once here since we don't corrupt the translog
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final boolean truncate = randomBoolean();
    for (NodeStats dataNode : dataNodeStats) {
        MockTransportService mockTransportService =
            (MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName());
        mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {
                @Override
                protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request,
                                           TransportRequestOptions options) throws IOException {
                    if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                        RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                        if (truncate && req.length() > 1) {
                            BytesRef bytesRef = req.content().toBytesRef();
                            BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
                            request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(),
                                array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
                        } else {
                            assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!";
                            final byte[] array = req.content().toBytesRef().bytes;
                            int i = randomIntBetween(0, req.content().length() - 1);
                            // flip one byte in the content
                            array[i] = (byte) ~array[i];
                        }
                    }
                    super.sendRequest(connection, requestId, action, request, options);
                }
            });
    }
    Settings build = Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
        .put("index.routing.allocation.include._name", "*")
        .build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    ClusterHealthResponse actionGet = client().admin().cluster()
        .health(Requests.clusterHealthRequest("test").waitForGreenStatus()).actionGet();
    if (actionGet.isTimedOut()) {
        logger.info("ensureGreen timed out, cluster state:\n{}\n{}",
            client().admin().cluster().prepareState().get().getState(),
            client().admin().cluster().preparePendingClusterTasks().get());
        assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
    }
    // we are green, so the primaries did not get corrupted.
    // ensure that no shard is actually allocated on the unlucky node
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    for (IndexShardRoutingTable table : clusterStateResponse.getState().getRoutingTable().index("test")) {
        for (ShardRouting routing : table) {
            if (unluckyNode.getNode().getId().equals(routing.currentNodeId())) {
                assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
                assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
            }
        }
    }
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
        SearchResponse response = client().prepareSearch().setSize(numDocs).get();
        assertHitCount(response, numDocs);
    }
}
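Both CorruptedFileIT examples above start with the same boilerplate: fetch node stats, keep only data nodes, require at least two, and shuffle them. A hypothetical helper (not present in the original class) that factors out that shared setup could look like this:

// Hypothetical refactor of the shared setup: fetch node stats, keep the data nodes,
// assert there are at least two, shuffle, and hand the list back to the caller.
private List<NodeStats> pickShuffledDataNodes() {
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
        if (stat.getNode().isDataNode()) {
            dataNodeStats.add(stat);
        }
    }
    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    return dataNodeStats;
}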