Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.
From the class ExternalTestCluster, method ensureEstimatedStats:
@Override
public void ensureEstimatedStats() {
    if (size() > 0) {
        NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
                .clear().setBreaker(true).setIndices(true).execute().actionGet();
        for (NodeStats stats : nodeStats.getNodes()) {
            assertThat("Fielddata breaker not reset to 0 on node: " + stats.getNode(),
                    stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
            // ExternalTestCluster does not check the request breaker,
            // because checking it requires a network request, which in
            // turn increments the breaker, making it non-0
            assertThat("Fielddata size must be 0 on node: " + stats.getNode(),
                    stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
            assertThat("Query cache size must be 0 on node: " + stats.getNode(),
                    stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L));
            assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(),
                    stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L));
        }
    }
}
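The check above boils down to a single nodes-stats round trip with only the breaker and indices sections enabled. A minimal standalone sketch of the same call, assuming a connected Client instance named client and the same imports as the snippet above:

// Sketch: read the fielddata breaker estimate and fielddata cache size for every node.
// `client` is an assumed, already-built org.elasticsearch.client.Client.
NodesStatsResponse resp = client.admin().cluster().prepareNodesStats()
        .clear()             // start from an empty set of stats sections
        .setBreaker(true)    // include circuit breaker stats
        .setIndices(true)    // include indices-level stats (fielddata, query cache, segments)
        .get();
for (NodeStats stats : resp.getNodes()) {
    long breakerEstimate = stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated();
    long fielddataBytes = stats.getIndices().getFieldData().getMemorySizeInBytes();
    // between tests, both values are expected to be 0
}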
Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.
From the class InternalTestCluster, method ensureEstimatedStats:
@Override
public void ensureEstimatedStats() {
    if (size() > 0) {
        // Check the breakers on each node directly, without issuing a network
        // request, because a network request can itself increment one
        // of the breakers
        for (NodeAndClient nodeAndClient : nodes.values()) {
            final IndicesFieldDataCache fdCache =
                    getInstanceFromNode(IndicesService.class, nodeAndClient.node).getIndicesFieldDataCache();
            // Clean up the cache, ensuring that entries' listeners have been called
            fdCache.getCache().refresh();
            final String name = nodeAndClient.name;
            final CircuitBreakerService breakerService =
                    getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node);
            CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA);
            assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L));
            // the request breaker is checked in a busy loop; fail if it never reached 0
            try {
                assertBusy(new Runnable() {
                    @Override
                    public void run() {
                        CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST);
                        assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L));
                    }
                });
            } catch (Exception e) {
                fail("Exception during check for request breaker reset to 0: " + e);
            }
            NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
            CommonStatsFlags flags = new CommonStatsFlags(Flag.FieldData, Flag.QueryCache, Flag.Segments);
            NodeStats stats = nodeService.stats(flags,
                    false, false, false, false, false, false, false, false, false, false, false);
            assertThat("Fielddata size must be 0 on node: " + stats.getNode(),
                    stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
            assertThat("Query cache size must be 0 on node: " + stats.getNode(),
                    stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L));
            assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(),
                    stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L));
        }
    }
}
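The request-breaker assertion is wrapped in assertBusy, presumably because in-flight transport requests can briefly hold request-breaker bytes before they complete. On Java 8+ the same check can be written with a lambda; a sketch, assuming the breakerService and name variables from the loop above are in scope:

// Sketch: the same busy-wait request-breaker assertion, written as a lambda
// (the body throws no checked exceptions, so it satisfies Runnable).
try {
    assertBusy(() -> {
        CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST);
        assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L));
    });
} catch (Exception e) {
    fail("Exception during check for request breaker reset to 0: " + e);
}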
Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.
From the class DiskUsageTests, method testFillDiskUsageSomeInvalidValues:
public void testFillDiskUsageSomeInvalidValues() {
    ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvailableUsages = ImmutableOpenMap.builder();
    ImmutableOpenMap.Builder<String, DiskUsage> newMostAvailableUsages = ImmutableOpenMap.builder();
    FsInfo.Path[] node1FSInfo = new FsInfo.Path[] {
            new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80),
            new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1),
            new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280) };
    FsInfo.Path[] node2FSInfo = new FsInfo.Path[] {
            new FsInfo.Path("/least_most", "/dev/sda", -2, -1, -1) };
    FsInfo.Path[] node3FSInfo = new FsInfo.Path[] {
            new FsInfo.Path("/most", "/dev/sda", 100, 90, 70),
            new FsInfo.Path("/least", "/dev/sda", 10, -8, 0) };
    List<NodeStats> nodeStats = Arrays.asList(
            new NodeStats(new DiscoveryNode("node_1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
                    0, null, null, null, null, null, new FsInfo(0, null, node1FSInfo), null, null, null, null, null, null),
            new NodeStats(new DiscoveryNode("node_2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
                    0, null, null, null, null, null, new FsInfo(0, null, node2FSInfo), null, null, null, null, null, null),
            new NodeStats(new DiscoveryNode("node_3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
                    0, null, null, null, null, null, new FsInfo(0, null, node3FSInfo), null, null, null, null, null, null));
    InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages);
    DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1");
    DiskUsage mostNode_1 = newMostAvailableUsages.get("node_1");
    assertNull("node1 should have been skipped", leastNode_1);
    assertDiskUsage(mostNode_1, node1FSInfo[2]);
    DiskUsage leastNode_2 = newLeastAvailableUsages.get("node_2");
    DiskUsage mostNode_2 = newMostAvailableUsages.get("node_2");
    assertNull("node2 should have been skipped", leastNode_2);
    assertNull("node2 should have been skipped", mostNode_2);
    DiskUsage leastNode_3 = newLeastAvailableUsages.get("node_3");
    DiskUsage mostNode_3 = newMostAvailableUsages.get("node_3");
    assertDiskUsage(leastNode_3, node3FSInfo[1]);
    assertDiskUsage(mostNode_3, node3FSInfo[0]);
}
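The three numeric arguments to each FsInfo.Path are total, free, and available bytes; negative values mark a path that fillDiskUsagePerNode should skip, which is what the assertNull checks verify. For contrast, a hedged sketch of the all-valid case using the same helpers and constructor shapes as the test above (node name "node_x" and the byte counts are made up for illustration):

// Sketch: with only valid paths, both maps get an entry for the node, and the
// least/most available paths are chosen by the available-bytes column (last argument).
ImmutableOpenMap.Builder<String, DiskUsage> least = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, DiskUsage> most = ImmutableOpenMap.builder();
FsInfo.Path[] paths = new FsInfo.Path[] {
        new FsInfo.Path("/small", "/dev/sda", 100, 20, 10),    // least available
        new FsInfo.Path("/big", "/dev/sdb", 1000, 900, 800) }; // most available
List<NodeStats> stats = Arrays.asList(
        new NodeStats(new DiscoveryNode("node_x", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
                0, null, null, null, null, null, new FsInfo(0, null, paths), null, null, null, null, null, null));
InternalClusterInfoService.fillDiskUsagePerNode(logger, stats, least, most);
assertDiskUsage(least.get("node_x"), paths[0]);
assertDiskUsage(most.get("node_x"), paths[1]);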
Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.
From the class RandomExceptionCircuitBreakerIT, method testBreakerWithRandomExceptions:
public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException {
    for (NodeStats node : client().admin().cluster().prepareNodesStats()
            .clear().setBreaker(true).execute().actionGet().getNodes()) {
        assertThat("Breaker is not set to 0", node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
    }
    String mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("type")
                    .startObject("properties")
                        .startObject("test-str")
                            .field("type", "keyword")
                            .field("doc_values", randomBoolean())
                        .endObject()
                        .startObject("test-num")
                            .field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer")))
                        .endObject()
                    .endObject()
                .endObject()
            .endObject().string();
    final double topLevelRate;
    final double lowLevelRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                lowLevelRate = 1.0 / between(2, 10);
                topLevelRate = 0.0d;
            } else {
                topLevelRate = 1.0 / between(2, 10);
                lowLevelRate = 0.0d;
            }
        } else {
            lowLevelRate = 1.0 / between(2, 10);
            topLevelRate = 1.0 / between(2, 10);
        }
    } else {
        // rarely no exception
        topLevelRate = 0d;
        lowLevelRate = 0d;
    }
    Settings.Builder settings = Settings.builder()
            .put(indexSettings())
            .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
            .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
            .put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d);
    logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
    CreateIndexResponse response = client().admin().indices().prepareCreate("test")
            .setSettings(settings)
            .addMapping("type", mapping, XContentType.JSON)
            .execute().actionGet();
    final int numDocs;
    if (response.isShardsAcked() == false) {
        /* some seeds just won't let you create the index at all and we enter a ping-pong mode
         * trying one node after another etc. that is ok but we need to make sure we don't wait
         * forever when indexing documents so we set numDocs = 1 and expect all shards to fail
         * when we search below.*/
        if (response.isAcknowledged()) {
            logger.info("Index creation timed out waiting for primaries to start - only index one doc and expect searches to fail");
        } else {
            logger.info("Index creation failed - only index one doc and expect searches to fail");
        }
        numDocs = 1;
    } else {
        numDocs = between(10, 100);
    }
    for (int i = 0; i < numDocs; i++) {
        try {
            client().prepareIndex("test", "type", "" + i)
                    .setTimeout(TimeValue.timeValueSeconds(1))
                    .setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i)
                    .get();
        } catch (ElasticsearchException ex) {
            // ignored: random exceptions are expected while indexing
        }
    }
    logger.info("Start Refresh");
    // don't assert on failures here
    RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
    final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info("Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
            refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length,
            refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
    final int numSearches = scaledRandomIntBetween(50, 150);
    NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).execute().actionGet();
    for (NodeStats stats : resp.getNodes()) {
        assertThat("Breaker is not set to 0", stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
    }
    for (int i = 0; i < numSearches; i++) {
        SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery());
        if (random().nextBoolean()) {
            searchRequestBuilder.addSort("test-str", SortOrder.ASC);
        }
        searchRequestBuilder.addSort("test-num", SortOrder.ASC);
        boolean success = false;
        try {
            // Sort by the string and numeric fields, to load them into field data
            searchRequestBuilder.get();
            success = true;
        } catch (SearchPhaseExecutionException ex) {
            logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
        }
        if (frequently()) {
            // Now, clear the cache and check that the circuit breaker has been
            // successfully set back to zero. If there is a bug in the circuit
            // breaker adjustment code, it should show up here by the breaker
            // estimate being either positive or negative.
            // make sure all shards are there - there could be shards that are still starting up.
            ensureGreen("test");
            assertAllSuccessful(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).execute().actionGet());
            // Since .cleanUp() is no longer called on cache clear, we need to call it on each node manually
            for (String node : internalCluster().getNodeNames()) {
                final IndicesFieldDataCache fdCache =
                        internalCluster().getInstance(IndicesService.class, node).getIndicesFieldDataCache();
                // Clean up the cache, ensuring that entries' listeners have been called
                fdCache.getCache().refresh();
            }
            NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
                    .clear().setBreaker(true).execute().actionGet();
            for (NodeStats stats : nodeStats.getNodes()) {
                assertThat("Breaker reset to 0 last search success: " + success + " mapping: " + mapping,
                        stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
            }
        }
    }
}
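The fielddata-breaker check recurs three times in this test. A hypothetical helper that extracts the pattern, using only calls already present above (the name assertFielddataBreakerAtZero is made up for this sketch):

// Hypothetical helper: assert the fielddata breaker estimate is 0 on every node.
private void assertFielddataBreakerAtZero(String context) {
    NodesStatsResponse resp = client().admin().cluster().prepareNodesStats()
            .clear().setBreaker(true).execute().actionGet();
    for (NodeStats stats : resp.getNodes()) {
        assertThat("Fielddata breaker not at 0 (" + context + ") on node: " + stats.getNode(),
                stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
    }
}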
Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.
From the class IndexRecoveryIT, method testRerouteRecovery:
public void testRerouteRecovery() throws Exception {
    logger.info("--> start node A");
    final String nodeA = internalCluster().startNode();
    logger.info("--> create index on node: {}", nodeA);
    ByteSizeValue shardSize = createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT)
            .getShards()[0].getStats().getStore().size();
    logger.info("--> start node B");
    final String nodeB = internalCluster().startNode();
    ensureGreen();
    logger.info("--> slowing down recoveries");
    slowDownRecovery(shardSize);
    logger.info("--> move shard from: {} to: {}", nodeA, nodeB);
    client().admin().cluster().prepareReroute()
            .add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB))
            .execute().actionGet().getState();
    logger.info("--> waiting for recovery to start both on source and target");
    final Index index = resolveIndex(INDEX_NAME);
    assertBusy(new Runnable() {
        @Override
        public void run() {
            IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA);
            assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1));
            indicesService = internalCluster().getInstance(IndicesService.class, nodeB);
            assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1));
        }
    });
    logger.info("--> request recoveries");
    RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
    List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
    List<RecoveryState> nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates);
    assertThat(nodeARecoveryStates.size(), equalTo(1));
    List<RecoveryState> nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
    assertThat(nodeBRecoveryStates.size(), equalTo(1));
    assertRecoveryState(nodeARecoveryStates.get(0), 0, StoreRecoverySource.EMPTY_STORE_INSTANCE, true, Stage.DONE, null, nodeA);
    validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
    assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, nodeA, nodeB);
    validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
    logger.info("--> request node recovery stats");
    NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear()
            .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
    long nodeAThrottling = Long.MAX_VALUE;
    long nodeBThrottling = Long.MAX_VALUE;
    for (NodeStats nodeStats : statsResponse.getNodes()) {
        final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
        if (nodeStats.getNode().getName().equals(nodeA)) {
            assertThat("node A should have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(1));
            assertThat("node A should not have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(0));
            nodeAThrottling = recoveryStats.throttleTime().millis();
        }
        if (nodeStats.getNode().getName().equals(nodeB)) {
            assertThat("node B should not have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(0));
            assertThat("node B should have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(1));
            nodeBThrottling = recoveryStats.throttleTime().millis();
        }
    }
    logger.info("--> checking throttling increases");
    final long finalNodeAThrottling = nodeAThrottling;
    final long finalNodeBThrottling = nodeBThrottling;
    assertBusy(new Runnable() {
        @Override
        public void run() {
            NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear()
                    .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
            assertThat(statsResponse.getNodes(), hasSize(2));
            for (NodeStats nodeStats : statsResponse.getNodes()) {
                final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
                if (nodeStats.getNode().getName().equals(nodeA)) {
                    assertThat("node A throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeAThrottling));
                }
                if (nodeStats.getNode().getName().equals(nodeB)) {
                    assertThat("node B throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeBThrottling));
                }
            }
        }
    });
    logger.info("--> speeding up recoveries");
    restoreRecoverySpeed();
    // wait for it to be finished
    ensureGreen();
    response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
    recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
    assertThat(recoveryStates.size(), equalTo(1));
    assertRecoveryState(recoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
    validateIndexRecoveryState(recoveryStates.get(0).getIndex());
    statsResponse = client().admin().cluster().prepareNodesStats().clear()
            .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
    assertThat(statsResponse.getNodes(), hasSize(2));
    for (NodeStats nodeStats : statsResponse.getNodes()) {
        final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
        assertThat(recoveryStats.currentAsSource(), equalTo(0));
        assertThat(recoveryStats.currentAsTarget(), equalTo(0));
        if (nodeStats.getNode().getName().equals(nodeA)) {
            assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0L));
        }
        if (nodeStats.getNode().getName().equals(nodeB)) {
            assertThat("node B throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0L));
        }
    }
    logger.info("--> bump replica count");
    client().admin().indices().prepareUpdateSettings(INDEX_NAME)
            .setSettings(Settings.builder().put("number_of_replicas", 1)).execute().actionGet();
    ensureGreen();
    statsResponse = client().admin().cluster().prepareNodesStats().clear()
            .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
    assertThat(statsResponse.getNodes(), hasSize(2));
    for (NodeStats nodeStats : statsResponse.getNodes()) {
        final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
        assertThat(recoveryStats.currentAsSource(), equalTo(0));
        assertThat(recoveryStats.currentAsTarget(), equalTo(0));
        if (nodeStats.getNode().getName().equals(nodeA)) {
            assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0L));
        }
        if (nodeStats.getNode().getName().equals(nodeB)) {
            assertThat("node B throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0L));
        }
    }
    logger.info("--> start node C");
    String nodeC = internalCluster().startNode();
    assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").get().isTimedOut());
    logger.info("--> slowing down recoveries");
    slowDownRecovery(shardSize);
    logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC);
    client().admin().cluster().prepareReroute()
            .add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC))
            .execute().actionGet().getState();
    response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
    recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
    nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates);
    assertThat(nodeARecoveryStates.size(), equalTo(1));
    nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
    assertThat(nodeBRecoveryStates.size(), equalTo(1));
    List<RecoveryState> nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
    assertThat(nodeCRecoveryStates.size(), equalTo(1));
    assertRecoveryState(nodeARecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, Stage.DONE, nodeB, nodeA);
    validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
    assertRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
    validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
    // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
    assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, nodeB, nodeC);
    validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
    if (randomBoolean()) {
        // shutdown node with relocation source of replica shard and check if recovery continues
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeA));
        ensureStableCluster(2);
        response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
        recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
        nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates);
        assertThat(nodeARecoveryStates.size(), equalTo(0));
        nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
        assertThat(nodeBRecoveryStates.size(), equalTo(1));
        nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
        assertThat(nodeCRecoveryStates.size(), equalTo(1));
        assertRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
        validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
        assertOnGoingRecoveryState(nodeCRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, nodeB, nodeC);
        validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
    }
    logger.info("--> speeding up recoveries");
    restoreRecoverySpeed();
    ensureGreen();
    response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
    recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
    nodeARecoveryStates = findRecoveriesForTargetNode(nodeA, recoveryStates);
    assertThat(nodeARecoveryStates.size(), equalTo(0));
    nodeBRecoveryStates = findRecoveriesForTargetNode(nodeB, recoveryStates);
    assertThat(nodeBRecoveryStates.size(), equalTo(1));
    nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
    assertThat(nodeCRecoveryStates.size(), equalTo(1));
    assertRecoveryState(nodeBRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, true, Stage.DONE, nodeA, nodeB);
    validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
    // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
    assertRecoveryState(nodeCRecoveryStates.get(0), 0, PeerRecoverySource.INSTANCE, false, Stage.DONE, nodeB, nodeC);
    validateIndexRecoveryState(nodeCRecoveryStates.get(0).getIndex());
}
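This test fetches the recovery section of the nodes-stats API several times and then matches entries by node name. A hypothetical helper that captures that pattern, using only calls already present above plus java.util.HashMap (the name recoveryStatsByNodeName is made up for this sketch):

// Hypothetical helper: map node name -> RecoveryStats using only the recovery
// section of the nodes-stats API, as the assertions above do inline.
private Map<String, RecoveryStats> recoveryStatsByNodeName() {
    NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear()
            .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
    Map<String, RecoveryStats> result = new HashMap<>();
    for (NodeStats nodeStats : statsResponse.getNodes()) {
        result.put(nodeStats.getNode().getName(), nodeStats.getIndices().getRecoveryStats());
    }
    return result;
}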