
Example 6 with NodeStats

Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.

Class CircuitBreakerServiceIT, method testCustomCircuitBreakerRegistration.

public void testCustomCircuitBreakerRegistration() throws Exception {
    Iterable<CircuitBreakerService> serviceIter = internalCluster().getInstances(CircuitBreakerService.class);
    final String breakerName = "customBreaker";
    BreakerSettings breakerSettings = new BreakerSettings(breakerName, 8, 1.03);
    CircuitBreaker breaker = null;
    for (CircuitBreakerService s : serviceIter) {
        s.registerBreaker(breakerSettings);
        breaker = s.getBreaker(breakerSettings.getName());
    }
    if (breaker != null) {
        try {
            breaker.addEstimateBytesAndMaybeBreak(16, "test");
        } catch (CircuitBreakingException e) {
            // expected: the 16-byte estimate exceeds the breaker's 8-byte limit, so the breaker trips
        }
    }
    NodesStatsResponse stats = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).get();
    int breaks = 0;
    for (NodeStats stat : stats.getNodes()) {
        CircuitBreakerStats breakerStats = stat.getBreaker().getStats(breakerName);
        breaks += breakerStats.getTrippedCount();
    }
    assertThat(breaks, greaterThanOrEqualTo(1));
}
Also used : NodesStatsResponse(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse) NodeStats(org.elasticsearch.action.admin.cluster.node.stats.NodeStats) BreakerSettings(org.elasticsearch.indices.breaker.BreakerSettings) NoopCircuitBreaker(org.elasticsearch.common.breaker.NoopCircuitBreaker) CircuitBreaker(org.elasticsearch.common.breaker.CircuitBreaker) CircuitBreakerStats(org.elasticsearch.indices.breaker.CircuitBreakerStats) CircuitBreakingException(org.elasticsearch.common.breaker.CircuitBreakingException) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) CircuitBreakerService(org.elasticsearch.indices.breaker.CircuitBreakerService) HierarchyCircuitBreakerService(org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService)
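Beyond the tripped count asserted above, each CircuitBreakerStats entry carried in a NodeStats response also exposes the breaker's configured limit and its currently estimated usage. A minimal sketch of reading those fields for the custom breaker, assuming the same test scaffolding (client(), logger) as in the example; the log format is illustrative:

// fetch only breaker stats, then inspect the custom breaker on every node
NodesStatsResponse breakerStatsResponse = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).get();
for (NodeStats nodeStats : breakerStatsResponse.getNodes()) {
    CircuitBreakerStats custom = nodeStats.getBreaker().getStats("customBreaker");
    if (custom != null) {
        logger.info("node [{}]: limit [{}], estimated [{}], tripped [{}]",
            nodeStats.getNode().getName(), custom.getLimit(), custom.getEstimated(), custom.getTrippedCount());
    }
}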

Example 7 with NodeStats

Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.

Class CircuitBreakerServiceIT, method testMemoryBreaker.

public void testMemoryBreaker() throws Exception {
    if (noopBreakerUsed()) {
        logger.info("--> noop breakers used, skipping test");
        return;
    }
    assertAcked(prepareCreate("cb-test", 1, Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))).addMapping("type", "test", "type=text,fielddata=true"));
    final Client client = client();
    // index some different terms so we have some field data for loading
    int docCount = scaledRandomIntBetween(300, 1000);
    List<IndexRequestBuilder> reqs = new ArrayList<>();
    for (long id = 0; id < docCount; id++) {
        reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", "value" + id));
    }
    indexRandom(true, false, true, reqs);
    // clear field data cache (thus setting the loaded field data back to 0)
    clearFieldData();
    // Update circuit breaker settings
    Settings settings = Settings.builder().put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b").put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05).build();
    assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
    // execute a search that loads field data (sorting on the "test" field)
    // again, this time it should trip the breaker
    SearchRequestBuilder searchRequest = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC);
    String errMsg = "Data too large, data for [test] would be";
    assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg));
    errMsg = "which is larger than the limit of [100/100b]";
    assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg));
    NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().setBreaker(true).get();
    int breaks = 0;
    for (NodeStats stat : stats.getNodes()) {
        CircuitBreakerStats breakerStats = stat.getBreaker().getStats(CircuitBreaker.FIELDDATA);
        breaks += breakerStats.getTrippedCount();
    }
    assertThat(breaks, greaterThanOrEqualTo(1));
}
Also used : IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) NodesStatsResponse(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse) NodeStats(org.elasticsearch.action.admin.cluster.node.stats.NodeStats) SearchRequestBuilder(org.elasticsearch.action.search.SearchRequestBuilder) CircuitBreakerStats(org.elasticsearch.indices.breaker.CircuitBreakerStats) ArrayList(java.util.ArrayList) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Client(org.elasticsearch.client.Client) Settings(org.elasticsearch.common.settings.Settings) BreakerSettings(org.elasticsearch.indices.breaker.BreakerSettings)
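Because the test tightens the fielddata breaker via transient cluster settings, a cleanup step usually restores the defaults so later tests are not affected. A minimal sketch of that reset, assuming the same test scaffolding as above (this step is not part of the quoted test); putting null for a key removes the transient override:

// reset the fielddata breaker limit and overhead back to their defaults
Settings resetSettings = Settings.builder()
    .putNull(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey())
    .putNull(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey())
    .build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));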

Example 8 with NodeStats

Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.

Class TruncatedRecoveryIT, method testCancelRecoveryAndResume.

/**
 * This test truncates some of the larger files in the index to trigger leftovers on the recovery
 * target. During recovery, when the last chunk of a file is transferred to the replica, we throw
 * an exception to make sure the recovery fails and leaves some half-baked files on the target.
 * Later we allow a full recovery to ensure we can still recover and don't run into corruptions.
 */
public void testCancelRecoveryAndResume() throws Exception {
    assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES))).get().isAcknowledged());
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
        if (stat.getNode().isDataNode()) {
            dataNodeStats.add(stat);
        }
    }
    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    // we use two nodes: a lucky one and an unlucky one
    // the lucky one holds the primary
    // the unlucky one gets the replica and the truncated leftovers
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);
    // create the index and prevent allocation on any other nodes than the lucky one
    // we have no replicas so far and make sure that we allocate the primary on the lucky node
    assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=text", "the_id", "type=text")
        .setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards())
            // only allocate on the lucky node
            .put("index.routing.allocation.include._name", primariesNode.getNode().getName())));
    // index some docs and check if they are coming back
    int numDocs = randomIntBetween(100, 200);
    List<IndexRequestBuilder> builder = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        builder.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i), "the_id", id));
    }
    indexRandom(true, builder);
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
    }
    ensureGreen();
    // ensure we have flushed segments and merge them into a single big segment via force merge
    client().admin().indices().prepareFlush().setForce(true).get();
    client().admin().indices().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get();
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean truncate = new AtomicBoolean(true);
    for (NodeStats dataNode : dataNodeStats) {
        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
        mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {

            @Override
            protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
                if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                    RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                    logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
                    if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
                        latch.countDown();
                        throw new RuntimeException("Caused some truncated files for fun and profit");
                    }
                }
                super.sendRequest(connection, requestId, action, request, options);
            }
        });
    }
    logger.info("--> bumping replicas to 1");
    client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
        // now allow allocation on all nodes
        .put("index.routing.allocation.include._name",
            primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName())).get();
    latch.await();
    // at this point we have some truncated leftovers on the replica on the unlucky node
    // now we allow the recovery to allocate again and finish, to see if the truncated files are wiped
    truncate.compareAndSet(true, false);
    ensureGreen("test");
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
    }
}
Also used : TransportRequest(org.elasticsearch.transport.TransportRequest) MockTransportService(org.elasticsearch.test.transport.MockTransportService) RecoveryFileChunkRequest(org.elasticsearch.indices.recovery.RecoveryFileChunkRequest) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) ArrayList(java.util.ArrayList) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) NodesStatsResponse(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse) IndexRequestBuilder(org.elasticsearch.action.index.IndexRequestBuilder) NodeStats(org.elasticsearch.action.admin.cluster.node.stats.NodeStats) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) MockTransportService(org.elasticsearch.test.transport.MockTransportService) TransportService(org.elasticsearch.transport.TransportService) TransportRequestOptions(org.elasticsearch.transport.TransportRequestOptions)
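After ensureGreen("test") returns, one extra check that the replica on the unlucky node really completed its recovery is to inspect the recovery states for the index. A hedged sketch using the indices recoveries API (RecoveryResponse and RecoveryState from the recovery packages); this assertion is an addition for illustration, not part of the original test:

// verify that every shard recovery for the index reached the DONE stage
RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
for (List<RecoveryState> shardRecoveries : recoveryResponse.shardRecoveryStates().values()) {
    for (RecoveryState recoveryState : shardRecoveries) {
        assertThat(recoveryState.getStage(), equalTo(RecoveryState.Stage.DONE));
    }
}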

Example 9 with NodeStats

Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.

Class TransportClusterStatsAction, method nodeOperation.

@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
    // the positional boolean flags select which info and stats sections are collected for the cluster stats response
    NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
    NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
    List<ShardStats> shardsStats = new ArrayList<>();
    for (IndexService indexService : indicesService) {
        for (IndexShard indexShard : indexService) {
            if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                // only report on fully started shards
                shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats(), indexShard.seqNoStats()));
            }
        }
    }
    ClusterHealthStatus clusterStatus = null;
    if (clusterService.state().nodes().isLocalNodeElectedMaster()) {
        clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus();
    }
    return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]));
}
Also used : ShardStats(org.elasticsearch.action.admin.indices.stats.ShardStats) ClusterHealthStatus(org.elasticsearch.cluster.health.ClusterHealthStatus) NodeStats(org.elasticsearch.action.admin.cluster.node.stats.NodeStats) ClusterStateHealth(org.elasticsearch.cluster.health.ClusterStateHealth) CommonStats(org.elasticsearch.action.admin.indices.stats.CommonStats) IndexService(org.elasticsearch.index.IndexService) NodeInfo(org.elasticsearch.action.admin.cluster.node.info.NodeInfo) IndexShard(org.elasticsearch.index.shard.IndexShard) ArrayList(java.util.ArrayList)
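The nodeService.info(...) and nodeService.stats(...) calls above select their sections with positional boolean flags, which is hard to read at the call site. From a client, the equivalent selection uses named setters on NodesStatsRequestBuilder. A minimal sketch, assuming test-style client() and logger scaffolding; the particular sections enabled here are an illustrative assumption, not a one-to-one mapping of the flags above:

// request only a few named stats sections instead of positional booleans
NodesStatsResponse response = client().admin().cluster().prepareNodesStats()
    .clear()            // start with every section disabled
    .setOs(true)        // operating system stats
    .setJvm(true)       // JVM heap and GC stats
    .setFs(true)        // filesystem stats
    .setProcess(true)   // process-level stats
    .get();
for (NodeStats nodeStats : response.getNodes()) {
    logger.info("node [{}] heap used [{}]",
        nodeStats.getNode().getName(), nodeStats.getJvm().getMem().getHeapUsed());
}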

Example 10 with NodeStats

Use of org.elasticsearch.action.admin.cluster.node.stats.NodeStats in project elasticsearch by elastic.

Class RestFielddataAction, method buildTable.

private Table buildTable(final RestRequest request, final NodesStatsResponse nodeStatses) {
    Table table = getTableWithHeader(request);
    for (NodeStats nodeStats : nodeStatses.getNodes()) {
        if (nodeStats.getIndices().getFieldData().getFields() != null) {
            for (ObjectLongCursor<String> cursor : nodeStats.getIndices().getFieldData().getFields()) {
                table.startRow();
                table.addCell(nodeStats.getNode().getId());
                table.addCell(nodeStats.getNode().getHostName());
                table.addCell(nodeStats.getNode().getHostAddress());
                table.addCell(nodeStats.getNode().getName());
                table.addCell(cursor.key);
                table.addCell(new ByteSizeValue(cursor.value));
                table.endRow();
            }
        }
    }
    return table;
}
Also used : NodeStats(org.elasticsearch.action.admin.cluster.node.stats.NodeStats) Table(org.elasticsearch.common.Table) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue)
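The per-field numbers used to build the table are only present on NodeStats when the request asks for a field breakdown. A minimal sketch of fetching the same data from a client, assuming a Client instance (client) and logger as in the earlier test examples and the CommonStatsFlags overload of setIndices; the "*" wildcard requests the breakdown for all fields and the logging is illustrative:

// request fielddata stats broken down per field for all fields
NodesStatsResponse response = client.admin().cluster().prepareNodesStats().clear()
    .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.FieldData).fieldDataFields("*"))
    .get();
for (NodeStats nodeStats : response.getNodes()) {
    if (nodeStats.getIndices().getFieldData().getFields() != null) {
        for (ObjectLongCursor<String> cursor : nodeStats.getIndices().getFieldData().getFields()) {
            logger.info("node [{}] field [{}] fielddata size [{}]",
                nodeStats.getNode().getName(), cursor.key, new ByteSizeValue(cursor.value));
        }
    }
}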

Aggregations

NodeStats (org.elasticsearch.action.admin.cluster.node.stats.NodeStats): 30 usages
NodesStatsResponse (org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse): 17 usages
ArrayList (java.util.ArrayList): 8 usages
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 8 usages
IOException (java.io.IOException): 6 usages
Settings (org.elasticsearch.common.settings.Settings): 6 usages
NodeInfo (org.elasticsearch.action.admin.cluster.node.info.NodeInfo): 5 usages
Client (org.elasticsearch.client.Client): 5 usages
ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue): 5 usages
FsInfo (org.elasticsearch.monitor.fs.FsInfo): 5 usages
IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder): 4 usages
SearchRequestBuilder (org.elasticsearch.action.search.SearchRequestBuilder): 4 usages
SearchResponse (org.elasticsearch.action.search.SearchResponse): 4 usages
Table (org.elasticsearch.common.Table): 4 usages
BreakerSettings (org.elasticsearch.indices.breaker.BreakerSettings): 4 usages
MockTransportService (org.elasticsearch.test.transport.MockTransportService): 4 usages
TransportRequest (org.elasticsearch.transport.TransportRequest): 4 usages
TransportRequestOptions (org.elasticsearch.transport.TransportRequestOptions): 4 usages
TransportService (org.elasticsearch.transport.TransportService): 4 usages
Map (java.util.Map): 3 usages