
Example 6 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class IndexRecoveryIT, method slowDownRecovery.

private void slowDownRecovery(ByteSizeValue shardSize) {
    long chunkSize = Math.max(1, shardSize.getBytes() / 10);
    // cap the recovery rate at exactly one chunk's worth of bytes per second,
    // so each file chunk takes roughly a second to transfer
    assertTrue(client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder()
            .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES)
            .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)))
        .get()
        .isAcknowledged());
}
Also used: ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue)
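
Before the remaining examples, a quick orientation on the value class itself. This is a minimal, self-contained sketch of how ByteSizeValue is typically constructed and read back, using only the constructors and the getBytes() accessor that already appear on this page; the 10 MB figure and the class name are illustrative, not from the original sources.

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeValueSketch {
    public static void main(String[] args) {
        // construct from a value plus a unit, as in the settings calls above
        ByteSizeValue shardSize = new ByteSizeValue(10, ByteSizeUnit.MB);
        // getBytes() normalizes the value to bytes, as used in slowDownRecovery
        long chunkSize = Math.max(1, shardSize.getBytes() / 10);
        System.out.println(shardSize + " -> chunk size of " + chunkSize + " bytes");
    }
}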

Example 7 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class TruncatedRecoveryIT, method testCancelRecoveryAndResume.

/**
     * This test truncates some of the larger files in the index to trigger leftovers on the recovery
     * target. During recovery, when the last chunk of a file is transferred to the replica, we throw an
     * exception to make sure the recovery fails and leaves some half-baked files on the target. Later we
     * allow full recovery to ensure we can still recover and don't run into corruptions.
     */
public void testCancelRecoveryAndResume() throws Exception {
    assertTrue(client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder()
            .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)))
        .get()
        .isAcknowledged());
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
        if (stat.getNode().isDataNode()) {
            dataNodeStats.add(stat);
        }
    }
    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    // we use 2 nodes: a lucky one and an unlucky one;
    // the lucky one holds the primary,
    // the unlucky one gets the replica and the truncated leftovers
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);
    // create the index and prevent allocation on any node other than the lucky one;
    // we have no replicas so far, so the primary ends up on the lucky node
    assertAcked(prepareCreate("test")
        .addMapping("type1", "field1", "type=text", "the_id", "type=text")
        .setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards())
            // only allocate on the lucky node
            .put("index.routing.allocation.include._name", primariesNode.getNode().getName())));
    // index some docs and check that they come back
    int numDocs = randomIntBetween(100, 200);
    List<IndexRequestBuilder> builder = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        builder.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i), "the_id", id));
    }
    indexRandom(true, builder);
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
    }
    ensureGreen();
    // ensure we have flushed segments, then merge them into one big segment via force-merge
    client().admin().indices().prepareFlush().setForce(true).get();
    client().admin().indices().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get();
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean truncate = new AtomicBoolean(true);
    for (NodeStats dataNode : dataNodeStats) {
        MockTransportService mockTransportService =
            (MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName());
        mockTransportService.addDelegate(
            internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {

            @Override
            protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
                if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                    RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                    logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk());
                    if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
                        latch.countDown();
                        throw new RuntimeException("Caused some truncated files for fun and profit");
                    }
                }
                super.sendRequest(connection, requestId, action, request, options);
            }
        });
    }
    logger.info("--> bumping replicas to 1");
    client().admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            // now allow allocation on both data nodes
            .put("index.routing.allocation.include._name",
                primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()))
        .get();
    latch.await();
    // at this point we have some truncated leftovers on the replica on the unlucky node;
    // now we allow the recovery to allocate again and finish, to check that the truncated files get wiped
    truncate.compareAndSet(true, false);
    ensureGreen("test");
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
    }
}
Also used: TransportRequest (org.elasticsearch.transport.TransportRequest), MockTransportService (org.elasticsearch.test.transport.MockTransportService), RecoveryFileChunkRequest (org.elasticsearch.indices.recovery.RecoveryFileChunkRequest), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), ArrayList (java.util.ArrayList), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), NodesStatsResponse (org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse), IndexRequestBuilder (org.elasticsearch.action.index.IndexRequestBuilder), NodeStats (org.elasticsearch.action.admin.cluster.node.stats.NodeStats), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), TransportService (org.elasticsearch.transport.TransportService), TransportRequestOptions (org.elasticsearch.transport.TransportRequestOptions)
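
The coordination trick in this test is independent of Elasticsearch: an AtomicBoolean gates the fault injection, and a CountDownLatch tells the test thread that the fault has fired at least once before injection is switched off. A minimal stand-alone sketch of the same pattern (all names here are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class FaultGateSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch faultFired = new CountDownLatch(1);
        AtomicBoolean injectFault = new AtomicBoolean(true);

        Thread transfer = new Thread(() -> {
            int delivered = 0;
            while (delivered < 3) {
                if (injectFault.get()) {
                    // stand-in for the RuntimeException thrown in sendRequest above
                    faultFired.countDown();
                    continue;
                }
                delivered++;
            }
            System.out.println("all chunks delivered after the fault was cleared");
        });
        transfer.start();

        faultFired.await();                     // block until the fault has fired at least once
        injectFault.compareAndSet(true, false); // stop injecting, let the transfer finish
        transfer.join();
    }
}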

Example 8 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class NodeInfo, method readFrom.

@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    version = Version.readVersion(in);
    build = Build.readBuild(in);
    // optional values on the wire are prefixed with a presence flag
    if (in.readBoolean()) {
        totalIndexingBuffer = new ByteSizeValue(in.readLong());
    } else {
        totalIndexingBuffer = null;
    }
    if (in.readBoolean()) {
        settings = Settings.readSettingsFromStream(in);
    }
    os = in.readOptionalWriteable(OsInfo::new);
    process = in.readOptionalWriteable(ProcessInfo::new);
    jvm = in.readOptionalWriteable(JvmInfo::new);
    threadPool = in.readOptionalWriteable(ThreadPoolInfo::new);
    transport = in.readOptionalWriteable(TransportInfo::new);
    http = in.readOptionalWriteable(HttpInfo::new);
    plugins = in.readOptionalWriteable(PluginsAndModules::new);
    ingest = in.readOptionalWriteable(IngestInfo::new);
}
Also used: ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue)
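
The readFrom body relies on a presence-flag convention for optional fields: a boolean is read first, and the payload only when the flag is true, so the writing side must mirror the reads exactly. StreamInput and StreamOutput are Elasticsearch-internal, so this sketch shows the same pattern with plain java.io streams to keep it runnable; it illustrates the idea, not the actual NodeInfo.writeTo.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OptionalFieldSketch {

    // writer: presence flag first, payload only when present
    static void writeOptionalLong(DataOutputStream out, Long value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeLong(value);
        }
    }

    // reader: exact mirror of the writer, as in readFrom above
    static Long readOptionalLong(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readLong() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalLong(out, 104857600L); // e.g. a 100mb indexing buffer, in bytes
        writeOptionalLong(out, null);

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalLong(in)); // 104857600
        System.out.println(readOptionalLong(in)); // null
    }
}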

Example 9 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class ChildMemoryCircuitBreaker, method noLimit.

private long noLimit(long bytes, String label) {
    long newUsed = this.used.addAndGet(bytes);
    if (logger.isTraceEnabled()) {
        logger.trace("[{}] Adding [{}][{}] to used bytes [new used: [{}], limit: [-1b]]", this.name, new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed));
    }
    return newUsed;
}
Also used: ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue)
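
A design note on this method: the isTraceEnabled() guard is not only about suppressing output. Without it, the two ByteSizeValue objects used purely for log formatting would be allocated on every call even when trace logging is disabled, so the guard keeps the unlimited-breaker accounting path allocation-free.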

Example 10 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class MemoryCircuitBreaker, method addEstimateBytesAndMaybeBreak.

/**
     * Add a number of bytes, tripping the circuit breaker if the aggregated
     * estimates are above the limit. Automatically trips the breaker if the
     * memory limit is set to 0. Will never trip the breaker if the limit is
     * set below 0, but can still be used to aggregate estimations.
     * @param bytes number of bytes to add to the breaker
     * @param label label for the bytes being added, used in log and exception messages
     * @return number of "used" bytes so far
     */
@Override
public double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException {
    // short-circuit on no data allowed, immediately throwing an exception
    if (memoryBytesLimit == 0) {
        circuitBreak(label, bytes);
    }
    long newUsed;
    // if there is no limit (-1), just add the bytes without checking against the
    // limit), which makes the RamAccountingTermsEnum case faster
    if (this.memoryBytesLimit == -1) {
        newUsed = this.used.addAndGet(bytes);
        if (logger.isTraceEnabled()) {
            logger.trace("Adding [{}][{}] to used bytes [new used: [{}], limit: [-1b]]", new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed));
        }
        return newUsed;
    }
    // Otherwise, check the addition and commit the addition, looping if
    // there are conflicts. May result in additional logging, but it's
    // trace logging and shouldn't be counted on for additions.
    long currentUsed;
    do {
        currentUsed = this.used.get();
        newUsed = currentUsed + bytes;
        long newUsedWithOverhead = (long) (newUsed * overheadConstant);
        if (logger.isTraceEnabled()) {
            logger.trace("Adding [{}][{}] to used bytes [new used: [{}], limit: {} [{}], estimate: {} [{}]]", new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed), memoryBytesLimit, new ByteSizeValue(memoryBytesLimit), newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead));
        }
        if (memoryBytesLimit > 0 && newUsedWithOverhead > memoryBytesLimit) {
            logger.warn("New used memory {} [{}] from field [{}] would be larger than configured breaker: {} [{}], breaking", newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead), label, memoryBytesLimit, new ByteSizeValue(memoryBytesLimit));
            circuitBreak(label, newUsedWithOverhead);
        }
    // Attempt to set the new used value, but make sure it hasn't changed
    // underneath us, if it has, keep trying until we are able to set it
    } while (!this.used.compareAndSet(currentUsed, newUsed));
    return newUsed;
}
Also used: ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue)
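
The core of this method is a lock-free accumulate-and-check loop: snapshot the counter, compute the candidate total, trip the breaker if the overhead-adjusted total exceeds the limit, and retry when compareAndSet reports that another thread updated the counter first. A minimal, self-contained sketch of the same loop follows; the class name and the IllegalStateException are illustrative (the real breaker throws CircuitBreakingException and applies an overhead constant):

import java.util.concurrent.atomic.AtomicLong;

public class CasLimitSketch {

    private final AtomicLong used = new AtomicLong();
    private final long limit;

    CasLimitSketch(long limit) {
        this.limit = limit;
    }

    // same compare-and-set loop as addEstimateBytesAndMaybeBreak above
    long addAndMaybeBreak(long bytes) {
        long current;
        long next;
        do {
            current = used.get();
            next = current + bytes;
            if (limit > 0 && next > limit) {
                throw new IllegalStateException("adding " + bytes + " would exceed the limit of " + limit);
            }
            // retry if another thread changed `used` between get() and here
        } while (!used.compareAndSet(current, next));
        return next;
    }

    public static void main(String[] args) {
        CasLimitSketch breaker = new CasLimitSketch(100);
        System.out.println(breaker.addAndMaybeBreak(60)); // 60
        System.out.println(breaker.addAndMaybeBreak(30)); // 90
        try {
            breaker.addAndMaybeBreak(20);                 // 110 > 100, trips
        } catch (IllegalStateException e) {
            System.out.println("tripped: " + e.getMessage());
        }
    }
}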

Aggregations

ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue): 146
Settings (org.elasticsearch.common.settings.Settings): 23
Test (org.junit.Test): 21
IOException (java.io.IOException): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 13
ArrayList (java.util.ArrayList): 11
TimeValue (org.elasticsearch.common.unit.TimeValue): 11
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9
Matchers.containsString (org.hamcrest.Matchers.containsString): 9
List (java.util.List): 8
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8
Path (java.nio.file.Path): 7
Translog (org.elasticsearch.index.translog.Translog): 7
Arrays (java.util.Arrays): 6
Collections (java.util.Collections): 6
Collectors (java.util.stream.Collectors): 6
BulkProcessor (org.elasticsearch.action.bulk.BulkProcessor): 6
BulkRequest (org.elasticsearch.action.bulk.BulkRequest): 6
BytesArray (org.elasticsearch.common.bytes.BytesArray): 6
Matchers.equalTo (org.hamcrest.Matchers.equalTo): 6