
Example 11 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class DiskThresholdDecider, method canRemain:

@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
    if (shardRouting.currentNodeId().equals(node.nodeId()) == false) {
        throw new IllegalArgumentException("Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]");
    }
    final ClusterInfo clusterInfo = allocation.clusterInfo();
    final ImmutableOpenMap<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
    final Decision decision = earlyTerminate(allocation, usages);
    if (decision != null) {
        return decision;
    }
    // subtractLeavingShards is passed as true here: since this check is only for shards that are already
    // allocated and remaining, we will *eventually* have enough disk as shards move away. No new shards will
    // be incoming, since canAllocate passes false for this check.
    final DiskUsage usage = getDiskUsage(node, allocation, usages, true);
    final String dataPath = clusterInfo.getDataPath(shardRouting);
    // If this node is already above the high threshold, the shard cannot remain (get it off!)
    final double freeDiskPercentage = usage.getFreeDiskAsPercentage();
    final long freeBytes = usage.getFreeBytes();
    if (logger.isTraceEnabled()) {
        logger.trace("node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
    }
    if (dataPath == null || usage.getPath().equals(dataPath) == false) {
        return allocation.decision(Decision.YES, NAME, "this shard is not allocated on the most utilized disk and can remain");
    }
    if (freeBytes < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
        if (logger.isDebugEnabled()) {
            logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain", diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId());
        }
        return allocation.decision(Decision.NO, NAME, "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] " + "and there is less than the required [%s] free space on node, actual free: [%s]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes));
    }
    if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdHigh()) {
        if (logger.isDebugEnabled()) {
            logger.debug("less than the required {}% free disk threshold ({}% free) on node {}, shard cannot remain", diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage, node.nodeId());
        }
        return allocation.decision(Decision.NO, NAME, "the shard cannot remain on this node because it is above the high watermark cluster setting [%s=%s] " + "and there is less than the required [%s%%] free disk on node, actual free: [%s%%]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), diskThresholdSettings.getFreeDiskThresholdHigh(), freeDiskPercentage);
    }
    return allocation.decision(Decision.YES, NAME, "there is enough disk on this node for the shard to remain, free: [%s]", new ByteSizeValue(freeBytes));
}
Also used : ClusterInfo(org.elasticsearch.cluster.ClusterInfo) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) DiskUsage(org.elasticsearch.cluster.DiskUsage)
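
A minimal sketch, not from the Elasticsearch source, showing the ByteSizeValue calls the decider leans on: the ByteSizeValue(long) and ByteSizeValue(long, ByteSizeUnit) constructors and getBytes(). The watermark and free-space figures are hypothetical.

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class WatermarkCheckSketch {
    public static void main(String[] args) {
        // hypothetical high watermark of 500mb and 200mb of actual free space
        ByteSizeValue freeBytesThresholdHigh = new ByteSizeValue(500, ByteSizeUnit.MB);
        long freeBytes = new ByteSizeValue(200, ByteSizeUnit.MB).getBytes();
        if (freeBytes < freeBytesThresholdHigh.getBytes()) {
            // mirrors the Decision.NO branch of canRemain; toString() renders human-readably, e.g. "200mb"
            System.out.println("shard cannot remain, actual free: [" + new ByteSizeValue(freeBytes) + "]");
        }
    }
}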

Example 12 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class RestFielddataAction, method buildTable:

private Table buildTable(final RestRequest request, final NodesStatsResponse nodesStatsResponse) {
    Table table = getTableWithHeader(request);
    for (NodeStats nodeStats : nodesStatsResponse.getNodes()) {
        if (nodeStats.getIndices().getFieldData().getFields() != null) {
            for (ObjectLongCursor<String> cursor : nodeStats.getIndices().getFieldData().getFields()) {
                table.startRow();
                table.addCell(nodeStats.getNode().getId());
                table.addCell(nodeStats.getNode().getHostName());
                table.addCell(nodeStats.getNode().getHostAddress());
                table.addCell(nodeStats.getNode().getName());
                table.addCell(cursor.key);
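                // cursor.value is the per-field fielddata size in raw bytes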
                table.addCell(new ByteSizeValue(cursor.value));
                table.endRow();
            }
        }
    }
    return table;
}
Also used : NodeStats(org.elasticsearch.action.admin.cluster.node.stats.NodeStats) Table(org.elasticsearch.common.Table) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue)
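
For context, a minimal sketch with an assumed byte count: cursor.value above is a raw long, and ByteSizeValue's toString() is what makes the table cell human-readable.

import org.elasticsearch.common.unit.ByteSizeValue;

public class FielddataCellSketch {
    public static void main(String[] args) {
        // hypothetical per-field fielddata footprint, standing in for cursor.value
        long fielddataBytes = 10_485_760L;
        // prints a human-readable size such as "10mb"
        System.out.println(new ByteSizeValue(fielddataBytes));
    }
}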

Example 13 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class SnapshotShardsService, method snapshot:

/**
     * Creates a snapshot of a single shard.
     *
     * @param indexShard     the shard to snapshot
     * @param snapshot       the snapshot metadata
     * @param indexId        the id of the index the shard belongs to
     * @param snapshotStatus the status tracker for this shard snapshot
     */
private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId, final IndexShardSnapshotStatus snapshotStatus) {
    Repository repository = snapshotsService.getRepositoriesService().repository(snapshot.getRepository());
    ShardId shardId = indexShard.shardId();
    if (!indexShard.routingEntry().primary()) {
        throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary");
    }
    if (indexShard.routingEntry().relocating()) {
        // do not snapshot a primary that is in the process of relocating, so we won't get conflicts
        throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating");
    }
    if (indexShard.state() == IndexShardState.CREATED || indexShard.state() == IndexShardState.RECOVERING) {
        // shard has just been created, or still recovering
        throw new IndexShardSnapshotFailedException(shardId, "shard didn't fully recover yet");
    }
    try {
        // we flush first to make sure we get the latest writes snapshotted
        IndexCommit snapshotIndexCommit = indexShard.acquireIndexCommit(true);
        try {
            repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotIndexCommit, snapshotStatus);
            if (logger.isDebugEnabled()) {
                StringBuilder sb = new StringBuilder();
                sb.append("    index    : version [").append(snapshotStatus.indexVersion()).append("], number_of_files [").append(snapshotStatus.numberOfFiles()).append("] with total_size [").append(new ByteSizeValue(snapshotStatus.totalSize())).append("]\n");
                logger.debug("snapshot ({}) completed to {}, took [{}]\n{}", snapshot, repository, TimeValue.timeValueMillis(snapshotStatus.time()), sb);
            }
        } finally {
            indexShard.releaseIndexCommit(snapshotIndexCommit);
        }
    } catch (SnapshotFailedEngineException | IndexShardSnapshotFailedException e) {
        // rethrow snapshot-specific failures unchanged
        throw e;
    } catch (Exception e) {
        throw new IndexShardSnapshotFailedException(shardId, "Failed to snapshot", e);
    }
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) Repository(org.elasticsearch.repositories.Repository) SnapshotFailedEngineException(org.elasticsearch.index.engine.SnapshotFailedEngineException) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) IndexShardSnapshotFailedException(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException) IndexCommit(org.apache.lucene.index.IndexCommit) IOException(java.io.IOException)
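
A hedged sketch of the debug line above, with made-up stats standing in for snapshotStatus.totalSize() and snapshotStatus.time(); it uses only ByteSizeValue and TimeValue as they appear in the example.

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

public class SnapshotLogSketch {
    public static void main(String[] args) {
        long totalSizeBytes = 123_456_789L; // hypothetical snapshotStatus.totalSize()
        long tookMillis = 4_200L;           // hypothetical snapshotStatus.time()
        StringBuilder sb = new StringBuilder();
        sb.append("    index    : total_size [").append(new ByteSizeValue(totalSizeBytes)).append("]");
        System.out.println("snapshot completed, took [" + TimeValue.timeValueMillis(tookMillis) + "]\n" + sb);
    }
}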

Example 14 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class AbstractAsyncBulkByScrollAction, method prepareBulkRequest:

/**
     * Prepare the bulk request. Called on the generic thread pool after some preflight checks have been done on the SearchResponse and any
     * delay has been slept. Uses the generic thread pool because reindex is rare enough not to need its own thread pool and because the
     * thread may be blocked by the user script.
     */
void prepareBulkRequest(TimeValue thisBatchStartTime, ScrollableHitSource.Response response) {
    if (task.isCancelled()) {
        finishHim(null);
        return;
    }
    if (response.getHits().isEmpty()) {
        refreshAndFinish(emptyList(), emptyList(), false);
        return;
    }
    task.countBatch();
    List<? extends ScrollableHitSource.Hit> hits = response.getHits();
    if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
        // Truncate the hits if we have more than the request size
        long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed());
        if (remaining < hits.size()) {
            hits = hits.subList(0, (int) remaining);
        }
    }
    BulkRequest request = buildBulk(hits);
    if (request.requests().isEmpty()) {
        /*
             * If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
             */
        startNextScroll(thisBatchStartTime, 0);
        return;
    }
    request.timeout(mainRequest.getTimeout());
    request.waitForActiveShards(mainRequest.getWaitForActiveShards());
    if (logger.isDebugEnabled()) {
        logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(), new ByteSizeValue(request.estimatedSizeInBytes()));
    }
    sendBulkRequest(thisBatchStartTime, request);
}
Also used : BulkRequest(org.elasticsearch.action.bulk.BulkRequest) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue)
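
A minimal sketch of the size estimate being logged, assuming a one-entry bulk; the index name, type, and document are hypothetical.

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.unit.ByteSizeValue;

public class BulkSizeSketch {
    public static void main(String[] args) {
        BulkRequest request = new BulkRequest();
        // hypothetical single-entry bulk, mirroring the debug log in prepareBulkRequest
        request.add(new IndexRequest("index", "type", "1").source("field", "value"));
        System.out.println("sending [" + request.requests().size() + "] entry, ["
                + new ByteSizeValue(request.estimatedSizeInBytes()) + "] bulk request");
    }
}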

Example 15 with ByteSizeValue

Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

From the class CircuitBreakerStats, method toXContent:

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject(name.toLowerCase(Locale.ROOT));
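    // limit and estimated are each emitted twice: the raw byte count, then a human-readable ByteSizeValue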
    builder.field(Fields.LIMIT, limit);
    builder.field(Fields.LIMIT_HUMAN, new ByteSizeValue(limit));
    builder.field(Fields.ESTIMATED, estimated);
    builder.field(Fields.ESTIMATED_HUMAN, new ByteSizeValue(estimated));
    builder.field(Fields.OVERHEAD, overhead);
    builder.field(Fields.TRIPPED_COUNT, trippedCount);
    builder.endObject();
    return builder;
}
Also used : ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue)
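
The pattern above pairs a raw byte count with a human-readable ByteSizeValue. A hedged sketch of that pairing, assuming hypothetical field names and the builder.string() accessor from the 5.x codebase these examples come from:

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class StatsXContentSketch {
    public static void main(String[] args) throws Exception {
        long limit = 726_571_417L; // hypothetical breaker limit in bytes
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        builder.field("limit_size_in_bytes", limit);                      // raw, machine-readable
        builder.field("limit_size", new ByteSizeValue(limit).toString()); // human-readable, e.g. "692.9mb"
        builder.endObject();
        System.out.println(builder.string());
    }
}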

Aggregations

ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue): 146
Settings (org.elasticsearch.common.settings.Settings): 23
Test (org.junit.Test): 21
IOException (java.io.IOException): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 13
ArrayList (java.util.ArrayList): 11
TimeValue (org.elasticsearch.common.unit.TimeValue): 11
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9
Matchers.containsString (org.hamcrest.Matchers.containsString): 9
List (java.util.List): 8
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8
Path (java.nio.file.Path): 7
Translog (org.elasticsearch.index.translog.Translog): 7
Arrays (java.util.Arrays): 6
Collections (java.util.Collections): 6
Collectors (java.util.stream.Collectors): 6
BulkProcessor (org.elasticsearch.action.bulk.BulkProcessor): 6
BulkRequest (org.elasticsearch.action.bulk.BulkRequest): 6
BytesArray (org.elasticsearch.common.bytes.BytesArray): 6
Matchers.equalTo (org.hamcrest.Matchers.equalTo): 6