
Example 16 with ByteSizeValue

use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

the class StoreRecovery method executeRecovery.

/**
     * Recovers the state of the shard from the store.
     */
private boolean executeRecovery(final IndexShard indexShard, Runnable recoveryRunnable) throws IndexShardRecoveryException {
    try {
        recoveryRunnable.run();
        // Check that the gateway didn't leave the shard in the init or recovering stage;
        // it is up to the gateway to call post recovery.
        final IndexShardState shardState = indexShard.state();
        final RecoveryState recoveryState = indexShard.recoveryState();
        assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
        if (logger.isTraceEnabled()) {
            RecoveryState.Index index = recoveryState.getIndex();
            StringBuilder sb = new StringBuilder();
            sb.append("    index    : files           [").append(index.totalFileCount()).append("] with total_size [").append(new ByteSizeValue(index.totalBytes())).append("], took[").append(TimeValue.timeValueMillis(index.time())).append("]\n");
            sb.append("             : recovered_files [").append(index.recoveredFileCount()).append("] with total_size [").append(new ByteSizeValue(index.recoveredBytes())).append("]\n");
            sb.append("             : reusing_files   [").append(index.reusedFileCount()).append("] with total_size [").append(new ByteSizeValue(index.reusedBytes())).append("]\n");
            sb.append("    verify_index    : took [").append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [").append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
            sb.append("    translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations()).append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
            logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb);
        } else if (logger.isDebugEnabled()) {
            logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time()));
        }
        return true;
    } catch (IndexShardRecoveryException e) {
        if (indexShard.state() == IndexShardState.CLOSED) {
            // got closed on us, just ignore this recovery
            return false;
        }
        if ((e.getCause() instanceof IndexShardClosedException) || (e.getCause() instanceof IndexShardNotStartedException)) {
            // got closed on us, just ignore this recovery
            return false;
        }
        throw e;
    } catch (IndexShardClosedException | IndexShardNotStartedException e) {
        // the shard was closed or never started; swallow the exception and fall through to return false
    } catch (Exception e) {
        if (indexShard.state() == IndexShardState.CLOSED) {
            // got closed on us, just ignore this recovery
            return false;
        }
        throw new IndexShardRecoveryException(shardId, "failed recovery", e);
    }
    return false;
}
Also used : ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), RecoveryState (org.elasticsearch.indices.recovery.RecoveryState), IndexShardRestoreFailedException (org.elasticsearch.index.snapshots.IndexShardRestoreFailedException), EngineException (org.elasticsearch.index.engine.EngineException), IOException (java.io.IOException)
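
Across these examples the pattern is the same: raw byte counts are wrapped in ByteSizeValue so that log output is human-readable. A minimal, standalone sketch of that formatting behavior; the expected strings in the comments are approximate, based on ByteSizeValue's usual rendering in this version:

import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeValueFormatting {
    public static void main(String[] args) {
        // The single-argument constructor interprets its argument as raw bytes.
        System.out.println(new ByteSizeValue(512));          // expected: "512b"
        System.out.println(new ByteSizeValue(1536));         // expected: "1.5kb"
        System.out.println(new ByteSizeValue(1073741824L));  // expected: "1gb"
    }
}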

Example 17 with ByteSizeValue

use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

the class IndexShard method refresh.

/**
     * Writes all indexing changes to disk and opens a new searcher reflecting all changes.  This can throw {@link AlreadyClosedException}.
     */
public void refresh(String source) {
    verifyNotClosed();
    if (canIndex()) {
        long bytes = getEngine().getIndexBufferRAMBytesUsed();
        writingBytes.addAndGet(bytes);
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("refresh with source [{}] indexBufferRAMBytesUsed [{}]", source, new ByteSizeValue(bytes));
            }
            long time = System.nanoTime();
            getEngine().refresh(source);
            refreshMetric.inc(System.nanoTime() - time);
        } finally {
            if (logger.isTraceEnabled()) {
                logger.trace("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
            }
            writingBytes.addAndGet(-bytes);
        }
    } else {
        if (logger.isTraceEnabled()) {
            logger.trace("refresh with source [{}]", source);
        }
        long time = System.nanoTime();
        getEngine().refresh(source);
        refreshMetric.inc(System.nanoTime() - time);
    }
}
Also used : ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue)
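
One detail worth noting in refresh(): the isTraceEnabled() guard is not redundant. Even with parameterized log messages, the ByteSizeValue argument is allocated eagerly at the call site, so the guard skips that allocation entirely when tracing is off. A small illustrative sketch (the class name is ours, not from the Elasticsearch source):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.unit.ByteSizeValue;

public class GuardedTraceLogging {
    private static final Logger logger = LogManager.getLogger(GuardedTraceLogging.class);

    public static void main(String[] args) {
        long bytes = 1_572_864L;
        // Without the guard, new ByteSizeValue(bytes) would be constructed
        // even when the trace level is disabled; the check avoids that work.
        if (logger.isTraceEnabled()) {
            logger.trace("indexBufferRAMBytesUsed [{}]", new ByteSizeValue(bytes));
        }
    }
}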

Example 18 with ByteSizeValue

use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

the class IndexShard method writeIndexingBuffer.

/**
     * Called when our shard is using too much heap and should move buffered indexed/deleted documents to disk.
     */
public void writeIndexingBuffer() {
    if (canIndex() == false) {
        throw new UnsupportedOperationException();
    }
    try {
        Engine engine = getEngine();
        long bytes = engine.getIndexBufferRAMBytesUsed();
        // NOTE: this can overestimate by up to 20% if the engine uses IW.flush rather than refresh
        // (because version map memory is low enough). That is fine: after the writes finish, the
        // IndexingMemoryController will poll again, see that up to 20% is still in use, and
        // continue writing if necessary:
        logger.debug("add [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
        writingBytes.addAndGet(bytes);
        try {
            engine.writeIndexingBuffer();
        } finally {
            writingBytes.addAndGet(-bytes);
            logger.debug("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
        }
    } catch (Exception e) {
        handleRefreshException(e);
    }
}
Also used : ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), Engine (org.elasticsearch.index.engine.Engine), IndexFormatTooNewException (org.apache.lucene.index.IndexFormatTooNewException), AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException), IndexNotFoundException (org.elasticsearch.index.IndexNotFoundException), ClosedByInterruptException (java.nio.channels.ClosedByInterruptException), ThreadInterruptedException (org.apache.lucene.util.ThreadInterruptedException), RecoveryFailedException (org.elasticsearch.indices.recovery.RecoveryFailedException), EngineException (org.elasticsearch.index.engine.EngineException), IOException (java.io.IOException), ElasticsearchException (org.elasticsearch.ElasticsearchException), NoSuchFileException (java.nio.file.NoSuchFileException), TimeoutException (java.util.concurrent.TimeoutException), CorruptIndexException (org.apache.lucene.index.CorruptIndexException), RefreshFailedEngineException (org.elasticsearch.index.engine.RefreshFailedEngineException), FileNotFoundException (java.io.FileNotFoundException), IndexFormatTooOldException (org.apache.lucene.index.IndexFormatTooOldException)
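
The load-bearing part of both refresh() and writeIndexingBuffer() is the add-then-subtract bookkeeping around writingBytes: the counter is charged before the write starts and released in a finally block, so it can never leak when the write fails. A stripped-down sketch of the pattern (class and method names here are illustrative, not from the Elasticsearch source):

import java.util.concurrent.atomic.AtomicLong;

public class WritingBytesAccounting {
    // Mirrors the shard-level counter: bytes currently being moved to disk.
    private final AtomicLong writingBytes = new AtomicLong();

    public void writeBuffered(long bufferedBytes, Runnable writeToDisk) {
        writingBytes.addAndGet(bufferedBytes);       // charge before the write starts
        try {
            writeToDisk.run();
        } finally {
            writingBytes.addAndGet(-bufferedBytes);  // always release, even on failure
        }
    }

    public long inFlightBytes() {
        return writingBytes.get();
    }
}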

Example 19 with ByteSizeValue

use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

the class ExceptionSerializationTests method testRecoverFilesRecoveryException.

public void testRecoverFilesRecoveryException() throws IOException {
    ShardId id = new ShardId("foo", "_na_", 1);
    ByteSizeValue bytes = new ByteSizeValue(randomIntBetween(0, 10000));
    RecoverFilesRecoveryException ex = serialize(new RecoverFilesRecoveryException(id, 10, bytes, null));
    assertEquals(ex.getShardId(), id);
    assertEquals(ex.numberOfFiles(), 10);
    assertEquals(ex.totalFilesSize(), bytes);
    assertEquals(ex.getMessage(), "Failed to transfer [10] files with total size of [" + bytes + "]");
    assertNull(ex.getCause());
    ex = serialize(new RecoverFilesRecoveryException(null, 10, bytes, new NullPointerException()));
    assertNull(ex.getShardId());
    assertEquals(ex.numberOfFiles(), 10);
    assertEquals(ex.totalFilesSize(), bytes);
    assertEquals(ex.getMessage(), "Failed to transfer [10] files with total size of [" + bytes + "]");
    assertTrue(ex.getCause() instanceof NullPointerException);
}
Also used : ShardId (org.elasticsearch.index.shard.ShardId), RecoverFilesRecoveryException (org.elasticsearch.indices.recovery.RecoverFilesRecoveryException), ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue)
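
The serialize(...) helper above round-trips the exception through Elasticsearch's stream abstraction, which in turn serializes the embedded ByteSizeValue. Assuming ByteSizeValue implements Writeable in this version, as the test implies, a direct round-trip would look roughly like this:

import java.io.IOException;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeValueRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteSizeValue original = new ByteSizeValue(4096);
        // Write to an in-memory stream...
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out);
        // ...then read an equal copy back from the same bytes.
        StreamInput in = out.bytes().streamInput();
        ByteSizeValue copy = new ByteSizeValue(in);
        assert original.equals(copy) : "round-trip should preserve the value";
    }
}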

Example 20 with ByteSizeValue

use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.

the class BulkProcessorIT method testBulkProcessorConcurrentRequestsNoNodeAvailableException.

// https://github.com/elastic/elasticsearch/issues/5038
public void testBulkProcessorConcurrentRequestsNoNodeAvailableException() throws Exception {
    // We create a transport client with no nodes to make sure it throws NoNodeAvailableException.
    Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
    Client transportClient = new MockTransportClient(settings);
    int bulkActions = randomIntBetween(10, 100);
    int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
    int concurrentRequests = randomIntBetween(0, 10);
    int expectedBulkActions = numDocs / bulkActions;
    final CountDownLatch latch = new CountDownLatch(expectedBulkActions);
    int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1;
    final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions);
    BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch);
    try (BulkProcessor processor = BulkProcessor.builder(transportClient, listener).setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions).setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {
        indexDocs(transportClient, processor, numDocs);
        latch.await();
        assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions));
        assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions));
        assertThat(listener.bulkFailures.size(), equalTo(expectedBulkActions));
        assertThat(listener.bulkItems.size(), equalTo(0));
    }
    closeLatch.await();
    assertThat(listener.bulkFailures.size(), equalTo(totalExpectedBulkActions));
    assertThat(listener.bulkItems.size(), equalTo(0));
    transportClient.close();
}
Also used : ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue), MockTransportClient (org.elasticsearch.transport.MockTransportClient), Client (org.elasticsearch.client.Client), CountDownLatch (java.util.concurrent.CountDownLatch), Settings (org.elasticsearch.common.settings.Settings)
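
Besides the (value, unit) constructor passed to setBulkSize(...) above, ByteSizeValue can also be built from a settings-style string. A short sketch, assuming the static parseBytesSizeValue helper (which takes a setting name for error messages) is available in this version:

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class BulkSizeConfig {
    public static void main(String[] args) {
        // Unit-based constructor, as used for setBulkSize(...) above.
        ByteSizeValue oneGb = new ByteSizeValue(1, ByteSizeUnit.GB);

        // Parsing from a string; the second argument names the setting
        // so parse failures produce a useful error message.
        ByteSizeValue parsed = ByteSizeValue.parseBytesSizeValue("1gb", "bulk.size");

        assert oneGb.equals(parsed) : "\"1gb\" should parse to the same value";
    }
}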

Aggregations

ByteSizeValue (org.elasticsearch.common.unit.ByteSizeValue): 146
Settings (org.elasticsearch.common.settings.Settings): 23
Test (org.junit.Test): 21
IOException (java.io.IOException): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 13
ArrayList (java.util.ArrayList): 11
TimeValue (org.elasticsearch.common.unit.TimeValue): 11
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9
Matchers.containsString (org.hamcrest.Matchers.containsString): 9
List (java.util.List): 8
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8
Path (java.nio.file.Path): 7
Translog (org.elasticsearch.index.translog.Translog): 7
Arrays (java.util.Arrays): 6
Collections (java.util.Collections): 6
Collectors (java.util.stream.Collectors): 6
BulkProcessor (org.elasticsearch.action.bulk.BulkProcessor): 6
BulkRequest (org.elasticsearch.action.bulk.BulkRequest): 6
BytesArray (org.elasticsearch.common.bytes.BytesArray): 6
Matchers.equalTo (org.hamcrest.Matchers.equalTo): 6