Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class StoreRecovery, the method executeRecovery:
/**
* Recovers the state of the shard from the store.
*/
private boolean executeRecovery(final IndexShard indexShard, Runnable recoveryRunnable) throws IndexShardRecoveryException {
    try {
        recoveryRunnable.run();
        // Check that the gateway didn't leave the shard in init or recovering stage; it is up to the gateway
        // to call post recovery.
        final IndexShardState shardState = indexShard.state();
        final RecoveryState recoveryState = indexShard.recoveryState();
        assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
        if (logger.isTraceEnabled()) {
            RecoveryState.Index index = recoveryState.getIndex();
            StringBuilder sb = new StringBuilder();
            sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [").append(new ByteSizeValue(index.totalBytes())).append("], took [").append(TimeValue.timeValueMillis(index.time())).append("]\n");
            sb.append(" : recovered_files [").append(index.recoveredFileCount()).append("] with total_size [").append(new ByteSizeValue(index.recoveredBytes())).append("]\n");
            sb.append(" : reusing_files [").append(index.reusedFileCount()).append("] with total_size [").append(new ByteSizeValue(index.reusedBytes())).append("]\n");
            sb.append(" verify_index : took [").append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [").append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
            sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations()).append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
            logger.trace("recovery completed from [shard_store], took [{}]\n{}", TimeValue.timeValueMillis(recoveryState.getTimer().time()), sb);
        } else if (logger.isDebugEnabled()) {
            logger.debug("recovery completed from [shard_store], took [{}]", TimeValue.timeValueMillis(recoveryState.getTimer().time()));
        }
        return true;
    } catch (IndexShardRecoveryException e) {
        if (indexShard.state() == IndexShardState.CLOSED) {
            // got closed on us, just ignore this recovery
            return false;
        }
        if ((e.getCause() instanceof IndexShardClosedException) || (e.getCause() instanceof IndexShardNotStartedException)) {
            // got closed on us, just ignore this recovery
            return false;
        }
        throw e;
    } catch (IndexShardClosedException | IndexShardNotStartedException e) {
        // the shard got closed or was never started; fall through and report the recovery as not completed
    } catch (Exception e) {
        if (indexShard.state() == IndexShardState.CLOSED) {
            // got closed on us, just ignore this recovery
            return false;
        }
        throw new IndexShardRecoveryException(shardId, "failed recovery", e);
    }
    return false;
}
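The trace log above wraps every raw byte count in ByteSizeValue before appending it, so sizes print with human-readable units. A minimal standalone sketch of that formatting behavior (the class name and values are illustrative, not taken from the snippet above):

import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeValueToStringDemo {
    public static void main(String[] args) {
        long recoveredBytes = 3_670_016; // a raw byte count, e.g. what index.recoveredBytes() might return
        System.out.println("raw       : " + recoveredBytes);                    // prints 3670016
        System.out.println("formatted : " + new ByteSizeValue(recoveredBytes)); // toString() renders "3.5mb"
    }
}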
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class IndexShard, the method refresh:
/**
* Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link AlreadyClosedException}.
*/
public void refresh(String source) {
    verifyNotClosed();
    if (canIndex()) {
        long bytes = getEngine().getIndexBufferRAMBytesUsed();
        writingBytes.addAndGet(bytes);
        try {
            if (logger.isTraceEnabled()) {
                logger.trace("refresh with source [{}] indexBufferRAMBytesUsed [{}]", source, new ByteSizeValue(bytes));
            }
            long time = System.nanoTime();
            getEngine().refresh(source);
            refreshMetric.inc(System.nanoTime() - time);
        } finally {
            if (logger.isTraceEnabled()) {
                logger.trace("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
            }
            writingBytes.addAndGet(-bytes);
        }
    } else {
        if (logger.isTraceEnabled()) {
            logger.trace("refresh with source [{}]", source);
        }
        long time = System.nanoTime();
        getEngine().refresh(source);
        refreshMetric.inc(System.nanoTime() - time);
    }
}
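refresh() above and writeIndexingBuffer() below both account for in-flight bytes with the same pattern: add to the counter before the write, subtract in a finally block so the count is retracted even on failure. A hypothetical distillation of that pattern (the names WritingBytesTracker and runTracked are illustrative, not Elasticsearch API):

import java.util.concurrent.atomic.AtomicLong;

class WritingBytesTracker {
    private final AtomicLong writingBytes = new AtomicLong();

    void runTracked(long bytes, Runnable writeToDisk) {
        writingBytes.addAndGet(bytes);      // publish: these bytes are being moved to disk
        try {
            writeToDisk.run();
        } finally {
            writingBytes.addAndGet(-bytes); // always retract, even if the write throws
        }
    }
}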
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class IndexShard, the method writeIndexingBuffer:
/**
* Called when our shard is using too much heap and should move buffered indexed/deleted documents to disk.
*/
public void writeIndexingBuffer() {
    if (canIndex() == false) {
        throw new UnsupportedOperationException();
    }
    try {
        Engine engine = getEngine();
        long bytes = engine.getIndexBufferRAMBytesUsed();
        // NOTE: this can be an overestimate by up to 20%, if engine uses IW.flush not refresh, because version map
        // memory is low enough, but this is fine because after the writes finish, IMC will poll again and see that
        // there's still up to the 20% being used and continue writing if necessary:
        logger.debug("add [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
        writingBytes.addAndGet(bytes);
        try {
            engine.writeIndexingBuffer();
        } finally {
            writingBytes.addAndGet(-bytes);
            logger.debug("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId());
        }
    } catch (Exception e) {
        handleRefreshException(e);
    }
}
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class ExceptionSerializationTests, the method testRecoverFilesRecoveryException:
public void testRecoverFilesRecoveryException() throws IOException {
    ShardId id = new ShardId("foo", "_na_", 1);
    ByteSizeValue bytes = new ByteSizeValue(randomIntBetween(0, 10000));
    RecoverFilesRecoveryException ex = serialize(new RecoverFilesRecoveryException(id, 10, bytes, null));
    assertEquals(ex.getShardId(), id);
    assertEquals(ex.numberOfFiles(), 10);
    assertEquals(ex.totalFilesSize(), bytes);
    assertEquals(ex.getMessage(), "Failed to transfer [10] files with total size of [" + bytes + "]");
    assertNull(ex.getCause());
    ex = serialize(new RecoverFilesRecoveryException(null, 10, bytes, new NullPointerException()));
    assertNull(ex.getShardId());
    assertEquals(ex.numberOfFiles(), 10);
    assertEquals(ex.totalFilesSize(), bytes);
    assertEquals(ex.getMessage(), "Failed to transfer [10] files with total size of [" + bytes + "]");
    assertTrue(ex.getCause() instanceof NullPointerException);
}
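The assertEquals(ex.totalFilesSize(), bytes) checks only hold because the deserialized copy is a different instance from the original, so ByteSizeValue must compare by value. A small sketch of that property, assuming equality is based on the underlying byte count (which is what the round-trip assertions above require):

ByteSizeValue fromBytes = new ByteSizeValue(1024);
ByteSizeValue fromUnit = new ByteSizeValue(1, ByteSizeUnit.KB);
// both represent 1024 bytes, so a round trip that preserves the byte count preserves equality
assert fromBytes.equals(fromUnit);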
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
From the class BulkProcessorIT, the method testBulkProcessorConcurrentRequestsNoNodeAvailableException:
// https://github.com/elastic/elasticsearch/issues/5038
public void testBulkProcessorConcurrentRequestsNoNodeAvailableException() throws Exception {
    // we create a transport client with no nodes to make sure it throws NoNodeAvailableException
    Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
    Client transportClient = new MockTransportClient(settings);
    int bulkActions = randomIntBetween(10, 100);
    int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
    int concurrentRequests = randomIntBetween(0, 10);
    int expectedBulkActions = numDocs / bulkActions;
    final CountDownLatch latch = new CountDownLatch(expectedBulkActions);
    int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1;
    final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions);
    BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch);
    try (BulkProcessor processor = BulkProcessor.builder(transportClient, listener)
            .setConcurrentRequests(concurrentRequests)
            .setBulkActions(bulkActions)
            .setFlushInterval(TimeValue.timeValueHours(24))
            .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
            .build()) {
        indexDocs(transportClient, processor, numDocs);
        latch.await();
        assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions));
        assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions));
        assertThat(listener.bulkFailures.size(), equalTo(expectedBulkActions));
        assertThat(listener.bulkItems.size(), equalTo(0));
    }
    closeLatch.await();
    assertThat(listener.bulkFailures.size(), equalTo(totalExpectedBulkActions));
    assertThat(listener.bulkItems.size(), equalTo(0));
    transportClient.close();
}
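setBulkSize takes a ByteSizeValue, which lets the test state its deliberately unreachable 1 GB flush threshold directly instead of computing raw bytes; together with the 24-hour flush interval, this guarantees that only the action count ever triggers a flush. For contrast, a minimal sketch of a more typical configuration (client and listener are assumed to exist):

BulkProcessor processor = BulkProcessor.builder(client, listener)
        .setBulkActions(1000)                               // flush after 1000 requests...
        .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) // ...or after 5 MB of payload...
        .setFlushInterval(TimeValue.timeValueSeconds(5))    // ...or every 5 seconds, whichever comes first
        .setConcurrentRequests(1)                           // allow one bulk request in flight
        .build();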