Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
From the class CorruptedTranslogIT, method testCorruptTranslogFiles:
public void testCorruptTranslogFiles() throws Exception {
    internalCluster().startNodes(1, Settings.EMPTY);
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put("index.number_of_shards", 1)
        .put("index.number_of_replicas", 0)
        .put("index.refresh_interval", "-1")
        // never flush - always recover from translog
        .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true)));
    // Index some documents
    int numDocs = scaledRandomIntBetween(100, 1000);
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar");
    }
    disableTranslogFlush("test");
    // this one
    indexRandom(false, false, false, Arrays.asList(builders));
    // Corrupt the translog file(s)
    corruptRandomTranslogFiles();
    // Restart the single node
    internalCluster().fullRestart();
    client().admin().cluster().prepareHealth()
        .setWaitForYellowStatus()
        .setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS))
        .setWaitForEvents(Priority.LANGUID)
        .get();
    try {
        client().prepareSearch("test").setQuery(matchAllQuery()).get();
        fail("all shards should be failed due to a corrupted translog");
    } catch (SearchPhaseExecutionException e) {
        // Good, all shards should be failed because there is only a
        // single shard and its translog is corrupt
    }
}
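The cluster-health call above builds its timeout with the two-argument TimeValue constructor. As a hedged aside, the same duration can also be produced with TimeValue's factory methods, which tend to read more clearly; a minimal sketch (variable names are illustrative only):

    // Both values represent one second; millis() lets us confirm they agree.
    TimeValue viaConstructor = new TimeValue(1000, TimeUnit.MILLISECONDS);
    TimeValue viaFactory = TimeValue.timeValueMillis(1000);
    assert viaConstructor.millis() == viaFactory.millis();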
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
From the class JvmGcMonitorServiceTests, method testSlowGcLogging:
public void testSlowGcLogging() {
    final Logger logger = mock(Logger.class);
    when(logger.isWarnEnabled()).thenReturn(true);
    when(logger.isInfoEnabled()).thenReturn(true);
    when(logger.isDebugEnabled()).thenReturn(true);
    final JvmGcMonitorService.JvmMonitor.Threshold threshold = randomFrom(JvmGcMonitorService.JvmMonitor.Threshold.values());
    final String name = randomAsciiOfLength(16);
    final long seq = randomIntBetween(1, 1 << 30);
    final int elapsedValue = randomIntBetween(1, 1 << 10);
    final long totalCollectionCount = randomIntBetween(1, 16);
    final long currentCollectionCount = randomIntBetween(1, 16);
    final TimeValue totalCollectionTime = TimeValue.timeValueMillis(randomIntBetween(1, elapsedValue));
    final TimeValue currentCollectionTime = TimeValue.timeValueMillis(randomIntBetween(1, elapsedValue));
    final ByteSizeValue lastHeapUsed = new ByteSizeValue(randomIntBetween(1, 1 << 10));
    JvmStats lastJvmStats = mock(JvmStats.class);
    JvmStats.Mem lastMem = mock(JvmStats.Mem.class);
    when(lastMem.getHeapUsed()).thenReturn(lastHeapUsed);
    when(lastJvmStats.getMem()).thenReturn(lastMem);
    when(lastJvmStats.toString()).thenReturn("last");
    final ByteSizeValue currentHeapUsed = new ByteSizeValue(randomIntBetween(1, 1 << 10));
    JvmStats currentJvmStats = mock(JvmStats.class);
    JvmStats.Mem currentMem = mock(JvmStats.Mem.class);
    when(currentMem.getHeapUsed()).thenReturn(currentHeapUsed);
    when(currentJvmStats.getMem()).thenReturn(currentMem);
    when(currentJvmStats.toString()).thenReturn("current");
    JvmStats.GarbageCollector gc = mock(JvmStats.GarbageCollector.class);
    when(gc.getName()).thenReturn(name);
    when(gc.getCollectionCount()).thenReturn(totalCollectionCount);
    when(gc.getCollectionTime()).thenReturn(totalCollectionTime);
    final ByteSizeValue maxHeapUsed = new ByteSizeValue(Math.max(lastHeapUsed.getBytes(), currentHeapUsed.getBytes()) + 1 << 10);
    JvmGcMonitorService.JvmMonitor.SlowGcEvent slowGcEvent = new JvmGcMonitorService.JvmMonitor.SlowGcEvent(
        gc, currentCollectionCount, currentCollectionTime, elapsedValue, lastJvmStats, currentJvmStats, maxHeapUsed);
    JvmGcMonitorService.logSlowGc(logger, threshold, seq, slowGcEvent, (l, c) -> l.toString() + ", " + c.toString());
    switch (threshold) {
        case WARN:
            verify(logger).isWarnEnabled();
            verify(logger).warn(
                "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
                name, seq, totalCollectionCount, currentCollectionTime, currentCollectionCount,
                TimeValue.timeValueMillis(elapsedValue), currentCollectionTime, totalCollectionTime,
                lastHeapUsed, currentHeapUsed, maxHeapUsed, "last, current");
            break;
        case INFO:
            verify(logger).isInfoEnabled();
            verify(logger).info(
                "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
                name, seq, totalCollectionCount, currentCollectionTime, currentCollectionCount,
                TimeValue.timeValueMillis(elapsedValue), currentCollectionTime, totalCollectionTime,
                lastHeapUsed, currentHeapUsed, maxHeapUsed, "last, current");
            break;
        case DEBUG:
            verify(logger).isDebugEnabled();
            verify(logger).debug(
                "[gc][{}][{}][{}] duration [{}], collections [{}]/[{}], total [{}]/[{}], memory [{}]->[{}]/[{}], all_pools {}",
                name, seq, totalCollectionCount, currentCollectionTime, currentCollectionCount,
                TimeValue.timeValueMillis(elapsedValue), currentCollectionTime, totalCollectionTime,
                lastHeapUsed, currentHeapUsed, maxHeapUsed, "last, current");
            break;
    }
    verifyNoMoreInteractions(logger);
}
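All of the durations stubbed above come from TimeValue.timeValueMillis. As a small illustrative sketch (the numbers are arbitrary, not taken from the test), such a value reads back in other units via TimeValue's conversion accessors:

    // 1500 ms reads back as 1500 millis and, by integer conversion, 1 whole second.
    TimeValue t = TimeValue.timeValueMillis(1500);
    assert t.millis() == 1500L;
    assert t.seconds() == 1L;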
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
From the class ActiveShardsObserver, method waitForActiveShards:
/**
 * Waits on the specified number of active shards to be started before executing the result consumer.
 *
 * @param indexName        the index to wait for active shards on
 * @param activeShardCount the number of active shards to wait on before returning
 * @param timeout          the timeout value
 * @param onResult         a function that is executed in response to the requisite shards becoming active or a timeout (whichever comes first)
 * @param onFailure        a function that is executed in response to an error occurring during waiting for the active shards
 */
public void waitForActiveShards(final String indexName,
                                final ActiveShardCount activeShardCount,
                                final TimeValue timeout,
                                final Consumer<Boolean> onResult,
                                final Consumer<Exception> onFailure) {
    // wait for the configured number of active shards to be allocated before executing the result consumer
    if (activeShardCount == ActiveShardCount.NONE) {
        // not waiting, so just run whatever we were to run when the waiting is over
        onResult.accept(true);
        return;
    }
    final ClusterState state = clusterService.state();
    final ClusterStateObserver observer =
        new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext());
    if (activeShardCount.enoughShardsActive(state, indexName)) {
        onResult.accept(true);
    } else {
        final Predicate<ClusterState> shardsAllocatedPredicate =
            newState -> activeShardCount.enoughShardsActive(newState, indexName);
        final ClusterStateObserver.Listener observerListener = new ClusterStateObserver.Listener() {

            @Override
            public void onNewClusterState(ClusterState state) {
                onResult.accept(true);
            }

            @Override
            public void onClusterServiceClose() {
                logger.debug("[{}] cluster service closed while waiting for enough shards to be started.", indexName);
                onFailure.accept(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                onResult.accept(false);
            }
        };
        observer.waitForNextChange(observerListener, shardsAllocatedPredicate, timeout);
    }
}
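A minimal sketch of how a caller might invoke this method, assuming an existing ActiveShardsObserver instance; the observer variable and index name are hypothetical:

    // Wait up to 30 seconds for the default number of active shard copies on
    // "my-index", then react to the boolean outcome or to a failure.
    activeShardsObserver.waitForActiveShards(
        "my-index",
        ActiveShardCount.DEFAULT,
        TimeValue.timeValueSeconds(30),
        shardsAcked -> {
            if (shardsAcked) {
                // the requisite shards became active before the timeout
            } else {
                // timed out while waiting for the shards
            }
        },
        e -> {
            // waiting failed, for example because the node closed
        });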
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
From the class MasterNodeRequest, method readFrom:
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    masterNodeTimeout = new TimeValue(in);
}
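TimeValue provides the StreamInput constructor used above as part of its Writeable support, so the matching write side of this request would look roughly like the following sketch (not the verbatim class source):

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        // mirrors readFrom: the master node timeout is written with TimeValue's own serialization
        masterNodeTimeout.writeTo(out);
    }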
Use of org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
From the class ReplicationRequest, method readFrom:
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    if (in.readBoolean()) {
        shardId = ShardId.readShardId(in);
    } else {
        shardId = null;
    }
    waitForActiveShards = ActiveShardCount.readFrom(in);
    timeout = new TimeValue(in);
    index = in.readString();
    routedBasedOnClusterVersion = in.readVLong();
    primaryTerm = in.readVLong();
}
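On the request-building side, the timeout deserialized here is usually supplied as a TimeValue, often parsed from a REST-style duration string. A hedged sketch, assuming the usual timeout(TimeValue) setter on the request (the request variable is hypothetical):

    // parseTimeValue turns strings such as "30s" into TimeValue instances;
    // the third argument names the setting for error messages.
    TimeValue parsed = TimeValue.parseTimeValue("30s", null, "timeout");
    request.timeout(parsed);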