Use of org.elasticsearch.test.MockLogAppender in project elasticsearch by elastic.
The class MaxMapCountCheckTests, method testGetMaxMapCount.
public void testGetMaxMapCount() throws IOException, IllegalAccessException {
    final long procSysVmMaxMapCount = randomIntBetween(1, Integer.MAX_VALUE);
    final BufferedReader reader = mock(BufferedReader.class);
    when(reader.readLine()).thenReturn(Long.toString(procSysVmMaxMapCount));
    final Path procSysVmMaxMapCountPath = PathUtils.get("/proc/sys/vm/max_map_count");
    BootstrapChecks.MaxMapCountCheck check = new BootstrapChecks.MaxMapCountCheck() {
        @Override
        BufferedReader getBufferedReader(Path path) throws IOException {
            assertEquals(path, procSysVmMaxMapCountPath);
            return reader;
        }
    };
    assertThat(check.getMaxMapCount(), equalTo(procSysVmMaxMapCount));
    verify(reader).close();
    {
        reset(reader);
        final IOException ioException = new IOException("fatal");
        when(reader.readLine()).thenThrow(ioException);
        final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountIOException");
        final MockLogAppender appender = new MockLogAppender();
        appender.start();
        appender.addExpectation(new ParameterizedMessageLoggingExpectation(
            "expected logged I/O exception",
            "testGetMaxMapCountIOException",
            Level.WARN,
            "I/O exception while trying to read [{}]",
            new Object[] { procSysVmMaxMapCountPath },
            e -> ioException == e));
        Loggers.addAppender(logger, appender);
        assertThat(check.getMaxMapCount(logger), equalTo(-1L));
        appender.assertAllExpectationsMatched();
        verify(reader).close();
        Loggers.removeAppender(logger, appender);
        appender.stop();
    }
    {
        reset(reader);
        when(reader.readLine()).thenReturn("eof");
        final Logger logger = ESLoggerFactory.getLogger("testGetMaxMapCountNumberFormatException");
        final MockLogAppender appender = new MockLogAppender();
        appender.start();
        appender.addExpectation(new ParameterizedMessageLoggingExpectation(
            "expected logged number format exception",
            "testGetMaxMapCountNumberFormatException",
            Level.WARN,
            "unable to parse vm.max_map_count [{}]",
            new Object[] { "eof" },
            e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"")));
        Loggers.addAppender(logger, appender);
        assertThat(check.getMaxMapCount(logger), equalTo(-1L));
        appender.assertAllExpectationsMatched();
        verify(reader).close();
        Loggers.removeAppender(logger, appender);
        appender.stop();
    }
}
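Note that ParameterizedMessageLoggingExpectation is a helper declared inside MaxMapCountCheckTests itself, not part of MockLogAppender, and its source is not shown above. The sketch below illustrates what such a custom expectation might look like, assuming the MockLogAppender.LoggingExpectation interface (match/assertMatched) used in the CoordinatorTests example further down; the field names and matching logic here are assumptions, not the actual Elasticsearch implementation.

import java.util.Arrays;
import java.util.function.Predicate;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LogEvent;
import org.elasticsearch.test.MockLogAppender;
import static org.junit.Assert.assertTrue;

// Hypothetical sketch of a custom expectation for parameterized log messages.
class ParameterizedMessageLoggingExpectation implements MockLogAppender.LoggingExpectation {

    private final String description;                       // reported when the expectation fails
    private final String loggerName;                        // logger the event must come from
    private final Level level;                              // expected log level
    private final String messagePattern;                    // expected ParameterizedMessage pattern
    private final Object[] arguments;                       // expected message arguments
    private final Predicate<Throwable> throwablePredicate;  // check on the attached exception
    private volatile boolean saw = false;

    ParameterizedMessageLoggingExpectation(String description, String loggerName, Level level,
                                           String messagePattern, Object[] arguments,
                                           Predicate<Throwable> throwablePredicate) {
        this.description = description;
        this.loggerName = loggerName;
        this.level = level;
        this.messagePattern = messagePattern;
        this.arguments = arguments;
        this.throwablePredicate = throwablePredicate;
    }

    @Override
    public void match(LogEvent event) {
        // remember whether any event matched level, logger, message pattern, arguments and throwable
        if (saw == false
                && event.getLevel().equals(level)
                && event.getLoggerName().equals(loggerName)) {
            saw = event.getMessage().getFormat().equals(messagePattern)
                    && Arrays.deepEquals(event.getMessage().getParameters(), arguments)
                    && throwablePredicate.test(event.getThrown());
        }
    }

    @Override
    public void assertMatched() {
        assertTrue(description, saw);
    }
}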
Use of org.elasticsearch.test.MockLogAppender in project elasticsearch by elastic.
The class ClusterServiceTests, method testClusterStateUpdateLogging.
// To ensure that we log cluster state events on TRACE level
@TestLogging("org.elasticsearch.cluster.service:TRACE")
public void testClusterStateUpdateLogging() throws Exception {
    MockLogAppender mockAppender = new MockLogAppender();
    mockAppender.start();
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test1",
        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
        Level.DEBUG,
        "*processing [test1]: took [1s] no change in cluster_state"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test2",
        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
        Level.TRACE,
        "*failed to execute cluster state update in [2s]*"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test3",
        "org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
        Level.DEBUG,
        "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
    Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
    Loggers.addAppender(clusterLogger, mockAppender);
    try {
        final CountDownLatch latch = new CountDownLatch(4);
        clusterService.currentTimeOverride = System.nanoTime();
        clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) throws Exception {
                clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos();
                return currentState;
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
            }
            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                clusterService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos();
                throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                fail();
            }
            @Override
            public void onFailure(String source, Exception e) {
                latch.countDown();
            }
        });
        clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                clusterService.currentTimeOverride += TimeValue.timeValueSeconds(3).nanos();
                return ClusterState.builder(currentState).incrementVersion().build();
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
            }
            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        // Additional update task to make sure all previous logging made it to the logger.
        // We don't check logging for this one since there is no guarantee that it will occur before our check.
        clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState;
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                latch.countDown();
            }
            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        latch.await();
    } finally {
        Loggers.removeAppender(clusterLogger, mockAppender);
        mockAppender.stop();
    }
    mockAppender.assertAllExpectationsMatched();
}
Use of org.elasticsearch.test.MockLogAppender in project crate by crate.
The class CoordinatorTests, method testLogsWarningPeriodicallyIfClusterNotFormed.
public void testLogsWarningPeriodicallyIfClusterNotFormed() throws IllegalAccessException {
    final long warningDelayMillis;
    final Settings settings;
    if (randomBoolean()) {
        settings = Settings.EMPTY;
        warningDelayMillis = ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING.get(settings).millis();
    } else {
        warningDelayMillis = randomLongBetween(1, 100000);
        settings = Settings.builder()
            .put(ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING.getKey(), warningDelayMillis + "ms")
            .build();
    }
    logger.info("--> emitting warnings every [{}ms]", warningDelayMillis);
    try (Cluster cluster = new Cluster(3, true, settings)) {
        cluster.runRandomly();
        cluster.stabilise();
        logger.info("--> disconnecting all nodes");
        for (final ClusterNode clusterNode : cluster.clusterNodes) {
            clusterNode.disconnect();
        }
        cluster.runFor(
            defaultMillis(LEADER_CHECK_TIMEOUT_SETTING) // to wait for any in-flight check to time out
                + defaultMillis(LEADER_CHECK_INTERVAL_SETTING) // to wait for the next check to be sent
                + 2 * DEFAULT_DELAY_VARIABILITY, // to send the failing check and receive the disconnection response
            "waiting for leader failure");
        for (final ClusterNode clusterNode : cluster.clusterNodes) {
            assertThat(clusterNode.getId() + " is CANDIDATE", clusterNode.coordinator.getMode(), is(CANDIDATE));
        }
        for (int i = scaledRandomIntBetween(1, 10); i >= 0; i--) {
            final MockLogAppender mockLogAppender = new MockLogAppender();
            try {
                mockLogAppender.start();
                Loggers.addAppender(LogManager.getLogger(ClusterFormationFailureHelper.class), mockLogAppender);
                mockLogAppender.addExpectation(new MockLogAppender.LoggingExpectation() {

                    final Set<DiscoveryNode> nodesLogged = new HashSet<>();

                    @Override
                    public void match(LogEvent event) {
                        final String message = event.getMessage().getFormattedMessage();
                        assertThat(message, startsWith("master not discovered or elected yet, an election requires at least 2 nodes with ids from ["));
                        final List<ClusterNode> matchingNodes = cluster.clusterNodes.stream()
                            .filter(n -> event.getContextData().<String>getValue(NODE_ID_LOG_CONTEXT_KEY)
                                .equals(getNodeIdForLogContext(n.getLocalNode())))
                            .collect(Collectors.toList());
                        assertThat(matchingNodes, hasSize(1));
                        assertTrue(Regex.simpleMatch("*have discovered *" + matchingNodes.get(0).toString() + "*discovery will continue*", message));
                        nodesLogged.add(matchingNodes.get(0).getLocalNode());
                    }

                    @Override
                    public void assertMatched() {
                        assertThat(nodesLogged + " vs " + cluster.clusterNodes, nodesLogged,
                            equalTo(cluster.clusterNodes.stream().map(ClusterNode::getLocalNode).collect(Collectors.toSet())));
                    }
                });
                cluster.runFor(warningDelayMillis + DEFAULT_DELAY_VARIABILITY, "waiting for warning to be emitted");
                mockLogAppender.assertAllExpectationsMatched();
            } finally {
                Loggers.removeAppender(LogManager.getLogger(ClusterFormationFailureHelper.class), mockLogAppender);
                mockLogAppender.stop();
            }
        }
    }
}
Use of org.elasticsearch.test.MockLogAppender in project crate by crate.
The class CoordinatorTests, method testLogsMessagesIfPublicationDelayed.
public void testLogsMessagesIfPublicationDelayed() throws IllegalAccessException {
    try (Cluster cluster = new Cluster(between(3, 5))) {
        cluster.runRandomly();
        cluster.stabilise();
        final ClusterNode brokenNode = cluster.getAnyNodeExcept(cluster.getAnyLeader());
        final MockLogAppender mockLogAppender = new MockLogAppender();
        try {
            mockLogAppender.start();
            Loggers.addAppender(LogManager.getLogger(Coordinator.CoordinatorPublication.class), mockLogAppender);
            Loggers.addAppender(LogManager.getLogger(LagDetector.class), mockLogAppender);
            mockLogAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
                "publication info message",
                Coordinator.CoordinatorPublication.class.getCanonicalName(),
                Level.INFO,
                "after [*] publication of cluster state version [*] is still waiting for "
                    + brokenNode.getLocalNode() + " [" + Publication.PublicationTargetState.SENT_PUBLISH_REQUEST + ']'));
            mockLogAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
                "publication warning",
                Coordinator.CoordinatorPublication.class.getCanonicalName(),
                Level.WARN,
                "after [*] publication of cluster state version [*] is still waiting for "
                    + brokenNode.getLocalNode() + " [" + Publication.PublicationTargetState.SENT_PUBLISH_REQUEST + ']'));
            mockLogAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
                "lag warning",
                LagDetector.class.getCanonicalName(),
                Level.WARN,
                "node [" + brokenNode + "] is lagging at cluster state version [*], "
                    + "although publication of cluster state version [*] completed [*] ago"));
            // drop the publication messages to one node, but then restore connectivity so it remains
            // in the cluster and does not fail health checks
            brokenNode.blackhole();
            cluster.deterministicTaskQueue.scheduleAt(
                cluster.deterministicTaskQueue.getCurrentTimeMillis() + DEFAULT_CLUSTER_STATE_UPDATE_DELAY,
                new Runnable() {

                    @Override
                    public void run() {
                        brokenNode.heal();
                    }

                    @Override
                    public String toString() {
                        return "healing " + brokenNode;
                    }
                });
            cluster.getAnyLeader().submitValue(randomLong());
            cluster.runFor(
                defaultMillis(PUBLISH_TIMEOUT_SETTING) + 2 * DEFAULT_DELAY_VARIABILITY
                    + defaultMillis(LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING),
                "waiting for messages to be emitted");
            mockLogAppender.assertAllExpectationsMatched();
        } finally {
            Loggers.removeAppender(LogManager.getLogger(Coordinator.CoordinatorPublication.class), mockLogAppender);
            Loggers.removeAppender(LogManager.getLogger(LagDetector.class), mockLogAppender);
            mockLogAppender.stop();
        }
    }
}
Use of org.elasticsearch.test.MockLogAppender in project crate by crate.
The class MasterServiceTests, method testClusterStateUpdateLogging.
// To ensure that we log cluster state events on TRACE level
@TestLogging("org.elasticsearch.cluster.service:TRACE")
public void testClusterStateUpdateLogging() throws Exception {
    MockLogAppender mockAppender = new MockLogAppender();
    mockAppender.start();
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test1 start", MasterService.class.getCanonicalName(), Level.DEBUG,
        "executing cluster state update for [test1]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test1 computation", MasterService.class.getCanonicalName(), Level.DEBUG,
        "took [1s] to compute cluster state update for [test1]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test1 notification", MasterService.class.getCanonicalName(), Level.DEBUG,
        "took [0s] to notify listeners on unchanged cluster state for [test1]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test2 start", MasterService.class.getCanonicalName(), Level.DEBUG,
        "executing cluster state update for [test2]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test2 failure", MasterService.class.getCanonicalName(), Level.TRACE,
        "failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test2 computation", MasterService.class.getCanonicalName(), Level.DEBUG,
        "took [2s] to compute cluster state update for [test2]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test2 notification", MasterService.class.getCanonicalName(), Level.DEBUG,
        "took [0s] to notify listeners on unchanged cluster state for [test2]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test3 start", MasterService.class.getCanonicalName(), Level.DEBUG,
        "executing cluster state update for [test3]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test3 computation", MasterService.class.getCanonicalName(), Level.DEBUG,
        "took [3s] to compute cluster state update for [test3]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test3 notification", MasterService.class.getCanonicalName(), Level.DEBUG,
        "took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [test3]"));
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "test4", MasterService.class.getCanonicalName(), Level.DEBUG,
        "executing cluster state update for [test4]"));
    Logger clusterLogger = LogManager.getLogger(MasterService.class);
    Loggers.addAppender(clusterLogger, mockAppender);
    try (MasterService masterService = createMasterService(true)) {
        masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis();
                return currentState;
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            }
            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis();
                throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                fail();
            }
            @Override
            public void onFailure(String source, Exception e) {
            }
        });
        masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis();
                return ClusterState.builder(currentState).incrementVersion().build();
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                relativeTimeInMillis += TimeValue.timeValueSeconds(4).millis();
            }
            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState;
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            }
            @Override
            public void onFailure(String source, Exception e) {
                fail();
            }
        });
        assertBusy(mockAppender::assertAllExpectationsMatched);
    } finally {
        Loggers.removeAppender(clusterLogger, mockAppender);
        mockAppender.stop();
    }
}
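All of the snippets above follow the same lifecycle: start the appender, register expectations, attach it to the logger under test, run the code that should log, assert the expectations, then detach and stop the appender. Below is a minimal sketch of that pattern, assuming only the classes already used above (MockLogAppender, MockLogAppender.SeenEventExpectation, Loggers, LogManager, Level); SomeComponent and the expected message are placeholders, not real Elasticsearch log output.

// Minimal sketch of the MockLogAppender pattern used throughout the examples above.
MockLogAppender appender = new MockLogAppender();
appender.start();
appender.addExpectation(new MockLogAppender.SeenEventExpectation(
    "expected warning",                        // name reported if the expectation is not met
    SomeComponent.class.getCanonicalName(),    // logger to watch (placeholder class)
    Level.WARN,                                // expected log level
    "*something went wrong*"));                // wildcard pattern for the expected message
Logger targetLogger = LogManager.getLogger(SomeComponent.class);
Loggers.addAppender(targetLogger, appender);
try {
    // ... run the code that is expected to emit the log line ...
    appender.assertAllExpectationsMatched();
} finally {
    Loggers.removeAppender(targetLogger, appender);
    appender.stop();
}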