Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class SelectorTest, method testMuteOnOOM.
@Test
public void testMuteOnOOM() throws Exception {
    // clean up the default selector and replace it with one that uses a finite memory pool
    selector.close();
    MemoryPool pool = new SimpleMemoryPool(900, 900, false, null);
    selector = new Selector(NetworkReceive.UNLIMITED, 5000, metrics, time, "MetricGroup", new HashMap<String, String>(), true, false, channelBuilder, pool, new LogContext());
    try (ServerSocketChannel ss = ServerSocketChannel.open()) {
        ss.bind(new InetSocketAddress(0));
        InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress();
        Thread sender1 = createSender(serverAddress, randomPayload(900));
        Thread sender2 = createSender(serverAddress, randomPayload(900));
        sender1.start();
        sender2.start();
        // wait until everything has been flushed out to the network (assuming the payload size
        // is smaller than the OS buffer size). this is important because we assume both
        // requests' size prefixes (the first 4 bytes) have made it.
        sender1.join(5000);
        sender2.join(5000);
        // not defined whether it's 1 or 2
        SocketChannel channelX = ss.accept();
        channelX.configureBlocking(false);
        SocketChannel channelY = ss.accept();
        channelY.configureBlocking(false);
        selector.register("clientX", channelX);
        selector.register("clientY", channelY);
        List<NetworkReceive> completed = Collections.emptyList();
        long deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        assertEquals("could not read a single request within timeout", 1, completed.size());
        NetworkReceive firstReceive = completed.get(0);
        assertEquals(0, pool.availableMemory());
        assertTrue(selector.isOutOfMemory());
        selector.poll(10);
        assertTrue(selector.completedReceives().isEmpty());
        assertEquals(0, pool.availableMemory());
        assertTrue(selector.isOutOfMemory());
        firstReceive.close();
        // memory has been released back to the pool
        assertEquals(900, pool.availableMemory());
        completed = Collections.emptyList();
        deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        assertEquals("could not read remaining request within timeout", 1, completed.size());
        assertEquals(0, pool.availableMemory());
        assertFalse(selector.isOutOfMemory());
    }
}
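The mute-on-OOM behavior above hinges on SimpleMemoryPool handing out buffers only while capacity remains. Below is a minimal sketch of those pool semantics using the org.apache.kafka.common.memory API; the standalone class and main method are illustrative wrappers, not part of the project.

import java.nio.ByteBuffer;
import org.apache.kafka.common.memory.MemoryPool;
import org.apache.kafka.common.memory.SimpleMemoryPool;

public class MemoryPoolSketch {
    public static void main(String[] args) {
        // 900-byte pool, max single allocation of 900 bytes, non-strict, no OOM sensor
        MemoryPool pool = new SimpleMemoryPool(900, 900, false, null);
        ByteBuffer first = pool.tryAllocate(900);   // succeeds and drains the pool
        ByteBuffer second = pool.tryAllocate(900);  // returns null: nothing left to hand out
        System.out.println(second == null);          // true
        System.out.println(pool.isOutOfMemory());    // true: availableMemory() is 0
        pool.release(first);                         // hand the 900 bytes back
        System.out.println(pool.availableMemory());  // 900
    }
}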
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class SslSelectorTest, method setUp.
@Before
public void setUp() throws Exception {
    File trustStoreFile = File.createTempFile("truststore", ".jks");
    Map<String, Object> sslServerConfigs = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStoreFile, "server");
    this.server = new EchoServer(SecurityProtocol.SSL, sslServerConfigs);
    this.server.start();
    this.time = new MockTime();
    sslClientConfigs = TestSslUtils.createSslConfig(false, false, Mode.CLIENT, trustStoreFile, "client");
    this.channelBuilder = new SslChannelBuilder(Mode.CLIENT, null, false);
    this.channelBuilder.configure(sslClientConfigs);
    this.metrics = new Metrics();
    this.selector = new Selector(5000, metrics, time, "MetricGroup", channelBuilder, new LogContext());
}
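setUp acquires an EchoServer, a Metrics registry, and a Selector, all of which hold threads or sockets, so the test class needs a matching @After hook. A plausible sketch, assuming the usual close() methods on these types (the fork's actual teardown may differ):

@After
public void tearDown() throws Exception {
    this.selector.close();
    this.server.close();
    this.metrics.close();
}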
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class SslSelectorTest, method testMuteOnOOM.
@Override
public void testMuteOnOOM() throws Exception {
    // clean up the default selector and replace it with one that uses a finite memory pool
    selector.close();
    MemoryPool pool = new SimpleMemoryPool(900, 900, false, null);
    // the initial channel builder is for clients; we need a server one
    File trustStoreFile = File.createTempFile("truststore", ".jks");
    Map<String, Object> sslServerConfigs = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStoreFile, "server");
    channelBuilder = new SslChannelBuilder(Mode.SERVER, null, false);
    channelBuilder.configure(sslServerConfigs);
    selector = new Selector(NetworkReceive.UNLIMITED, 5000, metrics, time, "MetricGroup", new HashMap<String, String>(), true, false, channelBuilder, pool, new LogContext());
    try (ServerSocketChannel ss = ServerSocketChannel.open()) {
        ss.bind(new InetSocketAddress(0));
        InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress();
        SslSender sender1 = createSender(serverAddress, randomPayload(900));
        SslSender sender2 = createSender(serverAddress, randomPayload(900));
        sender1.start();
        sender2.start();
        // not defined whether it's 1 or 2
        SocketChannel channelX = ss.accept();
        channelX.configureBlocking(false);
        SocketChannel channelY = ss.accept();
        channelY.configureBlocking(false);
        selector.register("clientX", channelX);
        selector.register("clientY", channelY);
        boolean handshaked = false;
        NetworkReceive firstReceive = null;
        long deadline = System.currentTimeMillis() + 5000;
        // loop until both senders have completed their handshakes and a single payload has been
        // read out completely (the other is too big to fit in the remaining pool memory)
        while (System.currentTimeMillis() < deadline) {
            selector.poll(10);
            List<NetworkReceive> completed = selector.completedReceives();
            if (firstReceive == null) {
                if (!completed.isEmpty()) {
                    assertEquals("expecting a single request", 1, completed.size());
                    firstReceive = completed.get(0);
                    assertTrue(selector.isMadeReadProgressLastPoll());
                    assertEquals(0, pool.availableMemory());
                }
            } else {
                assertTrue("only expecting single request", completed.isEmpty());
            }
            handshaked = sender1.waitForHandshake(1) && sender2.waitForHandshake(1);
            if (handshaked && firstReceive != null && selector.isOutOfMemory())
                break;
        }
        assertTrue("could not initiate connections within timeout", handshaked);
        selector.poll(10);
        assertTrue(selector.completedReceives().isEmpty());
        assertEquals(0, pool.availableMemory());
        assertNotNull("First receive not complete", firstReceive);
        assertTrue("Selector not out of memory", selector.isOutOfMemory());
        firstReceive.close();
        // memory has been released back to the pool
        assertEquals(900, pool.availableMemory());
        List<NetworkReceive> completed = Collections.emptyList();
        deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        assertEquals("could not read remaining request within timeout", 1, completed.size());
        assertEquals(0, pool.availableMemory());
        assertFalse(selector.isOutOfMemory());
    }
}
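Both OOM tests repeat the same poll-until-a-receive-completes loop. A hypothetical private helper inside the test class could factor it out; the name and shape below are illustrative, not from the project.

// Hypothetical helper: poll until a receive completes or the deadline passes.
private static List<NetworkReceive> pollUntilReceives(Selector selector, long timeoutMs) throws IOException {
    List<NetworkReceive> completed = Collections.emptyList();
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
        selector.poll(1000);
        completed = selector.completedReceives();
    }
    return completed;
}

Each deadline loop above would then collapse to a single call, e.g. completed = pollUntilReceives(selector, 5000).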
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class StreamTaskTest, method shouldFlushRecordCollectorOnFlushState.
@Test
public void shouldFlushRecordCollectorOnFlushState() {
    final AtomicBoolean flushed = new AtomicBoolean(false);
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final StreamTask streamTask = new StreamTask(taskId00, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer) {
        @Override
        RecordCollector createRecordCollector(final LogContext logContext, final ProductionExceptionHandler exHandler) {
            // inject a no-op collector that records whether flush() was invoked
            return new NoOpRecordCollector() {
                @Override
                public void flush() {
                    flushed.set(true);
                }
            };
        }
    };
    streamTask.flushState();
    assertTrue(flushed.get());
}
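The test works because StreamTask obtains its RecordCollector through an overridable factory method, letting an anonymous subclass swap in a probe. The same seam pattern in a self-contained form, with hypothetical Widget and Machine classes standing in for the Kafka types:

import java.util.concurrent.atomic.AtomicBoolean;

class Widget {
    void flush() { /* real work would happen here */ }
}

class Machine {
    // factory method: the test seam an anonymous subclass can override
    Widget createWidget() {
        return new Widget();
    }

    void flushState() {
        createWidget().flush();
    }
}

public class SeamDemo {
    public static void main(String[] args) {
        final AtomicBoolean flushed = new AtomicBoolean(false);
        Machine machine = new Machine() {
            @Override
            Widget createWidget() {
                // inject a probe instead of the real collaborator
                return new Widget() {
                    @Override
                    void flush() {
                        flushed.set(true);
                    }
                };
            }
        };
        machine.flushState();
        System.out.println(flushed.get()); // prints true
    }
}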
Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.
The class StreamThreadTest, method shouldNotCommitBeforeTheCommitInterval.
@SuppressWarnings({"unchecked", "ThrowableNotThrown"})
@Test
public void shouldNotCommitBeforeTheCommitInterval() {
    final long commitInterval = 1000L;
    final Properties props = configProps(false);
    props.setProperty(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval));
    final StreamsConfig config = new StreamsConfig(props);
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final TaskManager taskManager = mockTaskManagerCommit(consumer, 1, 1);
    final StreamThread.StreamsMetricsThreadImpl streamsMetrics = new StreamThread.StreamsMetricsThreadImpl(metrics, "", "", Collections.<String, String>emptyMap());
    final StreamThread thread = new StreamThread(mockTime, config, consumer, consumer, null, taskManager, streamsMetrics, internalTopologyBuilder, clientId, new LogContext(""));
    thread.maybeCommit(mockTime.milliseconds());
    // advance to just before the commit interval elapses; this call must not trigger another commit
    mockTime.sleep(commitInterval - 10L);
    thread.maybeCommit(mockTime.milliseconds());
    EasyMock.verify(taskManager);
}
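The verification relies on call-count expectations set up by the mockTaskManagerCommit helper, which is defined elsewhere in StreamThreadTest. A hedged sketch of what such an EasyMock setup could look like; the method name on TaskManager and the exact expectations are assumptions about that helper, not confirmed by this excerpt:

// Assumption: TaskManager.commitAll() exists and returns the number of committed tasks.
final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
EasyMock.expect(taskManager.commitAll()).andReturn(1).times(1); // expect exactly one commit pass
EasyMock.replay(taskManager);
// ... exercise thread.maybeCommit(...) as above ...
EasyMock.verify(taskManager); // fails if commitAll() ran more or fewer times than expected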