Search in sources :

Example 11 with LogContext

use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

The class SelectorTest, method testMuteOnOOM.

@Test
public void testMuteOnOOM() throws Exception {
    // clean up default selector, replace it with one that uses a finite mem pool
    selector.close();
    // pool holds exactly 900 bytes, so only one of the two 900-byte payloads can be buffered at a time
    MemoryPool pool = new SimpleMemoryPool(900, 900, false, null);
    selector = new Selector(NetworkReceive.UNLIMITED, 5000, metrics, time, "MetricGroup", new HashMap<String, String>(), true, false, channelBuilder, pool, new LogContext());
    try (ServerSocketChannel ss = ServerSocketChannel.open()) {
        ss.bind(new InetSocketAddress(0));
        InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress();
        Thread sender1 = createSender(serverAddress, randomPayload(900));
        Thread sender2 = createSender(serverAddress, randomPayload(900));
        sender1.start();
        sender2.start();
        // wait until everything has been flushed out to network (assuming payload size is smaller than OS buffer size)
        // this is important because we assume both requests' prefixes (1st 4 bytes) have made it.
        sender1.join(5000);
        sender2.join(5000);
        // accept order is not defined: either sender's connection may arrive first
        SocketChannel channelX = ss.accept();
        channelX.configureBlocking(false);
        SocketChannel channelY = ss.accept();
        channelY.configureBlocking(false);
        selector.register("clientX", channelX);
        selector.register("clientY", channelY);
        List<NetworkReceive> completed = Collections.emptyList();
        long deadline = System.currentTimeMillis() + 5000;
        // poll until the first payload has been read out completely (the second cannot fit in the pool)
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        assertEquals("could not read a single request within timeout", 1, completed.size());
        NetworkReceive firstReceive = completed.get(0);
        // the first receive exhausted the pool, so the selector must report out-of-memory
        assertEquals(0, pool.availableMemory());
        assertTrue(selector.isOutOfMemory());
        // while out of memory, further polls must not complete any receives
        selector.poll(10);
        assertTrue(selector.completedReceives().isEmpty());
        assertEquals(0, pool.availableMemory());
        assertTrue(selector.isOutOfMemory());
        firstReceive.close();
        // memory has been released back to pool
        assertEquals(900, pool.availableMemory());
        completed = Collections.emptyList();
        deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        // assert on the list captured in the loop (was re-querying the selector),
        // consistent with the identical assertion for the first receive above
        assertEquals("could not read a single request within timeout", 1, completed.size());
        assertEquals(0, pool.availableMemory());
        assertFalse(selector.isOutOfMemory());
    }
}
Also used : SocketChannel(java.nio.channels.SocketChannel) ServerSocketChannel(java.nio.channels.ServerSocketChannel) HashMap(java.util.HashMap) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool) InetSocketAddress(java.net.InetSocketAddress) LogContext(org.apache.kafka.common.utils.LogContext) ServerSocketChannel(java.nio.channels.ServerSocketChannel) MemoryPool(org.apache.kafka.common.memory.MemoryPool) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool) Test(org.junit.Test)

Example 12 with LogContext

use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

the class SslSelectorTest method setUp.

@Before
public void setUp() throws Exception {
    // Shared trust store used by both ends of the SSL connection.
    File trustStore = File.createTempFile("truststore", ".jks");

    // Spin up an SSL echo server for the client-side selector to talk to.
    Map<String, Object> serverConfigs = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStore, "server");
    this.server = new EchoServer(SecurityProtocol.SSL, serverConfigs);
    this.server.start();

    this.time = new MockTime();

    // Client-side SSL configuration and channel builder.
    sslClientConfigs = TestSslUtils.createSslConfig(false, false, Mode.CLIENT, trustStore, "client");
    this.channelBuilder = new SslChannelBuilder(Mode.CLIENT, null, false);
    this.channelBuilder.configure(sslClientConfigs);

    this.metrics = new Metrics();
    this.selector = new Selector(5000, metrics, time, "MetricGroup", channelBuilder, new LogContext());
}
Also used : Metrics(org.apache.kafka.common.metrics.Metrics) LogContext(org.apache.kafka.common.utils.LogContext) File(java.io.File) MockTime(org.apache.kafka.common.utils.MockTime) Before(org.junit.Before)

Example 13 with LogContext

use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

the class SslSelectorTest method testMuteOnOOM.

@Override
public void testMuteOnOOM() throws Exception {
    // clean up default selector, replace it with one that uses a finite mem pool
    selector.close();
    // pool holds exactly 900 bytes, so only one of the two 900-byte payloads fits at a time
    MemoryPool pool = new SimpleMemoryPool(900, 900, false, null);
    // the initial channel builder is for clients, we need a server one
    File trustStoreFile = File.createTempFile("truststore", ".jks");
    Map<String, Object> sslServerConfigs = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStoreFile, "server");
    channelBuilder = new SslChannelBuilder(Mode.SERVER, null, false);
    channelBuilder.configure(sslServerConfigs);
    selector = new Selector(NetworkReceive.UNLIMITED, 5000, metrics, time, "MetricGroup", new HashMap<String, String>(), true, false, channelBuilder, pool, new LogContext());
    try (ServerSocketChannel ss = ServerSocketChannel.open()) {
        ss.bind(new InetSocketAddress(0));
        InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress();
        SslSender sender1 = createSender(serverAddress, randomPayload(900));
        SslSender sender2 = createSender(serverAddress, randomPayload(900));
        sender1.start();
        sender2.start();
        // accept order is not defined: either sender's connection may arrive first
        SocketChannel channelX = ss.accept();
        channelX.configureBlocking(false);
        SocketChannel channelY = ss.accept();
        channelY.configureBlocking(false);
        selector.register("clientX", channelX);
        selector.register("clientY", channelY);
        boolean handshaked = false;
        NetworkReceive firstReceive = null;
        long deadline = System.currentTimeMillis() + 5000;
        // poll until: 1. both senders have completed their SSL handshakes, and
        // 2. a single payload is actually read out completely (the other is too big to fit)
        while (System.currentTimeMillis() < deadline) {
            selector.poll(10);
            List<NetworkReceive> completed = selector.completedReceives();
            if (firstReceive == null) {
                if (!completed.isEmpty()) {
                    // the first completed receive should exhaust the pool entirely
                    assertEquals("expecting a single request", 1, completed.size());
                    firstReceive = completed.get(0);
                    assertTrue(selector.isMadeReadProgressLastPoll());
                    assertEquals(0, pool.availableMemory());
                }
            } else {
                // once the pool is exhausted, no further receives may complete
                assertTrue("only expecting single request", completed.isEmpty());
            }
            handshaked = sender1.waitForHandshake(1) && sender2.waitForHandshake(1);
            if (handshaked && firstReceive != null && selector.isOutOfMemory())
                break;
        }
        assertTrue("could not initiate connections within timeout", handshaked);
        // an extra poll while out of memory must still complete nothing
        selector.poll(10);
        assertTrue(selector.completedReceives().isEmpty());
        assertEquals(0, pool.availableMemory());
        assertNotNull("First receive not complete", firstReceive);
        assertTrue("Selector not out of memory", selector.isOutOfMemory());
        firstReceive.close();
        // memory has been released back to pool
        assertEquals(900, pool.availableMemory());
        // with memory available again, the second payload can now be read
        List<NetworkReceive> completed = Collections.emptyList();
        deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        assertEquals("could not read remaining request within timeout", 1, completed.size());
        assertEquals(0, pool.availableMemory());
        assertFalse(selector.isOutOfMemory());
    }
}
Also used : SocketChannel(java.nio.channels.SocketChannel) ServerSocketChannel(java.nio.channels.ServerSocketChannel) HashMap(java.util.HashMap) InetSocketAddress(java.net.InetSocketAddress) LogContext(org.apache.kafka.common.utils.LogContext) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool) File(java.io.File) ServerSocketChannel(java.nio.channels.ServerSocketChannel) MemoryPool(org.apache.kafka.common.memory.MemoryPool) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool)

Example 14 with LogContext

use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

the class StreamTaskTest method shouldFlushRecordCollectorOnFlushState.

@Test
public void shouldFlushRecordCollectorOnFlushState() {
    final AtomicBoolean collectorFlushed = new AtomicBoolean(false);
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    // Task whose record collector simply records that flush() was invoked.
    final StreamTask task = new StreamTask(taskId00, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer) {

        @Override
        RecordCollector createRecordCollector(final LogContext logContext, final ProductionExceptionHandler exHandler) {
            return new NoOpRecordCollector() {

                @Override
                public void flush() {
                    collectorFlushed.set(true);
                }
            };
        }
    };
    // Flushing the task state must propagate down to the record collector.
    task.flushState();
    assertTrue(collectorFlushed.get());
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) NoOpRecordCollector(org.apache.kafka.test.NoOpRecordCollector) LogContext(org.apache.kafka.common.utils.LogContext) ProductionExceptionHandler(org.apache.kafka.streams.errors.ProductionExceptionHandler) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) Test(org.junit.Test)

Example 15 with LogContext

use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

the class StreamThreadTest method shouldNotCommitBeforeTheCommitInterval.

@SuppressWarnings({ "unchecked", "ThrowableNotThrown" })
@Test
public void shouldNotCommitBeforeTheCommitInterval() {
    final long commitInterval = 1000L;

    // Stream config with an explicit commit interval.
    final Properties props = configProps(false);
    props.setProperty(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval));
    final StreamsConfig config = new StreamsConfig(props);

    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    // NOTE(review): presumably the (1, 1) args configure the mock to expect a single commit — verify against mockTaskManagerCommit
    final TaskManager taskManager = mockTaskManagerCommit(consumer, 1, 1);

    StreamThread.StreamsMetricsThreadImpl streamsMetrics = new StreamThread.StreamsMetricsThreadImpl(metrics, "", "", Collections.<String, String>emptyMap());
    final StreamThread thread = new StreamThread(mockTime, config, consumer, consumer, null, taskManager, streamsMetrics, internalTopologyBuilder, clientId, new LogContext(""));

    // Advance time by less than the commit interval between the two calls; the
    // mock verification fails if a second commit happens.
    thread.maybeCommit(mockTime.milliseconds());
    mockTime.sleep(commitInterval - 10L);
    thread.maybeCommit(mockTime.milliseconds());
    EasyMock.verify(taskManager);
}
Also used : LogContext(org.apache.kafka.common.utils.LogContext) Properties(java.util.Properties) StreamsConfig(org.apache.kafka.streams.StreamsConfig) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)

Aggregations

LogContext (org.apache.kafka.common.utils.LogContext)53 Metrics (org.apache.kafka.common.metrics.Metrics)28 Test (org.junit.Test)27 Before (org.junit.Before)17 InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext)14 MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics)13 NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector)10 HashMap (java.util.HashMap)8 MockTime (org.apache.kafka.common.utils.MockTime)8 InetSocketAddress (java.net.InetSocketAddress)7 Properties (java.util.Properties)7 StreamsConfig (org.apache.kafka.streams.StreamsConfig)7 TopicPartition (org.apache.kafka.common.TopicPartition)6 File (java.io.File)5 NetworkClient (org.apache.kafka.clients.NetworkClient)5 InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest)5 ServerSocketChannel (java.nio.channels.ServerSocketChannel)4 SocketChannel (java.nio.channels.SocketChannel)4 Metadata (org.apache.kafka.clients.Metadata)4 MockClient (org.apache.kafka.clients.MockClient)4