Search in sources:

Example 1 with SimpleMemoryPool

Use of org.apache.kafka.common.memory.SimpleMemoryPool in the project apache-kafka-on-k8s by banzaicloud.

The class SelectorTest defines the method testMuteOnOOM.

/**
 * Verifies that the selector mutes channels when the memory pool is exhausted
 * and resumes reading once the buffered receive is closed and its memory is
 * released back to the pool.
 *
 * Two 900-byte payloads are sent into a selector backed by a 900-byte
 * SimpleMemoryPool, so only one payload can be buffered at a time.
 */
@Test
public void testMuteOnOOM() throws Exception {
    // clean up default selector, replace it with one that uses a finite mem pool
    selector.close();
    MemoryPool pool = new SimpleMemoryPool(900, 900, false, null);
    selector = new Selector(NetworkReceive.UNLIMITED, 5000, metrics, time, "MetricGroup", new HashMap<String, String>(), true, false, channelBuilder, pool, new LogContext());
    try (ServerSocketChannel ss = ServerSocketChannel.open()) {
        ss.bind(new InetSocketAddress(0));
        InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress();
        Thread sender1 = createSender(serverAddress, randomPayload(900));
        Thread sender2 = createSender(serverAddress, randomPayload(900));
        sender1.start();
        sender2.start();
        // wait until everything has been flushed out to network (assuming payload size is smaller than OS buffer size)
        // this is important because we assume both requests' prefixes (1st 4 bytes) have made it.
        sender1.join(5000);
        sender2.join(5000);
        // not defined if its 1 or 2
        SocketChannel channelX = ss.accept();
        channelX.configureBlocking(false);
        SocketChannel channelY = ss.accept();
        channelY.configureBlocking(false);
        selector.register("clientX", channelX);
        selector.register("clientY", channelY);
        List<NetworkReceive> completed = Collections.emptyList();
        long deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        assertEquals("could not read a single request within timeout", 1, completed.size());
        NetworkReceive firstReceive = completed.get(0);
        // the single 900-byte payload consumes the entire 900-byte pool,
        // so the selector must report out-of-memory
        assertEquals(0, pool.availableMemory());
        assertTrue(selector.isOutOfMemory());
        // while out of memory, further polls must not complete any receives
        selector.poll(10);
        assertTrue(selector.completedReceives().isEmpty());
        assertEquals(0, pool.availableMemory());
        assertTrue(selector.isOutOfMemory());
        firstReceive.close();
        // memory has been released back to pool
        assertEquals(900, pool.availableMemory());
        completed = Collections.emptyList();
        deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        // fixed: message previously duplicated the first assertion's text
        // ("a single request"); also assert on `completed` (the loop's exit
        // condition) rather than re-querying the selector, matching the
        // first read's assertion style.
        assertEquals("could not read remaining request within timeout", 1, completed.size());
        assertEquals(0, pool.availableMemory());
        assertFalse(selector.isOutOfMemory());
    }
}
Also used : SocketChannel(java.nio.channels.SocketChannel) ServerSocketChannel(java.nio.channels.ServerSocketChannel) HashMap(java.util.HashMap) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool) InetSocketAddress(java.net.InetSocketAddress) LogContext(org.apache.kafka.common.utils.LogContext) ServerSocketChannel(java.nio.channels.ServerSocketChannel) MemoryPool(org.apache.kafka.common.memory.MemoryPool) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool) Test(org.junit.Test)

Example 2 with SimpleMemoryPool

Use of org.apache.kafka.common.memory.SimpleMemoryPool in the project apache-kafka-on-k8s by banzaicloud.

The class SslSelectorTest overrides the method testMuteOnOOM.

/**
 * SSL variant of the mute-on-OOM test: two 900-byte payloads are sent over
 * TLS into a selector backed by a 900-byte SimpleMemoryPool, so only one
 * payload fits in memory at a time. Unlike the plaintext version, the TLS
 * handshake progress must be driven by the same poll loop, so reading and
 * handshaking are interleaved in a single deadline-bounded loop.
 */
@Override
public void testMuteOnOOM() throws Exception {
    // clean up default selector, replace it with one that uses a finite mem pool
    selector.close();
    MemoryPool pool = new SimpleMemoryPool(900, 900, false, null);
    // the initial channel builder is for clients, we need a server one
    File trustStoreFile = File.createTempFile("truststore", ".jks");
    Map<String, Object> sslServerConfigs = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStoreFile, "server");
    channelBuilder = new SslChannelBuilder(Mode.SERVER, null, false);
    channelBuilder.configure(sslServerConfigs);
    selector = new Selector(NetworkReceive.UNLIMITED, 5000, metrics, time, "MetricGroup", new HashMap<String, String>(), true, false, channelBuilder, pool, new LogContext());
    try (ServerSocketChannel ss = ServerSocketChannel.open()) {
        ss.bind(new InetSocketAddress(0));
        InetSocketAddress serverAddress = (InetSocketAddress) ss.getLocalAddress();
        SslSender sender1 = createSender(serverAddress, randomPayload(900));
        SslSender sender2 = createSender(serverAddress, randomPayload(900));
        sender1.start();
        sender2.start();
        // not defined if its 1 or 2
        SocketChannel channelX = ss.accept();
        channelX.configureBlocking(false);
        SocketChannel channelY = ss.accept();
        channelY.configureBlocking(false);
        selector.register("clientX", channelX);
        selector.register("clientY", channelY);
        boolean handshaked = false;
        NetworkReceive firstReceive = null;
        long deadline = System.currentTimeMillis() + 5000;
        // keep polling until, before the deadline:
        // 1. both senders have completed their TLS handshakes (checked via waitForHandshake below)
        // 2. a single payload is actually read out completely (the other is too big to fit)
        while (System.currentTimeMillis() < deadline) {
            selector.poll(10);
            List<NetworkReceive> completed = selector.completedReceives();
            if (firstReceive == null) {
                if (!completed.isEmpty()) {
                    assertEquals("expecting a single request", 1, completed.size());
                    firstReceive = completed.get(0);
                    assertTrue(selector.isMadeReadProgressLastPoll());
                    // first 900-byte payload consumed the whole 900-byte pool
                    assertEquals(0, pool.availableMemory());
                }
            } else {
                // pool exhausted: no further receives may complete
                assertTrue("only expecting single request", completed.isEmpty());
            }
            handshaked = sender1.waitForHandshake(1) && sender2.waitForHandshake(1);
            if (handshaked && firstReceive != null && selector.isOutOfMemory())
                break;
        }
        assertTrue("could not initiate connections within timeout", handshaked);
        // while out of memory, an extra poll must not complete any receives
        selector.poll(10);
        assertTrue(selector.completedReceives().isEmpty());
        assertEquals(0, pool.availableMemory());
        assertNotNull("First receive not complete", firstReceive);
        assertTrue("Selector not out of memory", selector.isOutOfMemory());
        firstReceive.close();
        // memory has been released back to pool
        assertEquals(900, pool.availableMemory());
        // with memory available again, the second payload should now be read
        List<NetworkReceive> completed = Collections.emptyList();
        deadline = System.currentTimeMillis() + 5000;
        while (System.currentTimeMillis() < deadline && completed.isEmpty()) {
            selector.poll(1000);
            completed = selector.completedReceives();
        }
        assertEquals("could not read remaining request within timeout", 1, completed.size());
        assertEquals(0, pool.availableMemory());
        assertFalse(selector.isOutOfMemory());
    }
}
Also used : SocketChannel(java.nio.channels.SocketChannel) ServerSocketChannel(java.nio.channels.ServerSocketChannel) HashMap(java.util.HashMap) InetSocketAddress(java.net.InetSocketAddress) LogContext(org.apache.kafka.common.utils.LogContext) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool) File(java.io.File) ServerSocketChannel(java.nio.channels.ServerSocketChannel) MemoryPool(org.apache.kafka.common.memory.MemoryPool) SimpleMemoryPool(org.apache.kafka.common.memory.SimpleMemoryPool)

Aggregations

InetSocketAddress (java.net.InetSocketAddress)2 ServerSocketChannel (java.nio.channels.ServerSocketChannel)2 SocketChannel (java.nio.channels.SocketChannel)2 HashMap (java.util.HashMap)2 MemoryPool (org.apache.kafka.common.memory.MemoryPool)2 SimpleMemoryPool (org.apache.kafka.common.memory.SimpleMemoryPool)2 LogContext (org.apache.kafka.common.utils.LogContext)2 File (java.io.File)1 Test (org.junit.Test)1