Example 6 with KvStateID

Use of org.apache.flink.runtime.query.KvStateID in project flink by apache.

From class JobManagerTest, method testKvStateMessages.

/**
	 * Tests that the JobManager handles {@link org.apache.flink.runtime.query.KvStateMessage}
	 * instances as expected.
	 */
@Test
public void testKvStateMessages() throws Exception {
    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();
    Configuration config = new Configuration();
    config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "100ms");
    UUID leaderSessionId = null;
    ActorGateway jobManager = new AkkaActorGateway(JobManager.startJobManagerActors(config, system, TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(), TestingJobManager.class, MemoryArchivist.class)._1(), leaderSessionId);
    LeaderRetrievalService leaderRetrievalService = new StandaloneLeaderRetrievalService(AkkaUtils.getAkkaURL(system, jobManager.actor()));
    Configuration tmConfig = new Configuration();
    tmConfig.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 4);
    tmConfig.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 8);
    ActorRef taskManager = TaskManager.startTaskManagerComponentsAndActor(tmConfig, ResourceID.generate(), system, "localhost", scala.Option.<String>empty(), scala.Option.apply(leaderRetrievalService), true, TestingTaskManager.class);
    Future<Object> registrationFuture = jobManager.ask(new NotifyWhenAtLeastNumTaskManagerAreRegistered(1), deadline.timeLeft());
    Await.ready(registrationFuture, deadline.timeLeft());
    //
    // Location lookup
    //
    LookupKvStateLocation lookupNonExistingJob = new LookupKvStateLocation(new JobID(), "any-name");
    Future<KvStateLocation> lookupFuture = jobManager.ask(lookupNonExistingJob, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (IllegalStateException ignored) {
        // Expected
    }
    JobGraph jobGraph = new JobGraph("croissant");
    JobVertex jobVertex1 = new JobVertex("cappuccino");
    jobVertex1.setParallelism(4);
    jobVertex1.setMaxParallelism(16);
    jobVertex1.setInvokableClass(BlockingNoOpInvokable.class);
    JobVertex jobVertex2 = new JobVertex("americano");
    jobVertex2.setParallelism(4);
    jobVertex2.setMaxParallelism(16);
    jobVertex2.setInvokableClass(BlockingNoOpInvokable.class);
    jobGraph.addVertex(jobVertex1);
    jobGraph.addVertex(jobVertex2);
    Future<JobSubmitSuccess> submitFuture = jobManager.ask(new SubmitJob(jobGraph, ListeningBehaviour.DETACHED), deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<JobSubmitSuccess>apply(JobSubmitSuccess.class));
    Await.result(submitFuture, deadline.timeLeft());
    Object lookupUnknownRegistrationName = new LookupKvStateLocation(jobGraph.getJobID(), "unknown");
    lookupFuture = jobManager.ask(lookupUnknownRegistrationName, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (UnknownKvStateLocation ignored) {
        // Expected
    }
    //
    // Registration
    //
    NotifyKvStateRegistered registerNonExistingJob = new NotifyKvStateRegistered(new JobID(), new JobVertexID(), new KeyGroupRange(0, 0), "any-name", new KvStateID(), new KvStateServerAddress(InetAddress.getLocalHost(), 1233));
    jobManager.tell(registerNonExistingJob);
    LookupKvStateLocation lookupAfterRegistration = new LookupKvStateLocation(registerNonExistingJob.getJobId(), registerNonExistingJob.getRegistrationName());
    lookupFuture = jobManager.ask(lookupAfterRegistration, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (IllegalStateException ignored) {
        // Expected
    }
    NotifyKvStateRegistered registerForExistingJob = new NotifyKvStateRegistered(jobGraph.getJobID(), jobVertex1.getID(), new KeyGroupRange(0, 0), "register-me", new KvStateID(), new KvStateServerAddress(InetAddress.getLocalHost(), 1293));
    jobManager.tell(registerForExistingJob);
    lookupAfterRegistration = new LookupKvStateLocation(registerForExistingJob.getJobId(), registerForExistingJob.getRegistrationName());
    lookupFuture = jobManager.ask(lookupAfterRegistration, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    KvStateLocation location = Await.result(lookupFuture, deadline.timeLeft());
    assertNotNull(location);
    assertEquals(jobGraph.getJobID(), location.getJobId());
    assertEquals(jobVertex1.getID(), location.getJobVertexId());
    assertEquals(jobVertex1.getMaxParallelism(), location.getNumKeyGroups());
    assertEquals(1, location.getNumRegisteredKeyGroups());
    KeyGroupRange keyGroupRange = registerForExistingJob.getKeyGroupRange();
    assertEquals(1, keyGroupRange.getNumberOfKeyGroups());
    assertEquals(registerForExistingJob.getKvStateId(), location.getKvStateID(keyGroupRange.getStartKeyGroup()));
    assertEquals(registerForExistingJob.getKvStateServerAddress(), location.getKvStateServerAddress(keyGroupRange.getStartKeyGroup()));
    //
    // Unregistration
    //
    NotifyKvStateUnregistered unregister = new NotifyKvStateUnregistered(registerForExistingJob.getJobId(), registerForExistingJob.getJobVertexId(), registerForExistingJob.getKeyGroupRange(), registerForExistingJob.getRegistrationName());
    jobManager.tell(unregister);
    lookupFuture = jobManager.ask(lookupAfterRegistration, deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<KvStateLocation>apply(KvStateLocation.class));
    try {
        Await.result(lookupFuture, deadline.timeLeft());
        fail("Did not throw expected Exception");
    } catch (UnknownKvStateLocation ignored) {
        // Expected
    }
    //
    // Duplicate registration fails task
    //
    NotifyKvStateRegistered register = new NotifyKvStateRegistered(jobGraph.getJobID(), jobVertex1.getID(), new KeyGroupRange(0, 0), "duplicate-me", new KvStateID(), new KvStateServerAddress(InetAddress.getLocalHost(), 1293));
    NotifyKvStateRegistered duplicate = new NotifyKvStateRegistered(
            jobGraph.getJobID(),
            jobVertex2.getID(), // <--- different operator, but...
            new KeyGroupRange(0, 0),
            "duplicate-me", // ...same name
            new KvStateID(),
            new KvStateServerAddress(InetAddress.getLocalHost(), 1293));
    Future<TestingJobManagerMessages.JobStatusIs> failedFuture = jobManager.ask(new NotifyWhenJobStatus(jobGraph.getJobID(), JobStatus.FAILED), deadline.timeLeft()).mapTo(ClassTag$.MODULE$.<JobStatusIs>apply(JobStatusIs.class));
    jobManager.tell(register);
    jobManager.tell(duplicate);
    // Wait for failure
    JobStatusIs jobStatus = Await.result(failedFuture, deadline.timeLeft());
    assertEquals(JobStatus.FAILED, jobStatus.state());
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) Configuration(org.apache.flink.configuration.Configuration) UnknownKvStateLocation(org.apache.flink.runtime.query.UnknownKvStateLocation) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) KvStateServerAddress(org.apache.flink.runtime.query.KvStateServerAddress) LookupKvStateLocation(org.apache.flink.runtime.query.KvStateMessage.LookupKvStateLocation) KvStateLocation(org.apache.flink.runtime.query.KvStateLocation) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) JobSubmitSuccess(org.apache.flink.runtime.messages.JobManagerMessages.JobSubmitSuccess) KvStateID(org.apache.flink.runtime.query.KvStateID) UUID(java.util.UUID) SubmitJob(org.apache.flink.runtime.messages.JobManagerMessages.SubmitJob) NotifyKvStateRegistered(org.apache.flink.runtime.query.KvStateMessage.NotifyKvStateRegistered) NotifyKvStateUnregistered(org.apache.flink.runtime.query.KvStateMessage.NotifyKvStateUnregistered) JobStatusIs(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.JobStatusIs) Deadline(scala.concurrent.duration.Deadline) FiniteDuration(scala.concurrent.duration.FiniteDuration) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) LeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService) StandaloneLeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.StandaloneLeaderRetrievalService) NotifyWhenAtLeastNumTaskManagerAreRegistered(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.NotifyWhenAtLeastNumTaskManagerAreRegistered) JobID(org.apache.flink.api.common.JobID) NotifyWhenJobStatus(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.NotifyWhenJobStatus) Test(org.junit.Test)
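
A successful lookup, as exercised at the end of the registration section above, yields a KvStateLocation that maps each key group to the KvStateID and KvStateServerAddress registered for it. The following is a minimal sketch (not taken from the Flink sources) of how a caller might resolve the query target for a concrete key: it assumes a key variable and the KeyGroupRangeAssignment.assignToKeyGroup helper for mapping a key to its key group, while the KvStateLocation accessors are the ones used in the assertions above.

    // Sketch only: resolve where the state for `key` lives, given a resolved KvStateLocation.
    // KeyGroupRangeAssignment.assignToKeyGroup(key, maxParallelism) is assumed to be available.
    KvStateLocation location = Await.result(lookupFuture, deadline.timeLeft());
    int keyGroup = KeyGroupRangeAssignment.assignToKeyGroup(key, location.getNumKeyGroups());
    KvStateID kvStateId = location.getKvStateID(keyGroup);
    KvStateServerAddress serverAddress = location.getKvStateServerAddress(keyGroup);
    // The (serverAddress, kvStateId) pair is what a KvStateClient needs to issue the actual query.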

Example 7 with KvStateID

Use of org.apache.flink.runtime.query.KvStateID in project flink by apache.

From class KvStateRequestSerializer, method deserializeKvStateRequest.

/**
	 * Deserializes the KvState request message.
	 *
	 * <p><strong>Important</strong>: the returned buffer is sliced from the
	 * incoming ByteBuf stream and retained. Therefore, it needs to be recycled
	 * by the consumer.
	 *
	 * @param buf Buffer to deserialize (expected to be positioned after header)
	 * @return Deserialized KvStateRequest
	 */
public static KvStateRequest deserializeKvStateRequest(ByteBuf buf) {
    long requestId = buf.readLong();
    KvStateID kvStateId = new KvStateID(buf.readLong(), buf.readLong());
    // Serialized key and namespace
    int length = buf.readInt();
    if (length < 0) {
        throw new IllegalArgumentException("Negative length for serialized key and namespace. " + "This indicates a serialization error.");
    }
    // Copy the buffer in order to be able to safely recycle the ByteBuf
    byte[] serializedKeyAndNamespace = new byte[length];
    if (length > 0) {
        buf.readBytes(serializedKeyAndNamespace);
    }
    return new KvStateRequest(requestId, kvStateId, serializedKeyAndNamespace);
}
Also used : KvStateID(org.apache.flink.runtime.query.KvStateID)
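
Because the method reads a fixed layout (a long request id, two longs forming the KvStateID, then a length-prefixed byte array holding the serialized key and namespace), it can be exercised by hand-assembling a buffer in exactly that order. A small illustrative sketch, assuming only Netty's Unpooled allocator and the request.getRequestId() accessor that appears in the client tests below; it skips the message header, matching the "positioned after header" contract.

    // Sketch: build a payload in the layout deserializeKvStateRequest reads back.
    ByteBuf buf = Unpooled.buffer();
    buf.writeLong(42L); // request id
    buf.writeLong(0L); // KvStateID, first long
    buf.writeLong(1L); // KvStateID, second long
    byte[] keyAndNamespace = new byte[] { 1, 2, 3 };
    buf.writeInt(keyAndNamespace.length); // length prefix; a negative value is rejected
    buf.writeBytes(keyAndNamespace); // serialized key and namespace
    KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);
    assertEquals(42L, request.getRequestId());
    buf.release();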

Example 8 with KvStateID

Use of org.apache.flink.runtime.query.KvStateID in project flink by apache.

From class KvStateClientTest, method testClientServerIntegration.

/**
	 * Tests multiple clients querying multiple servers until 100k queries have
	 * been processed. At this point, the client is shut down and it is verified
	 * that all ongoing requests fail.
	 */
@Test
public void testClientServerIntegration() throws Exception {
    // Config
    final int numServers = 2;
    final int numServerEventLoopThreads = 2;
    final int numServerQueryThreads = 2;
    final int numClientEventLoopThreads = 4;
    final int numClientsTasks = 8;
    final int batchSize = 16;
    final int numKeyGroups = 1;
    AbstractStateBackend abstractBackend = new MemoryStateBackend();
    KvStateRegistry dummyRegistry = new KvStateRegistry();
    DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
    dummyEnv.setKvStateRegistry(dummyRegistry);
    AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(dummyEnv, new JobID(), "test_op", IntSerializer.INSTANCE, numKeyGroups, new KeyGroupRange(0, 0), dummyRegistry.createTaskRegistry(new JobID(), new JobVertexID()));
    final FiniteDuration timeout = new FiniteDuration(10, TimeUnit.SECONDS);
    AtomicKvStateRequestStats clientStats = new AtomicKvStateRequestStats();
    KvStateClient client = null;
    ExecutorService clientTaskExecutor = null;
    final KvStateServer[] server = new KvStateServer[numServers];
    try {
        client = new KvStateClient(numClientEventLoopThreads, clientStats);
        clientTaskExecutor = Executors.newFixedThreadPool(numClientsTasks);
        // Create state
        ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
        desc.setQueryable("any");
        // Create servers
        KvStateRegistry[] registry = new KvStateRegistry[numServers];
        AtomicKvStateRequestStats[] serverStats = new AtomicKvStateRequestStats[numServers];
        final KvStateID[] ids = new KvStateID[numServers];
        for (int i = 0; i < numServers; i++) {
            registry[i] = new KvStateRegistry();
            serverStats[i] = new AtomicKvStateRequestStats();
            server[i] = new KvStateServer(InetAddress.getLocalHost(), 0, numServerEventLoopThreads, numServerQueryThreads, registry[i], serverStats[i]);
            server[i].start();
            backend.setCurrentKey(1010 + i);
            // Value per server
            ValueState<Integer> state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
            state.update(201 + i);
            // we know it must be a KvState, but this is not exposed to the user via the State interface
            InternalKvState<?> kvState = (InternalKvState<?>) state;
            // Register KvState (one state instance shared by all servers)
            ids[i] = registry[i].registerKvState(new JobID(), new JobVertexID(), new KeyGroupRange(0, 0), "any", kvState);
        }
        final KvStateClient finalClient = client;
        Callable<Void> queryTask = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                while (true) {
                    if (Thread.interrupted()) {
                        throw new InterruptedException();
                    }
                    // Random server permutation
                    List<Integer> random = new ArrayList<>();
                    for (int j = 0; j < batchSize; j++) {
                        random.add(j);
                    }
                    Collections.shuffle(random);
                    // Dispatch queries
                    List<Future<byte[]>> futures = new ArrayList<>(batchSize);
                    for (int j = 0; j < batchSize; j++) {
                        int targetServer = random.get(j) % numServers;
                        byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(1010 + targetServer, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);
                        futures.add(finalClient.getKvState(server[targetServer].getAddress(), ids[targetServer], serializedKeyAndNamespace));
                    }
                    // Verify results
                    for (int j = 0; j < batchSize; j++) {
                        int targetServer = random.get(j) % numServers;
                        Future<byte[]> future = futures.get(j);
                        byte[] buf = Await.result(future, timeout);
                        int value = KvStateRequestSerializer.deserializeValue(buf, IntSerializer.INSTANCE);
                        assertEquals(201 + targetServer, value);
                    }
                }
            }
        };
        // Submit tasks
        List<java.util.concurrent.Future<Void>> taskFutures = new ArrayList<>();
        for (int i = 0; i < numClientsTasks; i++) {
            taskFutures.add(clientTaskExecutor.submit(queryTask));
        }
        long numRequests;
        while ((numRequests = clientStats.getNumRequests()) < 100_000) {
            Thread.sleep(100);
            LOG.info("Number of requests {}/100_000", numRequests);
        }
        // Shut down
        client.shutDown();
        for (java.util.concurrent.Future<Void> future : taskFutures) {
            try {
                future.get();
                fail("Did not throw expected Exception after shut down");
            } catch (ExecutionException t) {
                if (t.getCause() instanceof ClosedChannelException || t.getCause() instanceof IllegalStateException) {
                    // Expected
                } else {
                    t.printStackTrace();
                    fail("Failed with unexpected Exception type: " + t.getClass().getName());
                }
            }
        }
        assertEquals("Connection leak (client)", 0, clientStats.getNumConnections());
        for (int i = 0; i < numServers; i++) {
            boolean success = false;
            int numRetries = 0;
            while (!success) {
                try {
                    assertEquals("Connection leak (server)", 0, serverStats[i].getNumConnections());
                    success = true;
                } catch (Throwable t) {
                    if (numRetries < 10) {
                        LOG.info("Retrying connection leak check (server)");
                        Thread.sleep((numRetries + 1) * 50);
                        numRetries++;
                    } else {
                        throw t;
                    }
                }
            }
        }
    } finally {
        if (client != null) {
            client.shutDown();
        }
        for (int i = 0; i < numServers; i++) {
            if (server[i] != null) {
                server[i].shutDown();
            }
        }
        if (clientTaskExecutor != null) {
            clientTaskExecutor.shutdown();
        }
    }
}
Also used : KvStateRegistry(org.apache.flink.runtime.query.KvStateRegistry) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable) ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) KvStateID(org.apache.flink.runtime.query.KvStateID) ExecutionException(java.util.concurrent.ExecutionException) AbstractStateBackend(org.apache.flink.runtime.state.AbstractStateBackend) ClosedChannelException(java.nio.channels.ClosedChannelException) FiniteDuration(scala.concurrent.duration.FiniteDuration) DummyEnvironment(org.apache.flink.runtime.operators.testutils.DummyEnvironment) InternalKvState(org.apache.flink.runtime.state.internal.InternalKvState) ExecutorService(java.util.concurrent.ExecutorService) Future(scala.concurrent.Future) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
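
Stripped of the concurrency, retry, and shutdown machinery, the query path this test exercises reduces to a handful of calls that all appear above. A hedged sketch of a single round trip against the first server; the names client, server, ids, and timeout refer to the variables set up in the test, and server[0] was populated with value 201 under key 1010.

    // Sketch: one query against server[0], which holds value 201 for key 1010.
    byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(
            1010, IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);
    Future<byte[]> future = client.getKvState(server[0].getAddress(), ids[0], serializedKeyAndNamespace);
    byte[] serializedValue = Await.result(future, timeout);
    int value = KvStateRequestSerializer.deserializeValue(serializedValue, IntSerializer.INSTANCE);
    assertEquals(201, value);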

Example 9 with KvStateID

Use of org.apache.flink.runtime.query.KvStateID in project flink by apache.

From class KvStateClientTest, method testConcurrentQueries.

/**
	 * Multiple threads concurrently fire queries.
	 */
@Test
public void testConcurrentQueries() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    ExecutorService executor = null;
    KvStateClient client = null;
    Channel serverChannel = null;
    final byte[] serializedResult = new byte[1024];
    ThreadLocalRandom.current().nextBytes(serializedResult);
    try {
        int numQueryTasks = 4;
        final int numQueriesPerTask = 1024;
        executor = Executors.newFixedThreadPool(numQueryTasks);
        client = new KvStateClient(1, stats);
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                ByteBuf buf = (ByteBuf) msg;
                assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
                KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);
                buf.release();
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(ctx.alloc(), request.getRequestId(), serializedResult);
                ctx.channel().writeAndFlush(response);
            }
        });
        final KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
        final KvStateClient finalClient = client;
        Callable<List<Future<byte[]>>> queryTask = new Callable<List<Future<byte[]>>>() {

            @Override
            public List<Future<byte[]>> call() throws Exception {
                List<Future<byte[]>> results = new ArrayList<>(numQueriesPerTask);
                for (int i = 0; i < numQueriesPerTask; i++) {
                    results.add(finalClient.getKvState(serverAddress, new KvStateID(), new byte[0]));
                }
                return results;
            }
        };
        // Submit query tasks
        List<java.util.concurrent.Future<List<Future<byte[]>>>> futures = new ArrayList<>();
        for (int i = 0; i < numQueryTasks; i++) {
            futures.add(executor.submit(queryTask));
        }
        // Verify results
        for (java.util.concurrent.Future<List<Future<byte[]>>> future : futures) {
            List<Future<byte[]>> results = future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            for (Future<byte[]> result : results) {
                byte[] actual = Await.result(result, deadline.timeLeft());
                assertArrayEquals(serializedResult, actual);
            }
        }
        int totalQueries = numQueryTasks * numQueriesPerTask;
        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && stats.getNumSuccessful() != totalQueries) {
            Thread.sleep(100);
        }
        assertEquals(totalQueries, stats.getNumRequests());
        assertEquals(totalQueries, stats.getNumSuccessful());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        if (client != null) {
            client.shutDown();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
Also used : ArrayList(java.util.ArrayList) KvStateServerAddress(org.apache.flink.runtime.query.KvStateServerAddress) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) ByteBuf(io.netty.buffer.ByteBuf) Callable(java.util.concurrent.Callable) KvStateID(org.apache.flink.runtime.query.KvStateID) List(java.util.List) KvStateRequest(org.apache.flink.runtime.query.netty.message.KvStateRequest) Deadline(scala.concurrent.duration.Deadline) SocketChannel(io.netty.channel.socket.SocketChannel) NioServerSocketChannel(io.netty.channel.socket.nio.NioServerSocketChannel) Channel(io.netty.channel.Channel) ExecutorService(java.util.concurrent.ExecutorService) Future(scala.concurrent.Future) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) Test(org.junit.Test)
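
The createServerChannel(...) and getKvStateServerAddress(...) helpers used by this test are not part of this excerpt. For orientation, here is a hypothetical sketch of what such a helper could look like: it binds a Netty server on an ephemeral local port and installs the supplied handler. ServerBootstrap, NioEventLoopGroup, and ChannelInitializer are standard Netty classes; the actual helper in KvStateClientTest may differ.

// Hypothetical helper, not the one from KvStateClientTest: start a local Netty server
// with the given handler and return its channel (event loop group shutdown omitted).
private Channel createServerChannel(final ChannelHandler handler) throws Exception {
    NioEventLoopGroup group = new NioEventLoopGroup(1);
    ServerBootstrap bootstrap = new ServerBootstrap()
            .group(group)
            .channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {

                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(handler);
                }
            });
    return bootstrap.bind(InetAddress.getLocalHost(), 0).sync().channel();
}

The companion getKvStateServerAddress(...) presumably just reads the channel's bound local address and wraps it in a KvStateServerAddress.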

Example 10 with KvStateID

Use of org.apache.flink.runtime.query.KvStateID in project flink by apache.

From class KvStateClientTest, method testSimpleRequests.

/**
	 * Tests simple queries, of which half succeed and half fail.
	 */
@Test
public void testSimpleRequests() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    KvStateClient client = null;
    Channel serverChannel = null;
    try {
        client = new KvStateClient(1, stats);
        // Random result
        final byte[] expected = new byte[1024];
        ThreadLocalRandom.current().nextBytes(expected);
        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {

            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.add((ByteBuf) msg);
            }
        });
        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
        List<Future<byte[]>> futures = new ArrayList<>();
        int numQueries = 1024;
        for (int i = 0; i < numQueries; i++) {
            futures.add(client.getKvState(serverAddress, new KvStateID(), new byte[0]));
        }
        // Respond to messages
        Exception testException = new RuntimeException("Expected test Exception");
        for (int i = 0; i < numQueries; i++) {
            ByteBuf buf = received.poll(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            assertNotNull("Receive timed out", buf);
            Channel ch = channel.get();
            assertNotNull("Channel not active", ch);
            assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
            KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);
            buf.release();
            if (i % 2 == 0) {
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(serverChannel.alloc(), request.getRequestId(), expected);
                ch.writeAndFlush(response);
            } else {
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestFailure(serverChannel.alloc(), request.getRequestId(), testException);
                ch.writeAndFlush(response);
            }
        }
        for (int i = 0; i < numQueries; i++) {
            if (i % 2 == 0) {
                byte[] serializedResult = Await.result(futures.get(i), deadline.timeLeft());
                assertArrayEquals(expected, serializedResult);
            } else {
                try {
                    Await.result(futures.get(i), deadline.timeLeft());
                    fail("Did not throw expected Exception");
                } catch (RuntimeException ignored) {
                    // Expected
                }
            }
        }
        assertEquals(numQueries, stats.getNumRequests());
        int expectedRequests = numQueries / 2;
        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != expectedRequests || stats.getNumFailed() != expectedRequests)) {
            Thread.sleep(100);
        }
        assertEquals(expectedRequests, stats.getNumSuccessful());
        assertEquals(expectedRequests, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
Also used : KvStateRequest(org.apache.flink.runtime.query.netty.message.KvStateRequest) Deadline(scala.concurrent.duration.Deadline) SocketChannel(io.netty.channel.socket.SocketChannel) NioServerSocketChannel(io.netty.channel.socket.nio.NioServerSocketChannel) Channel(io.netty.channel.Channel) ArrayList(java.util.ArrayList) KvStateServerAddress(org.apache.flink.runtime.query.KvStateServerAddress) AtomicReference(java.util.concurrent.atomic.AtomicReference) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) ByteBuf(io.netty.buffer.ByteBuf) ConnectException(java.net.ConnectException) ClosedChannelException(java.nio.channels.ClosedChannelException) UnknownHostException(java.net.UnknownHostException) ExecutionException(java.util.concurrent.ExecutionException) Future(scala.concurrent.Future) KvStateID(org.apache.flink.runtime.query.KvStateID) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) Test(org.junit.Test)

Aggregations

KvStateID (org.apache.flink.runtime.query.KvStateID): 13 usages
Test (org.junit.Test): 12 usages
ByteBuf (io.netty.buffer.ByteBuf): 8 usages
KvStateServerAddress (org.apache.flink.runtime.query.KvStateServerAddress): 6 usages
Deadline (scala.concurrent.duration.Deadline): 6 usages
Channel (io.netty.channel.Channel): 4 usages
ChannelHandlerContext (io.netty.channel.ChannelHandlerContext): 4 usages
ChannelInboundHandlerAdapter (io.netty.channel.ChannelInboundHandlerAdapter): 4 usages
SocketChannel (io.netty.channel.socket.SocketChannel): 4 usages
NioServerSocketChannel (io.netty.channel.socket.nio.NioServerSocketChannel): 4 usages
ArrayList (java.util.ArrayList): 4 usages
KvStateRegistry (org.apache.flink.runtime.query.KvStateRegistry): 4 usages
Future (scala.concurrent.Future): 4 usages
EmbeddedChannel (io.netty.channel.embedded.EmbeddedChannel): 3 usages
ClosedChannelException (java.nio.channels.ClosedChannelException): 3 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 3 usages
JobID (org.apache.flink.api.common.JobID): 3 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 3 usages
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange): 3 usages
ConnectException (java.net.ConnectException): 2 usages