
Example 1 with AtomicReference

Use of java.util.concurrent.atomic.AtomicReference in project flink by apache.

The class SystemProcessingTimeServiceTest, method testImmediateShutdown.

@Test
public void testImmediateShutdown() throws Exception {
    final Object lock = new Object();
    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final SystemProcessingTimeService timer = new SystemProcessingTimeService(new ReferenceSettingExceptionHandler(errorRef), lock);
    try {
        assertFalse(timer.isTerminated());
        final OneShotLatch latch = new OneShotLatch();
        // the task should trigger immediately and should block until terminated with interruption
        timer.registerTimer(System.currentTimeMillis(), new ProcessingTimeCallback() {

            @Override
            public void onProcessingTime(long timestamp) throws Exception {
                latch.trigger();
                Thread.sleep(100000000);
            }
        });
        latch.await();
        timer.shutdownService();
        //noinspection SynchronizationOnLocalVariableOrMethodParameter
        synchronized (lock) {
            assertTrue(timer.isTerminated());
        }
        try {
            timer.registerTimer(System.currentTimeMillis() + 1000, new ProcessingTimeCallback() {

                @Override
                public void onProcessingTime(long timestamp) {
                }
            });
            fail("should result in an exception");
        } catch (IllegalStateException e) {
        // expected
        }
        try {
            timer.scheduleAtFixedRate(new ProcessingTimeCallback() {

                @Override
                public void onProcessingTime(long timestamp) {
                }
            }, 0L, 100L);
            fail("should result in an exception");
        } catch (IllegalStateException e) {
        // expected
        }
        // the shutdown interrupted the sleeping callback, so an asynchronous error wrapping an InterruptedException must have been reported
        assertNotNull(errorRef.get());
        assertTrue(errorRef.get().getCause() instanceof InterruptedException);
        assertEquals(0, timer.getNumTasksScheduled());
    } finally {
        timer.shutdownService();
    }
}
Also used: AtomicReference (java.util.concurrent.atomic.AtomicReference), ReferenceSettingExceptionHandler (org.apache.flink.streaming.runtime.operators.TestProcessingTimeServiceTest.ReferenceSettingExceptionHandler), OneShotLatch (org.apache.flink.core.testutils.OneShotLatch), Test (org.junit.Test)
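
The core pattern here is independent of Flink: an AtomicReference<Throwable> lets the timer thread publish its failure so that the test thread can assert on it after shutdown. A minimal JDK-only sketch of this pattern is shown below; the hypothetical backgroundWork method and the plain Thread/CountDownLatch stand in for the Flink-specific SystemProcessingTimeService, ReferenceSettingExceptionHandler, and OneShotLatch.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class AsyncErrorCaptureSketch {

    public static void main(String[] args) throws Exception {
        final AtomicReference<Throwable> errorRef = new AtomicReference<>();
        final CountDownLatch done = new CountDownLatch(1);

        Thread worker = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    backgroundWork();
                } catch (Throwable t) {
                    // publish the failure so the main thread can see it
                    errorRef.set(t);
                } finally {
                    done.countDown();
                }
            }
        });
        worker.start();
        done.await();

        // the main thread can now assert on, or rethrow, the captured error
        if (errorRef.get() != null) {
            throw new AssertionError("async task failed", errorRef.get());
        }
    }

    // hypothetical placeholder for the code under test
    private static void backgroundWork() throws Exception {
    }
}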

Example 2 with AtomicReference

Use of java.util.concurrent.atomic.AtomicReference in project flink by apache.

The class SystemProcessingTimeServiceTest, method testScheduleAtFixedRateHoldsLock.

/**
	 * Tests that the scheduleAtFixedRate callback is invoked while holding the given lock.
	 */
@Test
public void testScheduleAtFixedRateHoldsLock() throws Exception {
    final Object lock = new Object();
    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final SystemProcessingTimeService timer = new SystemProcessingTimeService(new ReferenceSettingExceptionHandler(errorRef), lock);
    final OneShotLatch awaitCallback = new OneShotLatch();
    try {
        assertEquals(0, timer.getNumTasksScheduled());
        // schedule something
        ScheduledFuture<?> future = timer.scheduleAtFixedRate(new ProcessingTimeCallback() {

            @Override
            public void onProcessingTime(long timestamp) {
                assertTrue(Thread.holdsLock(lock));
                awaitCallback.trigger();
            }
        }, 0L, 100L);
        // wait until the first execution is active
        awaitCallback.await();
        // cancel periodic callback
        future.cancel(true);
        // check that no asynchronous error was reported
        if (errorRef.get() != null) {
            throw new Exception(errorRef.get());
        }
    } finally {
        timer.shutdownService();
    }
}
Also used: OneShotLatch (org.apache.flink.core.testutils.OneShotLatch), AtomicReference (java.util.concurrent.atomic.AtomicReference), ReferenceSettingExceptionHandler (org.apache.flink.streaming.runtime.operators.TestProcessingTimeServiceTest.ReferenceSettingExceptionHandler), Test (org.junit.Test)
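
Example 2 combines the same error-capturing AtomicReference with Thread.holdsLock to verify the locking contract of the callback. A JDK-only sketch of that check, with a plain ScheduledExecutorService and an explicit synchronized block standing in for SystemProcessingTimeService and its internal locking, might look like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class HoldsLockSketch {

    public static void main(String[] args) throws Exception {
        final Object lock = new Object();
        final AtomicReference<Throwable> errorRef = new AtomicReference<>();
        final CountDownLatch firstCall = new CountDownLatch(1);

        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        executor.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                // the real service acquires the lock before invoking the callback;
                // here we acquire it ourselves to keep the sketch self-contained
                synchronized (lock) {
                    try {
                        if (!Thread.holdsLock(lock)) {
                            throw new AssertionError("callback not called under the lock");
                        }
                    } catch (Throwable t) {
                        errorRef.set(t);
                    }
                }
                firstCall.countDown();
            }
        }, 0L, 100L, TimeUnit.MILLISECONDS);

        // wait until the first execution has run, then stop further executions
        firstCall.await();
        executor.shutdownNow();

        // rethrow any asynchronous failure on the main thread
        if (errorRef.get() != null) {
            throw new AssertionError(errorRef.get());
        }
    }
}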

Example 3 with AtomicReference

Use of java.util.concurrent.atomic.AtomicReference in project groovy by apache.

The class GenericsUtils, method parseClassNodesFromString.

public static ClassNode[] parseClassNodesFromString(final String option, final SourceUnit sourceUnit, final CompilationUnit compilationUnit, final MethodNode mn, final ASTNode usage) {
    GroovyLexer lexer = new GroovyLexer(new StringReader("DummyNode<" + option + ">"));
    final GroovyRecognizer rn = GroovyRecognizer.make(lexer);
    try {
        rn.classOrInterfaceType(true);
        final AtomicReference<ClassNode> ref = new AtomicReference<ClassNode>();
        AntlrParserPlugin plugin = new AntlrParserPlugin() {

            @Override
            public ModuleNode buildAST(final SourceUnit sourceUnit, final ClassLoader classLoader, final Reduction cst) throws ParserException {
                ref.set(makeTypeWithArguments(rn.getAST()));
                return null;
            }
        };
        plugin.buildAST(null, null, null);
        ClassNode parsedNode = ref.get();
        // the returned node is DummyNode<Param1, Param2, Param3, ...>
        GenericsType[] parsedNodeGenericsTypes = parsedNode.getGenericsTypes();
        if (parsedNodeGenericsTypes == null) {
            return null;
        }
        ClassNode[] signature = new ClassNode[parsedNodeGenericsTypes.length];
        for (int i = 0; i < parsedNodeGenericsTypes.length; i++) {
            final GenericsType genericsType = parsedNodeGenericsTypes[i];
            signature[i] = resolveClassNode(sourceUnit, compilationUnit, mn, usage, genericsType.getType());
        }
        return signature;
    } catch (RecognitionException e) {
        sourceUnit.addError(new IncorrectTypeHintException(mn, e, usage.getLineNumber(), usage.getColumnNumber()));
    } catch (TokenStreamException e) {
        sourceUnit.addError(new IncorrectTypeHintException(mn, e, usage.getLineNumber(), usage.getColumnNumber()));
    } catch (ParserException e) {
        sourceUnit.addError(new IncorrectTypeHintException(mn, e, usage.getLineNumber(), usage.getColumnNumber()));
    }
    return null;
}
Also used: ClassNode (org.codehaus.groovy.ast.ClassNode), ParserException (org.codehaus.groovy.syntax.ParserException), IncorrectTypeHintException (groovy.transform.stc.IncorrectTypeHintException), AtomicReference (java.util.concurrent.atomic.AtomicReference), SourceUnit (org.codehaus.groovy.control.SourceUnit), TokenStreamException (antlr.TokenStreamException), Reduction (org.codehaus.groovy.syntax.Reduction), AntlrParserPlugin (org.codehaus.groovy.antlr.AntlrParserPlugin), GroovyLexer (org.codehaus.groovy.antlr.parser.GroovyLexer), StringReader (java.io.StringReader), GenericsType (org.codehaus.groovy.ast.GenericsType), GroovyRecognizer (org.codehaus.groovy.antlr.parser.GroovyRecognizer), RecognitionException (antlr.RecognitionException)
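
In this Groovy compiler helper the AtomicReference is not used for thread safety at all: it is a final, mutable holder that lets the anonymous AntlrParserPlugin subclass hand the parsed ClassNode back to the enclosing method, since local variables captured by an anonymous class must be (effectively) final. A stripped-down sketch of that holder pattern, using a hypothetical Visitor callback in place of the parser plugin:

import java.util.concurrent.atomic.AtomicReference;

public class CaptureFromCallbackSketch {

    // hypothetical callback interface standing in for AntlrParserPlugin.buildAST
    interface Visitor {
        void visit(String value);
    }

    // hypothetical traversal that invokes the callback exactly once
    static void walk(Visitor visitor) {
        visitor.visit("parsed-result");
    }

    public static void main(String[] args) {
        // the local variable must be final to be captured by the anonymous class,
        // so a final AtomicReference serves as a mutable holder for the result
        final AtomicReference<String> result = new AtomicReference<String>();
        walk(new Visitor() {
            @Override
            public void visit(String value) {
                result.set(value);
            }
        });
        System.out.println(result.get());
    }
}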

Example 4 with AtomicReference

Use of java.util.concurrent.atomic.AtomicReference in project flink by apache.

The class NettyServerLowAndHighWatermarkTest, method testLowAndHighWatermarks.

/**
	 * Verifies that the high and low watermarks are set in relation to the page size.
	 *
	 * <p> The high and low watermarks control the data flow to the wire. If the Netty write buffer
	 * has a size greater than or equal to the high watermark, the channel becomes not-writable.
	 * Only when the size falls below the low watermark again does the channel become writable again.
	 *
	 * <p> The channel writability state needs to be checked by the handler when writing to the
	 * channel; it is not enforced, i.e. it is still possible to write to a channel that is in the
	 * not-writable state.
	 */
@Test
public void testLowAndHighWatermarks() throws Throwable {
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
    final NettyProtocol protocol = new NettyProtocol() {

        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            // The channel handler implements the test
            return new ChannelHandler[] { new TestLowAndHighWatermarkHandler(error) };
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[0];
        }
    };
    final NettyConfig conf = createConfig(PageSize);
    final NettyServerAndClient serverAndClient = initServerAndClient(protocol, conf);
    try {
        // We can't just check the config of this channel as it is the client's channel. We need
        // to check the server channel, because it is doing the data transfers.
        final Channel ch = connect(serverAndClient);
        // Wait for the channel to be closed
        awaitClose(ch);
        final Throwable t = error.get();
        if (t != null) {
            throw t;
        }
    } finally {
        shutdown(serverAndClient);
    }
}
Also used: Channel (io.netty.channel.Channel), AtomicReference (java.util.concurrent.atomic.AtomicReference), ChannelHandler (io.netty.channel.ChannelHandler), NettyServerAndClient (org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient), Test (org.junit.Test)
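
As in the earlier tests, the AtomicReference<Throwable> is how failures observed on the Netty I/O thread reach the test thread, which rethrows them. The actual assertions live in the Flink-specific TestLowAndHighWatermarkHandler; the hypothetical handler below only illustrates the reporting side of such a handler and the writability callback that fires when the outbound buffer crosses the configured watermarks.

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import java.util.concurrent.atomic.AtomicReference;

public class ErrorReportingHandler extends ChannelInboundHandlerAdapter {

    private final AtomicReference<Throwable> error;

    public ErrorReportingHandler(AtomicReference<Throwable> error) {
        this.error = error;
    }

    @Override
    public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
        // isWritable() flips to false once the write buffer reaches the high watermark
        // and back to true when it drains below the low watermark; a real test would
        // assert on these transitions here
        boolean writable = ctx.channel().isWritable();
        System.out.println("channel writable: " + writable);
        super.channelWritabilityChanged(ctx);
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        // record only the first failure; the test thread checks and rethrows it
        error.compareAndSet(null, cause);
        ctx.close();
    }
}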

Example 5 with AtomicReference

Use of java.util.concurrent.atomic.AtomicReference in project flink by apache.

The class JobManagerHACheckpointRecoveryITCase, method testCheckpointedStreamingSumProgram.

/**
	 * Simple checkpointed streaming sum.
	 *
	 * <p>The sources (running with parallelism Parallelism) count up to sequenceEnd. The sink
	 * (parallelism 1) sums up all counts and returns the result to the main thread via a static
	 * variable. We wait until some checkpoints are completed and sanity-check that the sources
	 * recover with an updated state to make sure that this test actually tests something.
	 */
@Test
@RetryOnFailure(times = 1)
public void testCheckpointedStreamingSumProgram() throws Exception {
    // Config
    final int checkpointingInterval = 200;
    final int sequenceEnd = 5000;
    final long expectedSum = Parallelism * sequenceEnd * (sequenceEnd + 1) / 2;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(Parallelism);
    env.enableCheckpointing(checkpointingInterval);
    env.addSource(new CheckpointedSequenceSource(sequenceEnd)).addSink(new CountingSink()).setParallelism(1);
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(ZooKeeper.getConnectString(), FileStateBackendBasePath.getAbsoluteFile().toURI().toString());
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, Parallelism);
    ActorSystem testSystem = null;
    final JobManagerProcess[] jobManagerProcess = new JobManagerProcess[2];
    LeaderRetrievalService leaderRetrievalService = null;
    ActorSystem taskManagerSystem = null;
    try {
        final Deadline deadline = TestTimeOut.fromNow();
        // Test actor system
        testSystem = AkkaUtils.createActorSystem(new Configuration(), new Some<>(new Tuple2<String, Object>("localhost", 0)));
        // The job managers
        jobManagerProcess[0] = new JobManagerProcess(0, config);
        jobManagerProcess[1] = new JobManagerProcess(1, config);
        jobManagerProcess[0].startProcess();
        jobManagerProcess[1].startProcess();
        // Leader listener
        TestingListener leaderListener = new TestingListener();
        leaderRetrievalService = ZooKeeperUtils.createLeaderRetrievalService(config);
        leaderRetrievalService.start(leaderListener);
        // The task manager
        taskManagerSystem = AkkaUtils.createActorSystem(config, Option.apply(new Tuple2<String, Object>("localhost", 0)));
        TaskManager.startTaskManagerComponentsAndActor(config, ResourceID.generate(), taskManagerSystem, "localhost", Option.<String>empty(), Option.<LeaderRetrievalService>empty(), false, TaskManager.class);
        {
            // Initial submission
            leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
            String leaderAddress = leaderListener.getAddress();
            UUID leaderId = leaderListener.getLeaderSessionID();
            // Get the leader ref
            ActorRef leaderRef = AkkaUtils.getActorRef(leaderAddress, testSystem, deadline.timeLeft());
            ActorGateway leader = new AkkaActorGateway(leaderRef, leaderId);
            // Submit the job in detached mode
            leader.tell(new SubmitJob(jobGraph, ListeningBehaviour.DETACHED));
            JobManagerActorTestUtils.waitForJobStatus(jobGraph.getJobID(), JobStatus.RUNNING, leader, deadline.timeLeft());
        }
        // Determine which job manager process is currently the leader
        JobManagerProcess leadingJobManagerProcess;
        if (jobManagerProcess[0].getJobManagerAkkaURL(deadline.timeLeft()).equals(leaderListener.getAddress())) {
            leadingJobManagerProcess = jobManagerProcess[0];
        } else {
            leadingJobManagerProcess = jobManagerProcess[1];
        }
        CompletedCheckpointsLatch.await();
        // Kill the leading job manager process
        leadingJobManagerProcess.destroy();
        {
            // Recovery by the standby JobManager
            leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
            String leaderAddress = leaderListener.getAddress();
            UUID leaderId = leaderListener.getLeaderSessionID();
            ActorRef leaderRef = AkkaUtils.getActorRef(leaderAddress, testSystem, deadline.timeLeft());
            ActorGateway leader = new AkkaActorGateway(leaderRef, leaderId);
            JobManagerActorTestUtils.waitForJobStatus(jobGraph.getJobID(), JobStatus.RUNNING, leader, deadline.timeLeft());
        }
        // Wait to finish
        FinalCountLatch.await();
        assertEquals(expectedSum, (long) FinalCount.get());
        for (int i = 0; i < Parallelism; i++) {
            assertNotEquals(0, RecoveredStates.get(i));
        }
    } catch (Throwable t) {
        // Reset all static state for test retries
        CompletedCheckpointsLatch = new CountDownLatch(2);
        RecoveredStates = new AtomicLongArray(Parallelism);
        FinalCountLatch = new CountDownLatch(1);
        FinalCount = new AtomicReference<>();
        LastElement = -1;
        // Print early (in some situations the process logs get too big
        // for Travis and the root problem is not shown)
        t.printStackTrace();
        // In case of an error, print the job manager process logs.
        if (jobManagerProcess[0] != null) {
            jobManagerProcess[0].printProcessLog();
        }
        if (jobManagerProcess[1] != null) {
            jobManagerProcess[1].printProcessLog();
        }
        throw t;
    } finally {
        if (jobManagerProcess[0] != null) {
            jobManagerProcess[0].destroy();
        }
        if (jobManagerProcess[1] != null) {
            jobManagerProcess[1].destroy();
        }
        if (leaderRetrievalService != null) {
            leaderRetrievalService.stop();
        }
        if (taskManagerSystem != null) {
            taskManagerSystem.shutdown();
        }
        if (testSystem != null) {
            testSystem.shutdown();
        }
    }
}
Also used: ActorSystem (akka.actor.ActorSystem), AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway), Configuration (org.apache.flink.configuration.Configuration), ActorRef (akka.actor.ActorRef), TestingListener (org.apache.flink.runtime.leaderelection.TestingListener), ActorGateway (org.apache.flink.runtime.instance.ActorGateway), UUID (java.util.UUID), SubmitJob (org.apache.flink.runtime.messages.JobManagerMessages.SubmitJob), Deadline (scala.concurrent.duration.Deadline), AtomicReference (java.util.concurrent.atomic.AtomicReference), CountDownLatch (java.util.concurrent.CountDownLatch), JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), Some (scala.Some), LeaderRetrievalService (org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService), JobManagerProcess (org.apache.flink.runtime.testutils.JobManagerProcess), AtomicLongArray (java.util.concurrent.atomic.AtomicLongArray), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), Test (org.junit.Test), RetryOnFailure (org.apache.flink.testutils.junit.RetryOnFailure)
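
Here FinalCount is a static AtomicReference that the CountingSink (running on a task thread, possibly after a failover) uses to hand the final sum back to the test thread, which blocks on FinalCountLatch; both are reset in the catch block so that a retried run starts from clean static state. A minimal sketch of that static handoff, with a hypothetical reportFinalCount call standing in for the sink:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class StaticHandoffSketch {

    // static handoff state, analogous to FinalCount / FinalCountLatch;
    // re-created between retries so each run starts fresh
    static AtomicReference<Long> finalCount = new AtomicReference<>();
    static CountDownLatch finalCountLatch = new CountDownLatch(1);

    // called from the "sink" side once it has seen all elements
    static void reportFinalCount(long sum) {
        finalCount.set(sum);
        finalCountLatch.countDown();
    }

    public static void main(String[] args) throws Exception {
        // hypothetical worker thread standing in for the sink task
        new Thread(new Runnable() {
            @Override
            public void run() {
                reportFinalCount(42L); // placeholder sum
            }
        }).start();

        finalCountLatch.await();
        System.out.println("final count: " + finalCount.get());
    }
}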

Aggregations

AtomicReference (java.util.concurrent.atomic.AtomicReference): 1331
Test (org.junit.Test): 668
CountDownLatch (java.util.concurrent.CountDownLatch): 437
IOException (java.io.IOException): 263
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 205
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 159
ArrayList (java.util.ArrayList): 108
HashMap (java.util.HashMap): 105
List (java.util.List): 95
Map (java.util.Map): 77
Test (org.testng.annotations.Test): 76
File (java.io.File): 64
ExecutionException (java.util.concurrent.ExecutionException): 60
HashSet (java.util.HashSet): 54
URI (java.net.URI): 48
TimeoutException (java.util.concurrent.TimeoutException): 48
HttpServletRequest (javax.servlet.http.HttpServletRequest): 48
HttpServletResponse (javax.servlet.http.HttpServletResponse): 46
MockResponse (okhttp3.mockwebserver.MockResponse): 46
ByteBuffer (java.nio.ByteBuffer): 44