use of java.util.concurrent.atomic.AtomicReference in project flink by apache.
the class SystemProcessingTimeServiceTest method testImmediateShutdown.
@Test
public void testImmediateShutdown() throws Exception {
    final Object lock = new Object();
    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final SystemProcessingTimeService timer = new SystemProcessingTimeService(new ReferenceSettingExceptionHandler(errorRef), lock);
    try {
        assertFalse(timer.isTerminated());
        final OneShotLatch latch = new OneShotLatch();
        // the task should trigger immediately and should block until terminated with interruption
        timer.registerTimer(System.currentTimeMillis(), new ProcessingTimeCallback() {
            @Override
            public void onProcessingTime(long timestamp) throws Exception {
                latch.trigger();
                Thread.sleep(100000000);
            }
        });
        latch.await();
        timer.shutdownService();
        //noinspection SynchronizationOnLocalVariableOrMethodParameter
        synchronized (lock) {
            assertTrue(timer.isTerminated());
        }
        try {
            timer.registerTimer(System.currentTimeMillis() + 1000, new ProcessingTimeCallback() {
                @Override
                public void onProcessingTime(long timestamp) {
                }
            });
            fail("should result in an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        try {
            timer.scheduleAtFixedRate(new ProcessingTimeCallback() {
                @Override
                public void onProcessingTime(long timestamp) {
                }
            }, 0L, 100L);
            fail("should result in an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        // the sleeping timer task should have been interrupted during shutdown, surfacing as an asynchronous error
        assertNotNull(errorRef.get());
        assertTrue(errorRef.get().getCause() instanceof InterruptedException);
        assertEquals(0, timer.getNumTasksScheduled());
    } finally {
        timer.shutdownService();
    }
}
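The ReferenceSettingExceptionHandler used above exists only to capture the first asynchronous failure in the shared AtomicReference so the test thread can assert on it later. A minimal sketch of that pattern, assuming a handleAsyncException-style callback (this is not Flink's actual implementation, just the idea):

import java.util.concurrent.atomic.AtomicReference;

// Sketch (assumption): publish the first asynchronous failure to a shared reference.
class ReferenceSettingHandlerSketch {
    private final AtomicReference<Throwable> errorRef;

    ReferenceSettingHandlerSketch(AtomicReference<Throwable> errorRef) {
        this.errorRef = errorRef;
    }

    void handleAsyncException(String message, Throwable exception) {
        // keep only the first error; subsequent failures are ignored
        errorRef.compareAndSet(null, exception);
    }
}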
use of java.util.concurrent.atomic.AtomicReference in project flink by apache.
the class SystemProcessingTimeServiceTest method testScheduleAtFixedRateHoldsLock.
/**
 * Tests that the scheduleAtFixedRate callback is called while holding the given lock.
 */
@Test
public void testScheduleAtFixedRateHoldsLock() throws Exception {
    final Object lock = new Object();
    final AtomicReference<Throwable> errorRef = new AtomicReference<>();
    final SystemProcessingTimeService timer = new SystemProcessingTimeService(new ReferenceSettingExceptionHandler(errorRef), lock);
    final OneShotLatch awaitCallback = new OneShotLatch();
    try {
        assertEquals(0, timer.getNumTasksScheduled());
        // schedule something
        ScheduledFuture<?> future = timer.scheduleAtFixedRate(new ProcessingTimeCallback() {
            @Override
            public void onProcessingTime(long timestamp) {
                assertTrue(Thread.holdsLock(lock));
                awaitCallback.trigger();
            }
        }, 0L, 100L);
        // wait until the first execution is active
        awaitCallback.await();
        // cancel periodic callback
        future.cancel(true);
        // check that no asynchronous error was reported
        if (errorRef.get() != null) {
            throw new Exception(errorRef.get());
        }
    } finally {
        timer.shutdownService();
    }
}
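The guarantee the test checks (Thread.holdsLock(lock) being true inside the callback) comes from the timer service acquiring the shared lock around each invocation of user code. A minimal, self-contained sketch of that wrapping with a plain ScheduledExecutorService (class and field names are illustrative, not Flink's):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class LockedTimerSketch {
    public static void main(String[] args) {
        final Object lock = new Object();
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        // Each periodic tick acquires the shared lock before running user code,
        // which is why a callback scheduled this way sees Thread.holdsLock(lock) == true.
        executor.scheduleAtFixedRate(() -> {
            synchronized (lock) {
                System.out.println("callback under lock: " + Thread.holdsLock(lock));
            }
        }, 0L, 100L, TimeUnit.MILLISECONDS);
    }
}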
use of java.util.concurrent.atomic.AtomicReference in project groovy by apache.
the class GenericsUtils method parseClassNodesFromString.
public static ClassNode[] parseClassNodesFromString(final String option, final SourceUnit sourceUnit, final CompilationUnit compilationUnit, final MethodNode mn, final ASTNode usage) {
    GroovyLexer lexer = new GroovyLexer(new StringReader("DummyNode<" + option + ">"));
    final GroovyRecognizer rn = GroovyRecognizer.make(lexer);
    try {
        rn.classOrInterfaceType(true);
        final AtomicReference<ClassNode> ref = new AtomicReference<ClassNode>();
        AntlrParserPlugin plugin = new AntlrParserPlugin() {
            @Override
            public ModuleNode buildAST(final SourceUnit sourceUnit, final ClassLoader classLoader, final Reduction cst) throws ParserException {
                ref.set(makeTypeWithArguments(rn.getAST()));
                return null;
            }
        };
        plugin.buildAST(null, null, null);
        ClassNode parsedNode = ref.get();
        // the returned node is DummyNode<Param1, Param2, Param3, ...>
        GenericsType[] parsedNodeGenericsTypes = parsedNode.getGenericsTypes();
        if (parsedNodeGenericsTypes == null) {
            return null;
        }
        ClassNode[] signature = new ClassNode[parsedNodeGenericsTypes.length];
        for (int i = 0; i < parsedNodeGenericsTypes.length; i++) {
            final GenericsType genericsType = parsedNodeGenericsTypes[i];
            signature[i] = resolveClassNode(sourceUnit, compilationUnit, mn, usage, genericsType.getType());
        }
        return signature;
    } catch (RecognitionException e) {
        sourceUnit.addError(new IncorrectTypeHintException(mn, e, usage.getLineNumber(), usage.getColumnNumber()));
    } catch (TokenStreamException e) {
        sourceUnit.addError(new IncorrectTypeHintException(mn, e, usage.getLineNumber(), usage.getColumnNumber()));
    } catch (ParserException e) {
        sourceUnit.addError(new IncorrectTypeHintException(mn, e, usage.getLineNumber(), usage.getColumnNumber()));
    }
    return null;
}
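The AtomicReference here is simply a way to carry a result out of the anonymous AntlrParserPlugin callback; the option string is wrapped in a synthetic DummyNode<...> so the comma-separated types come back as generics arguments. A hypothetical call site might look like this (the sourceUnit, compilationUnit, enclosingMethod, and usageNode variables are assumed to be supplied by the surrounding compiler context):

// Hypothetical usage (assumption): resolve a comma-separated type-hint string
// such as "java.util.Map<K,V>, V" into concrete ClassNodes for a method.
ClassNode[] hinted = GenericsUtils.parseClassNodesFromString(
        "java.util.Map<K,V>, V",  // raw option string
        sourceUnit,               // used for error reporting
        compilationUnit,          // used to resolve class names
        enclosingMethod,          // MethodNode whose generics may be referenced
        usageNode);               // ASTNode providing line/column for errors
if (hinted == null) {
    // parsing failed; an IncorrectTypeHintException was added to the SourceUnit
}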
use of java.util.concurrent.atomic.AtomicReference in project flink by apache.
the class NettyServerLowAndHighWatermarkTest method testLowAndHighWatermarks.
/**
 * Verifies that the high and low watermarks are set in relation to the page size.
 *
 * <p>The high and low watermarks control the data flow to the wire. If the Netty write buffer
 * grows to a size greater than or equal to the high watermark, the channel becomes not-writable.
 * Only when the buffer size falls below the low watermark again does the channel become writable again.
 *
 * <p>The channel's writability state needs to be checked by the handler when writing to the
 * channel; it is not enforced, i.e. it is still possible to write to a channel that is in the
 * not-writable state.
 */
@Test
public void testLowAndHighWatermarks() throws Throwable {
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
    final NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            // The channel handler implements the test
            return new ChannelHandler[] { new TestLowAndHighWatermarkHandler(error) };
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[0];
        }
    };
    final NettyConfig conf = createConfig(PageSize);
    final NettyServerAndClient serverAndClient = initServerAndClient(protocol, conf);
    try {
        // We can't just check the config of this channel as it is the client's channel. We need
        // to check the server channel, because it is doing the data transfers.
        final Channel ch = connect(serverAndClient);
        // Wait for the channel to be closed
        awaitClose(ch);
        final Throwable t = error.get();
        if (t != null) {
            throw t;
        }
    } finally {
        shutdown(serverAndClient);
    }
}
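Since writability is advisory, a handler is expected to consult Channel#isWritable() before writing and to react to channelWritabilityChanged events. The following is a minimal, generic sketch of that pattern (it is not Flink's TestLowAndHighWatermarkHandler, and the class name is made up):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.ReferenceCountUtil;

// Sketch: respect the writability state derived from the low/high watermarks.
public class WatermarkAwareHandler extends ChannelInboundHandlerAdapter {

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        if (ctx.channel().isWritable()) {
            // write buffer is below the high watermark: safe to write
            ctx.writeAndFlush(msg);
        } else {
            // above the high watermark: back off; a real handler would queue the
            // message and resume once the buffer drains below the low watermark
            ReferenceCountUtil.release(msg);
        }
    }

    @Override
    public void channelWritabilityChanged(ChannelHandlerContext ctx) {
        // fired when writability flips; a real handler would drain its queue here
        ctx.fireChannelWritabilityChanged();
    }
}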
use of java.util.concurrent.atomic.AtomicReference in project flink by apache.
the class JobManagerHACheckpointRecoveryITCase method testCheckpointedStreamingSumProgram.
/**
 * Simple checkpointed streaming sum.
 *
 * <p>The sources (parallelism = Parallelism) count up to sequenceEnd. The sink (parallelism 1) sums
 * up all counts and returns the result to the main thread via a static variable. We wait until some
 * checkpoints have completed and sanity-check that the sources recover with an updated state, to
 * make sure that this test actually tests something.
 */
@Test
@RetryOnFailure(times = 1)
public void testCheckpointedStreamingSumProgram() throws Exception {
    // Config
    final int checkpointingInterval = 200;
    final int sequenceEnd = 5000;
    final long expectedSum = Parallelism * sequenceEnd * (sequenceEnd + 1) / 2;

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(Parallelism);
    env.enableCheckpointing(checkpointingInterval);
    env.addSource(new CheckpointedSequenceSource(sequenceEnd)).addSink(new CountingSink()).setParallelism(1);

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    Configuration config = ZooKeeperTestUtils.createZooKeeperHAConfig(ZooKeeper.getConnectString(), FileStateBackendBasePath.getAbsoluteFile().toURI().toString());
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, Parallelism);

    ActorSystem testSystem = null;
    final JobManagerProcess[] jobManagerProcess = new JobManagerProcess[2];
    LeaderRetrievalService leaderRetrievalService = null;
    ActorSystem taskManagerSystem = null;

    try {
        final Deadline deadline = TestTimeOut.fromNow();

        // Test actor system
        testSystem = AkkaUtils.createActorSystem(new Configuration(), new Some<>(new Tuple2<String, Object>("localhost", 0)));

        // The job managers
        jobManagerProcess[0] = new JobManagerProcess(0, config);
        jobManagerProcess[1] = new JobManagerProcess(1, config);
        jobManagerProcess[0].startProcess();
        jobManagerProcess[1].startProcess();

        // Leader listener
        TestingListener leaderListener = new TestingListener();
        leaderRetrievalService = ZooKeeperUtils.createLeaderRetrievalService(config);
        leaderRetrievalService.start(leaderListener);

        // The task manager
        taskManagerSystem = AkkaUtils.createActorSystem(config, Option.apply(new Tuple2<String, Object>("localhost", 0)));
        TaskManager.startTaskManagerComponentsAndActor(config, ResourceID.generate(), taskManagerSystem, "localhost", Option.<String>empty(), Option.<LeaderRetrievalService>empty(), false, TaskManager.class);

        {
            // Initial submission
            leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
            String leaderAddress = leaderListener.getAddress();
            UUID leaderId = leaderListener.getLeaderSessionID();

            // Get the leader ref
            ActorRef leaderRef = AkkaUtils.getActorRef(leaderAddress, testSystem, deadline.timeLeft());
            ActorGateway leader = new AkkaActorGateway(leaderRef, leaderId);

            // Submit the job in detached mode
            leader.tell(new SubmitJob(jobGraph, ListeningBehaviour.DETACHED));
            JobManagerActorTestUtils.waitForJobStatus(jobGraph.getJobID(), JobStatus.RUNNING, leader, deadline.timeLeft());
        }

        // Who's the boss?
        JobManagerProcess leadingJobManagerProcess;
        if (jobManagerProcess[0].getJobManagerAkkaURL(deadline.timeLeft()).equals(leaderListener.getAddress())) {
            leadingJobManagerProcess = jobManagerProcess[0];
        } else {
            leadingJobManagerProcess = jobManagerProcess[1];
        }

        CompletedCheckpointsLatch.await();

        // Kill the leading job manager process
        leadingJobManagerProcess.destroy();

        {
            // Recovery by the standby JobManager
            leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());
            String leaderAddress = leaderListener.getAddress();
            UUID leaderId = leaderListener.getLeaderSessionID();

            ActorRef leaderRef = AkkaUtils.getActorRef(leaderAddress, testSystem, deadline.timeLeft());
            ActorGateway leader = new AkkaActorGateway(leaderRef, leaderId);
            JobManagerActorTestUtils.waitForJobStatus(jobGraph.getJobID(), JobStatus.RUNNING, leader, deadline.timeLeft());
        }

        // Wait to finish
        FinalCountLatch.await();
        assertEquals(expectedSum, (long) FinalCount.get());

        for (int i = 0; i < Parallelism; i++) {
            assertNotEquals(0, RecoveredStates.get(i));
        }
    } catch (Throwable t) {
        // Reset all static state for test retries
        CompletedCheckpointsLatch = new CountDownLatch(2);
        RecoveredStates = new AtomicLongArray(Parallelism);
        FinalCountLatch = new CountDownLatch(1);
        FinalCount = new AtomicReference<>();
        LastElement = -1;

        // Print early (in some situations the process logs get too big
        // for Travis and the root problem is not shown)
        t.printStackTrace();

        // In case of an error, print the job manager process logs.
        if (jobManagerProcess[0] != null) {
            jobManagerProcess[0].printProcessLog();
        }
        if (jobManagerProcess[1] != null) {
            jobManagerProcess[1].printProcessLog();
        }
        throw t;
    } finally {
        if (jobManagerProcess[0] != null) {
            jobManagerProcess[0].destroy();
        }
        if (jobManagerProcess[1] != null) {
            jobManagerProcess[1].destroy();
        }
        if (leaderRetrievalService != null) {
            leaderRetrievalService.stop();
        }
        if (taskManagerSystem != null) {
            taskManagerSystem.shutdown();
        }
        if (testSystem != null) {
            testSystem.shutdown();
        }
    }
}
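The test hands the sink's result back to the main thread through static fields such as FinalCount and FinalCountLatch; the underlying pattern is just an AtomicReference paired with a CountDownLatch. A minimal sketch of that hand-off (field and method names are illustrative, not the test's actual members):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class ResultHandoffSketch {
    // written once by the sink thread, read by the test thread
    static final AtomicReference<Long> finalCount = new AtomicReference<>();
    static final CountDownLatch finalCountLatch = new CountDownLatch(1);

    static void publishFromSink(long sum) {
        finalCount.set(sum);          // make the result visible to other threads
        finalCountLatch.countDown();  // wake up the waiting test thread
    }

    static long awaitInTest() throws InterruptedException {
        finalCountLatch.await();      // block until the sink has published
        return finalCount.get();
    }
}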