Use of java.util.concurrent.CountDownLatch in project flink by apache.
The class ZooKeeperStateHandleStoreITCase, method testRemoveWithCallback.
/**
 * Tests that state handles are correctly removed with a callback.
 */
@Test
public void testRemoveWithCallback() throws Exception {
    // Setup
    LongStateStorage stateHandleProvider = new LongStateStorage();
    ZooKeeperStateHandleStore<Long> store = new ZooKeeperStateHandleStore<>(ZooKeeper.getClient(), stateHandleProvider, Executors.directExecutor());
    // Config
    final String pathInZooKeeper = "/testRemoveWithCallback";
    final Long state = 27255442L;
    store.add(pathInZooKeeper, state);
    final CountDownLatch sync = new CountDownLatch(1);
    BackgroundCallback callback = mock(BackgroundCallback.class);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(callback).processResult(eq(ZooKeeper.getClient()), any(CuratorEvent.class));
    // Test
    store.remove(pathInZooKeeper, callback);
    // Verify discarded and callback called
    assertEquals(0, ZooKeeper.getClient().getChildren().forPath("/").size());
    sync.await();
    verify(callback, times(1)).processResult(eq(ZooKeeper.getClient()), any(CuratorEvent.class));
}
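In this Flink test the CountDownLatch bridges an asynchronous Curator callback into a synchronous assertion: the mocked BackgroundCallback counts the one-shot latch down when ZooKeeper finishes the removal, and sync.await() blocks the test until that has happened. A minimal, self-contained sketch of the same idea, using a CompletableFuture to stand in for the asynchronous client call (the names below are illustrative, not from the Flink code):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class CallbackLatchSketch {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch sync = new CountDownLatch(1);
        // Kick off an asynchronous operation that notifies us via a callback when it finishes.
        CompletableFuture.runAsync(() -> {
            // ... perform the asynchronous work (e.g. a remove against a remote store) ...
        }).whenComplete((ignored, error) -> sync.countDown());
        // Block the calling thread until the callback has fired; bound the wait so a bug cannot hang the test forever.
        boolean completed = sync.await(30, TimeUnit.SECONDS);
        System.out.println("callback fired: " + completed);
    }
}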
Use of java.util.concurrent.CountDownLatch in project flink by apache.
The class RescalingITCase, method createJobGraphWithKeyedAndNonPartitionedOperatorState.
private static JobGraph createJobGraphWithKeyedAndNonPartitionedOperatorState(int parallelism, int maxParallelism, int fixedParallelism, int numberKeys, int numberElements, boolean terminateAfterEmission, int checkpointingInterval) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.getConfig().setMaxParallelism(maxParallelism);
    env.enableCheckpointing(checkpointingInterval);
    env.setRestartStrategy(RestartStrategies.noRestart());
    DataStream<Integer> input = env.addSource(new SubtaskIndexNonPartitionedStateSource(numberKeys, numberElements, terminateAfterEmission)).setParallelism(fixedParallelism).keyBy(new KeySelector<Integer, Integer>() {

        private static final long serialVersionUID = -7952298871120320940L;

        @Override
        public Integer getKey(Integer value) throws Exception {
            return value;
        }
    });
    SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);
    DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));
    result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());
    return env.getStreamGraph().getJobGraph();
}
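The notable CountDownLatch usage here is the static field SubtaskIndexFlatMapper.workCompletedLatch, sized to numberKeys: the flat mapper instances running in the job presumably count it down as they finish their share of the input, so the driving test can await the latch before it triggers a savepoint and rescales. A stripped-down sketch of that worker-side "static completion latch" pattern (the Worker class and its fields are illustrative placeholders, not Flink operators); the matching test-side await is sketched after the next example.

import java.util.concurrent.CountDownLatch;

// Workers share one static latch sized to the amount of expected work;
// RescalingITCase uses SubtaskIndexFlatMapper.workCompletedLatch in the same role.
class Worker implements Runnable {

    static CountDownLatch workCompletedLatch;

    private final int workItems;

    Worker(int workItems) {
        this.workItems = workItems;
    }

    @Override
    public void run() {
        for (int i = 0; i < workItems; i++) {
            // ... process one element ...
        }
        // Signal that this worker has finished its share of the work.
        workCompletedLatch.countDown();
    }
}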
Use of java.util.concurrent.CountDownLatch in project flink by apache.
The class RescalingITCase, method createJobGraphWithKeyedState.
private static JobGraph createJobGraphWithKeyedState(int parallelism, int maxParallelism, int numberKeys, int numberElements, boolean terminateAfterEmission, int checkpointingInterval) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    if (0 < maxParallelism) {
        env.getConfig().setMaxParallelism(maxParallelism);
    }
    env.enableCheckpointing(checkpointingInterval);
    env.setRestartStrategy(RestartStrategies.noRestart());
    DataStream<Integer> input = env.addSource(new SubtaskIndexSource(numberKeys, numberElements, terminateAfterEmission)).keyBy(new KeySelector<Integer, Integer>() {

        private static final long serialVersionUID = -7952298871120320940L;

        @Override
        public Integer getKey(Integer value) throws Exception {
            return value;
        }
    });
    SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);
    DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));
    result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());
    return env.getStreamGraph().getJobGraph();
}
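The counterpart is the test side, which installs the latch before building the job and then blocks on it once the job is running. The snippet below is an assumed sketch of that flow, with plain threads standing in for the job and placeholder timeout values; it is not taken from RescalingITCase itself.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class AwaitWorkSketch {

    public static void main(String[] args) throws Exception {
        int numberKeys = 3;
        CountDownLatch workCompleted = new CountDownLatch(numberKeys);
        // The job-side workers count this latch down, one count per key (here: plain threads).
        for (int i = 0; i < numberKeys; i++) {
            new Thread(workCompleted::countDown).start();
        }
        // The test blocks until all keys have been processed; only then is it safe to take a savepoint and rescale.
        if (!workCompleted.await(2, TimeUnit.MINUTES)) {
            throw new AssertionError("timed out waiting for the job to process all keys");
        }
        System.out.println("all keys processed");
    }
}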
Use of java.util.concurrent.CountDownLatch in project hadoop by apache.
The class TestMutableMetrics, method testMutableRatesWithAggregationManyThreads.
@Test
public void testMutableRatesWithAggregationManyThreads() throws InterruptedException {
    final MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
    final int n = 10;
    long[] opCount = new long[n];
    double[] opTotalTime = new double[n];
    for (int i = 0; i < n; i++) {
        opCount[i] = 0;
        opTotalTime[i] = 0;
        // Initialize so that the getLongCounter() method doesn't complain
        rates.add("metric" + i, 0);
    }
    Thread[] threads = new Thread[n];
    final CountDownLatch firstAddsFinished = new CountDownLatch(threads.length);
    final CountDownLatch firstSnapshotsFinished = new CountDownLatch(1);
    final CountDownLatch secondAddsFinished = new CountDownLatch(threads.length);
    final CountDownLatch secondSnapshotsFinished = new CountDownLatch(1);
    long seed = new Random().nextLong();
    LOG.info("Random seed = " + seed);
    final Random sleepRandom = new Random(seed);
    for (int tIdx = 0; tIdx < threads.length; tIdx++) {
        final int threadIdx = tIdx;
        threads[threadIdx] = new Thread() {

            @Override
            public void run() {
                try {
                    for (int i = 0; i < 1000; i++) {
                        rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
                        // Sleep so additions can be interleaved with snapshots
                        Thread.sleep(sleepRandom.nextInt(5));
                    }
                    firstAddsFinished.countDown();
                    // Make sure all threads stay alive long enough for the first
                    // snapshot to complete; else their metrics may be lost to GC
                    firstSnapshotsFinished.await();
                    // Let half the threads continue with more metrics and let half die
                    if (threadIdx % 2 == 0) {
                        for (int i = 0; i < 1000; i++) {
                            rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
                        }
                        secondAddsFinished.countDown();
                        secondSnapshotsFinished.await();
                    } else {
                        secondAddsFinished.countDown();
                    }
                } catch (InterruptedException e) {
                    // Ignore
                }
            }
        };
    }
    for (Thread t : threads) {
        t.start();
    }
    // Snapshot concurrently with the additions, accumulating results into opCount / opTotalTime
    for (int i = 0; i < 100; i++) {
        snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
        Thread.sleep(sleepRandom.nextInt(20));
    }
    firstAddsFinished.await();
    // Final snapshot to grab any remaining metrics and then verify that
    // the totals are as expected
    snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
    for (int i = 0; i < n; i++) {
        assertEquals("metric" + i + " count", 1001, opCount[i]);
        assertEquals("metric" + i + " total", 1500, opTotalTime[i], 1.0);
    }
    firstSnapshotsFinished.countDown();
    // After half of the threads die, ensure that the remaining ones still
    // add metrics correctly and that snapshot occurs correctly
    secondAddsFinished.await();
    snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
    for (int i = 0; i < n; i++) {
        assertEquals("metric" + i + " count", 1501, opCount[i]);
        assertEquals("metric" + i + " total", 2250, opTotalTime[i], 1.0);
    }
    secondSnapshotsFinished.countDown();
}
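This Hadoop test coordinates two phases with four latches: firstAddsFinished and secondAddsFinished tell the main thread when every worker has finished a round of rates.add(), while firstSnapshotsFinished and secondSnapshotsFinished keep the workers alive until the main thread has taken and verified its snapshot, so their metrics are not lost to garbage collection. A compact, self-contained sketch of one such add-then-snapshot handshake (the worker bodies are placeholders, not the Hadoop metrics calls):

import java.util.concurrent.CountDownLatch;

public class TwoPhaseLatchSketch {

    public static void main(String[] args) throws InterruptedException {
        final int workers = 4;
        final CountDownLatch addsFinished = new CountDownLatch(workers);
        final CountDownLatch snapshotFinished = new CountDownLatch(1);
        for (int i = 0; i < workers; i++) {
            new Thread(() -> {
                // ... each worker records some metrics here ...
                addsFinished.countDown();
                try {
                    // Stay alive until the main thread has taken its snapshot,
                    // so nothing is collected out from under it.
                    snapshotFinished.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
        // Wait for every worker to finish adding ...
        addsFinished.await();
        // ... take the snapshot and assert on the aggregated totals ...
        // ... then release the workers.
        snapshotFinished.countDown();
    }
}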
Use of java.util.concurrent.CountDownLatch in project hadoop by apache.
The class TestBlockReaderFactory, method testShortCircuitCacheTemporaryFailure.
/**
 * Tests the case where a short-circuit read fails to complete the first
 * time and a later attempt succeeds.
 * Any thread waiting on a cache load should receive the failure (if it
 * occurs); however, the failure result should not be cached. We want
 * to be able to retry later and succeed.
 */
@Test(timeout = 60000)
public void testShortCircuitCacheTemporaryFailure() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {

        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            if (replicaCreationShouldFail.get()) {
                // Insert a short delay to increase the chance that one client
                // thread waits for the other client thread's failure via
                // a condition variable.
                Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
                return new ShortCircuitReplicaInfo();
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheTemporaryFailure", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int NUM_THREADS = 2;
    final int SEED = 0xFADED;
    final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
    final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {

        @Override
        public void run() {
            try {
                // First time should fail.
                List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
                // first block
                LocatedBlock lblock = locatedBlocks.get(0);
                BlockReader blockReader = null;
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                    Assert.fail("expected getBlockReader to fail the first time.");
                } catch (Throwable t) {
                    Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t, t.getMessage().contains("TCP reads were disabled for testing"));
                } finally {
                    // keep findbugs happy
                    if (blockReader != null)
                        blockReader.close();
                }
                gotFailureLatch.countDown();
                shouldRetryLatch.await();
                // Second time should succeed.
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                } catch (Throwable t) {
                    LOG.error("error trying to retrieve a block reader " + "the second time.", t);
                    throw t;
                } finally {
                    if (blockReader != null)
                        blockReader.close();
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    gotFailureLatch.await();
    replicaCreationShouldFail.set(false);
    shouldRetryLatch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
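Here the two latches split the test into a failure phase and a retry phase: each reader thread counts gotFailureLatch down after it has observed the injected failure, the main thread then disables the failure injection, and shouldRetryLatch releases all readers at once to retry and succeed. A stripped-down sketch of this fail, flip a flag, then retry coordination (the failure flag and the "work" are placeholders, not the HDFS block-reader calls):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class FailThenRetrySketch {

    public static void main(String[] args) throws InterruptedException {
        final int readers = 2;
        final AtomicBoolean shouldFail = new AtomicBoolean(true);
        final CountDownLatch gotFailure = new CountDownLatch(readers);
        final CountDownLatch shouldRetry = new CountDownLatch(1);
        for (int i = 0; i < readers; i++) {
            new Thread(() -> {
                try {
                    // First attempt: expected to fail while shouldFail is true.
                    if (shouldFail.get()) {
                        // ... observe and assert on the expected failure ...
                    }
                    gotFailure.countDown();
                    // Wait until the main thread has disabled the failure injection.
                    shouldRetry.await();
                    // Second attempt: expected to succeed now.
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
        gotFailure.await();       // all readers have seen the failure
        shouldFail.set(false);    // stop injecting failures
        shouldRetry.countDown();  // let every reader retry
    }
}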