Use of java.util.concurrent.Future in project camel by apache.
The class Mina2ProducerConcurrentTest, method doSendMessages.
private void doSendMessages(int files, int poolSize) throws Exception {
    getMockEndpoint("mock:result").expectedMessageCount(files);
    ExecutorService executor = Executors.newFixedThreadPool(poolSize);
    // we access the responses Map below only inside the main thread,
    // so no need for a thread-safe Map implementation
    Map<Integer, Future<String>> responses = new HashMap<Integer, Future<String>>();
    for (int i = 0; i < files; i++) {
        final int index = i;
        Future<String> out = executor.submit(new Callable<String>() {
            public String call() throws Exception {
                return template.requestBody(String.format("mina2:tcp://localhost:%1$s?sync=true", getPort()), index, String.class);
            }
        });
        responses.put(index, out);
    }
    assertMockEndpointsSatisfied();
    assertEquals(files, responses.size());
    // get all responses
    Set<String> unique = new HashSet<String>();
    for (Future<String> future : responses.values()) {
        unique.add(future.get());
    }
    // should be 'files' unique responses
    assertEquals("Should be " + files + " unique responses", files, unique.size());
    executor.shutdownNow();
}
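Stripped of the Camel specifics (the template, the mock endpoint, and the mina2 endpoint URI all come from the surrounding test class), the pattern is plain fan-out/fan-in over an ExecutorService: submit one Callable per message, keep the Futures, then block on each get(). A minimal self-contained sketch of the same idea, with a hypothetical echo task standing in for the synchronous TCP round trip:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutFanInSketch {
    public static void main(String[] args) throws Exception {
        final int files = 10;
        final int poolSize = 5;
        ExecutorService executor = Executors.newFixedThreadPool(poolSize);
        // Written and read only by the main thread, so a plain HashMap is safe.
        Map<Integer, Future<String>> responses = new HashMap<>();
        for (int i = 0; i < files; i++) {
            final int index = i;
            // Hypothetical stand-in for the request/response round trip.
            responses.put(index, executor.submit(() -> "reply-" + index));
        }
        Set<String> unique = new HashSet<>();
        for (Future<String> future : responses.values()) {
            unique.add(future.get()); // blocks until the task completes; rethrows its failure
        }
        if (unique.size() != files) {
            throw new AssertionError("Expected " + files + " unique responses, got " + unique.size());
        }
        executor.shutdownNow();
    }
}

As the test's own comment notes, the non-thread-safe HashMap is fine here because only the main thread ever touches it; the worker threads interact with it only indirectly, through the Future objects it stores.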
Use of java.util.concurrent.Future in project flink by apache.
The class LocalInputChannelTest, method testConcurrentConsumeMultiplePartitions.
/**
* Tests the consumption of multiple subpartitions via local input channels.
*
* <p> Multiple producer tasks produce pipelined partitions, which are consumed by multiple
* tasks via local input channels.
*/
@Test
public void testConcurrentConsumeMultiplePartitions() throws Exception {
    // Config
    final int parallelism = 32;
    final int producerBufferPoolSize = parallelism + 1;
    final int numberOfBuffersPerChannel = 1024;
    checkArgument(parallelism >= 1);
    checkArgument(producerBufferPoolSize >= parallelism);
    checkArgument(numberOfBuffersPerChannel >= 1);
    // Setup
    // One thread per produced partition and one per consumer
    final ExecutorService executor = Executors.newFixedThreadPool(2 * parallelism);
    final NetworkBufferPool networkBuffers = new NetworkBufferPool(
            (parallelism * producerBufferPoolSize) + (parallelism * parallelism),
            TestBufferFactory.BUFFER_SIZE, MemoryType.HEAP);
    final ResultPartitionConsumableNotifier partitionConsumableNotifier =
            mock(ResultPartitionConsumableNotifier.class);
    final TaskActions taskActions = mock(TaskActions.class);
    final IOManager ioManager = mock(IOManager.class);
    final JobID jobId = new JobID();
    final ResultPartitionManager partitionManager = new ResultPartitionManager();
    final ResultPartitionID[] partitionIds = new ResultPartitionID[parallelism];
    final TestPartitionProducer[] partitionProducers = new TestPartitionProducer[parallelism];
    // Create all partitions
    for (int i = 0; i < parallelism; i++) {
        partitionIds[i] = new ResultPartitionID();
        final ResultPartition partition = new ResultPartition(
                "Test Name", taskActions, jobId, partitionIds[i],
                ResultPartitionType.PIPELINED, parallelism, parallelism,
                partitionManager, partitionConsumableNotifier, ioManager, true);
        // Create a buffer pool for this partition
        partition.registerBufferPool(
                networkBuffers.createBufferPool(producerBufferPoolSize, producerBufferPoolSize));
        // Create the producer
        partitionProducers[i] = new TestPartitionProducer(
                partition, false,
                new TestPartitionProducerBufferSource(
                        parallelism, partition.getBufferProvider(), numberOfBuffersPerChannel));
        // Register with the partition manager in order to allow the local input channels to
        // request their respective partitions.
        partitionManager.registerResultPartition(partition);
    }
    // Test
    try {
        // Submit producer tasks
        // (capacity hint covers one future per producer and one per consumer)
        List<Future<?>> results = Lists.newArrayListWithCapacity(2 * parallelism);
        for (int i = 0; i < parallelism; i++) {
            results.add(executor.submit(partitionProducers[i]));
        }
        // Submit consumers
        for (int i = 0; i < parallelism; i++) {
            results.add(executor.submit(new TestLocalInputChannelConsumer(
                    i, parallelism, numberOfBuffersPerChannel,
                    networkBuffers.createBufferPool(parallelism, parallelism),
                    partitionManager, new TaskEventDispatcher(), partitionIds)));
        }
        // Wait for all to finish
        for (Future<?> result : results) {
            result.get();
        }
    } finally {
        networkBuffers.destroy();
        executor.shutdown();
    }
}
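Note that these Futures carry no values: result.get() serves purely as a completion barrier that also rethrows anything a producer or consumer task threw (wrapped in an ExecutionException), failing the test. A stripped-down sketch of that barrier-plus-cleanup shape, where produce() and consume() are placeholder bodies standing in for the Flink producer and consumer tasks:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionBarrierSketch {
    static void produce() { /* placeholder: write buffers to a partition */ }
    static void consume() { /* placeholder: read buffers through an input channel */ }

    public static void main(String[] args) throws Exception {
        final int parallelism = 4;
        // One thread per producer and one per consumer, as in the test above.
        ExecutorService executor = Executors.newFixedThreadPool(2 * parallelism);
        List<Future<?>> results = new ArrayList<>(2 * parallelism);
        try {
            for (int i = 0; i < parallelism; i++) {
                results.add(executor.submit(() -> produce()));
                results.add(executor.submit(() -> consume()));
            }
            for (Future<?> result : results) {
                // No value to return; get() only waits for completion and
                // rethrows any task failure wrapped in an ExecutionException.
                result.get();
            }
        } finally {
            executor.shutdown();
        }
    }
}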
Use of java.util.concurrent.Future in project flink by apache.
The class RemoteInputChannelTest, method testConcurrentOnBufferAndRelease.
@Test
public void testConcurrentOnBufferAndRelease() throws Exception {
    // Config
    // Spawn two tasks: one queues buffers while the other concurrently releases
    // the channel. Repeat many times to provoke races.
    final int numberOfRepetitions = 8192;
    // Setup
    final ExecutorService executor = Executors.newFixedThreadPool(2);
    try {
        // Test
        final SingleInputGate inputGate = mock(SingleInputGate.class);
        for (int i = 0; i < numberOfRepetitions; i++) {
            final RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate);
            final Callable<Void> enqueueTask = new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    while (true) {
                        for (int j = 0; j < 128; j++) {
                            inputChannel.onBuffer(TestBufferFactory.getMockBuffer(), j);
                        }
                        if (inputChannel.isReleased()) {
                            return null;
                        }
                    }
                }
            };
            final Callable<Void> releaseTask = new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    inputChannel.releaseAllResources();
                    return null;
                }
            };
            // Submit tasks and wait for them to finish
            List<Future<Void>> results = Lists.newArrayListWithCapacity(2);
            results.add(executor.submit(enqueueTask));
            results.add(executor.submit(releaseTask));
            for (Future<Void> result : results) {
                result.get();
            }
            assertEquals("Resource leak during concurrent release and enqueue.",
                    0, inputChannel.getNumberOfQueuedBuffers());
        }
    } finally {
        executor.shutdown();
    }
}
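The test uses a standard race-provoking idiom: build a fresh fixture each iteration, submit the two conflicting operations to a small pool, join on both Futures so exceptions surface, and assert an invariant afterwards. A self-contained sketch of the idiom against a hypothetical Channel class (the class and its synchronization are assumptions for illustration, not Flink's RemoteInputChannel):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class RaceSketch {
    // Hypothetical class under test: enqueue() must stay safe against a concurrent release().
    static class Channel {
        private final List<Object> queued = new ArrayList<>();
        private boolean released;
        synchronized void enqueue(Object buffer) {
            if (!released) {
                queued.add(buffer);
            }
        }
        synchronized void release() {
            queued.clear();
            released = true;
        }
        synchronized boolean isReleased() { return released; }
        synchronized int queuedCount() { return queued.size(); }
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            for (int i = 0; i < 8192; i++) {
                // A fresh fixture per iteration so each run races from a clean state.
                final Channel channel = new Channel();
                Callable<Void> enqueueTask = () -> {
                    while (!channel.isReleased()) {
                        channel.enqueue(new Object());
                    }
                    return null;
                };
                Callable<Void> releaseTask = () -> {
                    channel.release();
                    return null;
                };
                Future<Void> enqueueDone = executor.submit(enqueueTask);
                Future<Void> releaseDone = executor.submit(releaseTask);
                enqueueDone.get();
                releaseDone.get();
                if (channel.queuedCount() != 0) {
                    throw new AssertionError("Resource leak in iteration " + i);
                }
            }
        } finally {
            executor.shutdown();
        }
    }
}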
Use of java.util.concurrent.Future in project hadoop by apache.
The class DataStorage, method linkBlocks.
private static void linkBlocks(File from, File to, int oldLV, HardLink hl,
        Configuration conf) throws IOException {
    LOG.info("Start linking block files from " + from + " to " + to);
    boolean upgradeToIdBasedLayout = false;
    // Upgrade to the block-ID-based layout when the old layout version predates
    // the 32x32 block-ID layout (-56) and we are linking into the 'finalized'
    // directory.
    if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT_32_by_32
            .getInfo().getLayoutVersion()
            && to.getName().equals(STORAGE_DIR_FINALIZED)) {
        upgradeToIdBasedLayout = true;
    }
    final ArrayList<LinkArgs> idBasedLayoutSingleLinks = Lists.newArrayList();
    linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to, idBasedLayoutSingleLinks);
    // Detect and remove duplicate entries.
    final ArrayList<LinkArgs> duplicates = findDuplicateEntries(idBasedLayoutSingleLinks);
    if (!duplicates.isEmpty()) {
        LOG.error("There are " + duplicates.size()
                + " duplicate block entries within the same volume.");
        removeDuplicateEntries(idBasedLayoutSingleLinks, duplicates);
    }
    final int numLinkWorkers = conf.getInt(
            DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY,
            DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS);
    ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers);
    // Ceil-divide the links into at most numLinkWorkers contiguous slices.
    final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1;
    List<Future<Void>> futures = Lists.newArrayList();
    for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) {
        final int iCopy = i;
        futures.add(linkWorkers.submit(new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                int upperBound = Math.min(iCopy + step, idBasedLayoutSingleLinks.size());
                for (int j = iCopy; j < upperBound; j++) {
                    LinkArgs cur = idBasedLayoutSingleLinks.get(j);
                    HardLink.createHardLink(cur.src, cur.dst);
                }
                return null;
            }
        }));
    }
    linkWorkers.shutdown();
    for (Future<Void> f : futures) {
        Futures.get(f, IOException.class);
    }
}
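The slicing arithmetic deserves a second look: step = size / numLinkWorkers + 1 ceil-divides the link list so at most numLinkWorkers contiguous slices are submitted. For example, 10 links across 4 workers gives step = 10 / 4 + 1 = 3 and slices [0, 3), [3, 6), [6, 9), [9, 10). A runnable sketch of the same partitioning, with a println standing in for HardLink.createHardLink and plain Future.get() in place of Guava's Futures.get:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ChunkedSubmitSketch {
    public static void main(String[] args) throws Exception {
        final List<String> links = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            links.add("link-" + i);
        }
        final int numWorkers = 4;
        ExecutorService pool = Executors.newFixedThreadPool(numWorkers);
        // Ceil-division: 10 / 4 + 1 = 3, producing slices [0,3), [3,6), [6,9), [9,10).
        final int step = links.size() / numWorkers + 1;
        List<Future<Void>> futures = new ArrayList<>();
        for (int i = 0; i < links.size(); i += step) {
            final int start = i;
            futures.add(pool.submit(() -> {
                int upperBound = Math.min(start + step, links.size());
                for (int j = start; j < upperBound; j++) {
                    // Placeholder for HardLink.createHardLink(cur.src, cur.dst).
                    System.out.println(Thread.currentThread().getName() + " -> " + links.get(j));
                }
                return null;
            }));
        }
        // shutdown() stops new submissions; already-queued slices still run.
        pool.shutdown();
        for (Future<Void> f : futures) {
            f.get(); // propagate any worker failure to the caller
        }
    }
}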
Use of java.util.concurrent.Future in project hadoop by apache.
The class TestRPC, method testClientBackOffByResponseTime.
/**
* Test RPC backoff by response time of each priority level.
*/
@Test(timeout = 30000)
public void testClientBackOffByResponseTime() throws Exception {
    final TestRpcService proxy;
    boolean succeeded = false;
    final int numClients = 1;
    GenericTestUtils.setLogLevel(DecayRpcScheduler.LOG, Level.DEBUG);
    GenericTestUtils.setLogLevel(RPC.LOG, Level.DEBUG);
    final List<Future<Void>> res = new ArrayList<Future<Void>>();
    final ExecutorService executorService = Executors.newFixedThreadPool(numClients);
    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
    final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0";
    Server server = setupDecayRpcSchedulerandTestServer(ns + ".");
    @SuppressWarnings("unchecked")
    CallQueueManager<Call> spy = spy((CallQueueManager<Call>) Whitebox.getInternalState(server, "callQueue"));
    Whitebox.setInternalState(server, "callQueue", spy);
    Exception lastException = null;
    proxy = getClient(addr, conf);
    MetricsRecordBuilder rb1 = getMetrics("DecayRpcSchedulerMetrics2." + ns);
    final long beginDecayedCallVolume = MetricsAsserts.getLongCounter("DecayedCallVolume", rb1);
    final long beginRawCallVolume = MetricsAsserts.getLongCounter("CallVolume", rb1);
    final int beginUniqueCaller = MetricsAsserts.getIntCounter("UniqueCallers", rb1);
    try {
        // Start RPC calls that each sleep for 3s.
        for (int i = 0; i < numClients; i++) {
            res.add(executorService.submit(new Callable<Void>() {
                @Override
                public Void call() throws ServiceException, InterruptedException {
                    proxy.sleep(null, newSleepRequest(3000));
                    return null;
                }
            }));
            verify(spy, timeout(500).times(i + 1)).offer(Mockito.<Call>anyObject());
        }
        // The average response time (3s) exceeds the backoff threshold (2s),
        // so the next call should be rejected with a RetriableException.
        try {
            // Wait for the first response-time update.
            Thread.sleep(5500);
            proxy.sleep(null, newSleepRequest(100));
        } catch (ServiceException e) {
            RemoteException re = (RemoteException) e.getCause();
            IOException unwrappedException = re.unwrapRemoteException();
            if (unwrappedException instanceof RetriableException) {
                succeeded = true;
            } else {
                lastException = unwrappedException;
            }
            // Let the metrics system publish the latest metrics.
            GenericTestUtils.waitFor(new Supplier<Boolean>() {
                @Override
                public Boolean get() {
                    MetricsRecordBuilder rb2 = getMetrics("DecayRpcSchedulerMetrics2." + ns);
                    long decayedCallVolume1 = MetricsAsserts.getLongCounter("DecayedCallVolume", rb2);
                    long rawCallVolume1 = MetricsAsserts.getLongCounter("CallVolume", rb2);
                    int uniqueCaller1 = MetricsAsserts.getIntCounter("UniqueCallers", rb2);
                    long callVolumePriority0 = MetricsAsserts.getLongGauge("Priority.0.CompletedCallVolume", rb2);
                    long callVolumePriority1 = MetricsAsserts.getLongGauge("Priority.1.CompletedCallVolume", rb2);
                    double avgRespTimePriority0 = MetricsAsserts.getDoubleGauge("Priority.0.AvgResponseTime", rb2);
                    double avgRespTimePriority1 = MetricsAsserts.getDoubleGauge("Priority.1.AvgResponseTime", rb2);
                    LOG.info("DecayedCallVolume: " + decayedCallVolume1);
                    LOG.info("CallVolume: " + rawCallVolume1);
                    LOG.info("UniqueCaller: " + uniqueCaller1);
                    LOG.info("Priority.0.CompletedCallVolume: " + callVolumePriority0);
                    LOG.info("Priority.1.CompletedCallVolume: " + callVolumePriority1);
                    LOG.info("Priority.0.AvgResponseTime: " + avgRespTimePriority0);
                    LOG.info("Priority.1.AvgResponseTime: " + avgRespTimePriority1);
                    return decayedCallVolume1 > beginDecayedCallVolume
                            && rawCallVolume1 > beginRawCallVolume
                            && uniqueCaller1 > beginUniqueCaller;
                }
            }, 30, 60000);
        }
    } finally {
        executorService.shutdown();
        stop(server, proxy);
    }
    if (lastException != null) {
        LOG.error("Last received non-RetriableException:", lastException);
    }
    assertTrue("RetriableException not received", succeeded);
}
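The GenericTestUtils.waitFor call near the end is a simple poll loop: evaluate a Supplier<Boolean> every 30 ms until it returns true or 60000 ms elapse. A minimal analog of that polling helper, using java.util.function.Supplier in place of the Guava Supplier the test imports (the argument order mirrors the call above, but this body is an assumption, not Hadoop's implementation):

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public class WaitForSketch {
    // Minimal analog of GenericTestUtils.waitFor(check, checkEveryMillis, waitForMillis).
    static void waitFor(Supplier<Boolean> check, long checkEveryMillis, long waitForMillis)
            throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (!check.get()) {
            if (System.currentTimeMillis() > deadline) {
                throw new TimeoutException("Condition not met within " + waitForMillis + " ms");
            }
            Thread.sleep(checkEveryMillis);
        }
    }

    public static void main(String[] args) throws Exception {
        final long start = System.currentTimeMillis();
        // Example condition: becomes true roughly 200 ms after start.
        waitFor(() -> System.currentTimeMillis() - start > 200, 30, 60000);
        System.out.println("Condition met");
    }
}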