Use of com.google.common.util.concurrent.ListenableFuture in project jackrabbit-oak by apache.
From class UploadStagingCacheTest, method testInvalidate.
/**
 * Invalidate after staging, before the upload starts.
 * @throws Exception
 */
@Test
public void testInvalidate() throws Exception {
    // Add load
    List<ListenableFuture<Integer>> futures = put(folder);
    // Invalidate one entry and check that it is gone
    stagingCache.invalidate(ID_PREFIX + 0);
    File file = stagingCache.getIfPresent(ID_PREFIX + 0);
    assertNull(file);
    // Release the latches to start the upload
    taskLatch.countDown();
    callbackLatch.countDown();
    waitFinish(futures);
    assertCacheStats(stagingCache, 0, 0, 1, 1);
    // The invalidated entry should still not be returned
    file = stagingCache.getIfPresent(ID_PREFIX + 0);
    assertNull(file);
}
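The helpers put, waitFinish, and assertCacheStats belong to the surrounding test class and are not shown here. As a rough sketch, waitFinish likely only needs to block until every upload future settles; the version below is an assumption built on Guava's successfulAsList, not the actual jackrabbit-oak helper:

// Hypothetical sketch of the waitFinish helper used above. The real helper is
// not shown in this snippet; this version simply blocks until all futures settle.
private void waitFinish(List<ListenableFuture<Integer>> futures) throws Exception {
    // successfulAsList waits for every future, substituting null for failures,
    // so a single get() is enough to know the whole batch has finished.
    Futures.successfulAsList(futures).get(1, TimeUnit.MINUTES);
}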
Use of com.google.common.util.concurrent.ListenableFuture in project beam by apache.
From class PackageUtil, method stageClasspathElements.
// Visible for testing.
static List<DataflowPackage> stageClasspathElements(
        Collection<String> classpathElements, final String stagingPath,
        final Sleeper retrySleeper, ListeningExecutorService executorService,
        final CreateOptions createOptions) {
    LOG.info("Uploading {} files from PipelineOptions.filesToStage to staging location to "
            + "prepare for execution.", classpathElements.size());
    if (classpathElements.size() > SANE_CLASSPATH_SIZE) {
        LOG.warn("Your classpath contains {} elements, which Google Cloud Dataflow automatically "
                + "copies to all workers. Having this many entries on your classpath may be indicative "
                + "of an issue in your pipeline. You may want to consider trimming the classpath to "
                + "necessary dependencies only, using --filesToStage pipeline option to override "
                + "what files are being staged, or bundling several dependencies into one.",
                classpathElements.size());
    }
    checkArgument(stagingPath != null,
            "Can't stage classpath elements because no staging location has been provided");
    // Inline a copy here because the inner code returns an immutable list and we want to mutate it.
    List<PackageAttributes> packageAttributes =
            new LinkedList<>(computePackageAttributes(classpathElements, stagingPath, executorService));
    // Compute the returned list of DataflowPackage objects here so that they are returned in the
    // same order as on the classpath.
    List<DataflowPackage> packages = Lists.newArrayListWithExpectedSize(packageAttributes.size());
    for (final PackageAttributes attributes : packageAttributes) {
        packages.add(attributes.getDataflowPackage());
    }
    // Order package attributes in descending size order so that we upload the largest files first.
    Collections.sort(packageAttributes, new PackageUploadOrder());
    final AtomicInteger numUploaded = new AtomicInteger(0);
    final AtomicInteger numCached = new AtomicInteger(0);
    List<ListenableFuture<?>> futures = new LinkedList<>();
    for (final PackageAttributes attributes : packageAttributes) {
        futures.add(executorService.submit(new Runnable() {
            @Override
            public void run() {
                stageOnePackage(attributes, numUploaded, numCached, retrySleeper, createOptions);
            }
        }));
    }
    try {
        Futures.allAsList(futures).get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted while staging packages", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Error while staging packages", e.getCause());
    }
    LOG.info("Staging files complete: {} files cached, {} files newly uploaded",
            numCached.get(), numUploaded.get());
    return packages;
}
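The PackageUploadOrder comparator used for the descending-size sort is not included in this snippet. Assuming PackageAttributes exposes the package size through a getSize() accessor (an assumed name, not confirmed above), a minimal version could look like:

// Hypothetical sketch of the descending-size comparator; the getSize() accessor
// on PackageAttributes is an assumption, not confirmed by the snippet above.
private static class PackageUploadOrder implements Comparator<PackageAttributes> {
    @Override
    public int compare(PackageAttributes a, PackageAttributes b) {
        // Reversed operand order sorts larger packages first.
        return Long.compare(b.getSize(), a.getSize());
    }
}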
Use of com.google.common.util.concurrent.ListenableFuture in project helios by spotify.
From class HeliosSoloDeploymentTest, method testUndeployLeftoverJobs.
@Test
public void testUndeployLeftoverJobs() throws Exception {
    final HeliosSoloDeployment solo = buildHeliosSoloDeployment();
    final ListenableFuture<List<String>> hostsFuture =
            Futures.<List<String>>immediateFuture(ImmutableList.of(HOST1, HOST2));
    when(heliosClient.listHosts()).thenReturn(hostsFuture);
    // These futures represent HostStatuses while the jobs are still deployed
    final ListenableFuture<HostStatus> statusFuture11 = Futures.immediateFuture(
            HostStatus.newBuilder()
                    .setStatus(Status.UP)
                    .setStatuses(ImmutableMap.of(JOB_ID1, TASK_STATUS1))
                    .setJobs(ImmutableMap.of(JOB_ID1, Deployment.of(JOB_ID1, Goal.START)))
                    .build());
    final ListenableFuture<HostStatus> statusFuture21 = Futures.immediateFuture(
            HostStatus.newBuilder()
                    .setStatus(Status.UP)
                    .setStatuses(ImmutableMap.of(JOB_ID2, TASK_STATUS2))
                    .setJobs(ImmutableMap.of(JOB_ID2, Deployment.of(JOB_ID2, Goal.START)))
                    .build());
    // These futures represent HostStatuses after the jobs are undeployed
    final ListenableFuture<HostStatus> statusFuture12 = Futures.immediateFuture(
            HostStatus.newBuilder()
                    .setStatus(Status.UP)
                    .setStatuses(Collections.<JobId, TaskStatus>emptyMap())
                    .setJobs(ImmutableMap.of(JOB_ID1, Deployment.of(JOB_ID1, Goal.START)))
                    .build());
    final ListenableFuture<HostStatus> statusFuture22 = Futures.immediateFuture(
            HostStatus.newBuilder()
                    .setStatus(Status.UP)
                    .setStatuses(Collections.<JobId, TaskStatus>emptyMap())
                    .setJobs(ImmutableMap.of(JOB_ID2, Deployment.of(JOB_ID2, Goal.START)))
                    .build());
    //noinspection unchecked
    when(heliosClient.hostStatus(HOST1)).thenReturn(statusFuture11);
    //noinspection unchecked
    when(heliosClient.hostStatus(HOST2)).thenReturn(statusFuture21);
    final ListenableFuture<JobUndeployResponse> undeployFuture1 = Futures.immediateFuture(
            new JobUndeployResponse(JobUndeployResponse.Status.OK, HOST1, JOB_ID1));
    final ListenableFuture<JobUndeployResponse> undeployFuture2 = Futures.immediateFuture(
            new JobUndeployResponse(JobUndeployResponse.Status.OK, HOST2, JOB_ID2));
    // When undeploy is called, respond correctly and patch the mock to return
    // the undeployed HostStatus
    when(heliosClient.undeploy(JOB_ID1, HOST1)).thenAnswer(new Answer<ListenableFuture<JobUndeployResponse>>() {
        @Override
        public ListenableFuture<JobUndeployResponse> answer(final InvocationOnMock invocation) throws Throwable {
            when(heliosClient.hostStatus(HOST1)).thenReturn(statusFuture12);
            return undeployFuture1;
        }
    });
    when(heliosClient.undeploy(JOB_ID2, HOST2)).thenAnswer(new Answer<ListenableFuture<JobUndeployResponse>>() {
        @Override
        public ListenableFuture<JobUndeployResponse> answer(final InvocationOnMock invocation) throws Throwable {
            when(heliosClient.hostStatus(HOST2)).thenReturn(statusFuture22);
            return undeployFuture2;
        }
    });
    solo.undeployLeftoverJobs();
    verify(heliosClient).undeploy(JOB_ID1, HOST1);
    verify(heliosClient).undeploy(JOB_ID2, HOST2);
}
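The idiom worth noting here is re-stubbing the mock from inside an Answer, so that the client's reported state changes as a side effect of the undeploy call under test. A minimal sketch of the same idiom against a hypothetical AsyncClient interface (not the real Helios API):

// Minimal sketch of the "patch the mock inside an Answer" idiom, using a
// hypothetical AsyncClient interface rather than the real HeliosClient.
interface AsyncClient {
    ListenableFuture<String> status(String host);
    ListenableFuture<Boolean> undeploy(String host);
}

final AsyncClient client = mock(AsyncClient.class);
when(client.status("host")).thenReturn(Futures.immediateFuture("DEPLOYED"));
when(client.undeploy("host")).thenAnswer(new Answer<ListenableFuture<Boolean>>() {
    @Override
    public ListenableFuture<Boolean> answer(InvocationOnMock invocation) {
        // Once undeploy has been called, later status() calls see the new state.
        when(client.status("host")).thenReturn(Futures.immediateFuture("UNDEPLOYED"));
        return Futures.immediateFuture(true);
    }
});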
Use of com.google.common.util.concurrent.ListenableFuture in project cdap by caskdata.
From class ServiceLifeCycleTestRun, method testContentConsumerLifecycle.
@Test
public void testContentConsumerLifecycle() throws Exception {
    // Use a single thread so that context capture and release can be tested
    System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
    try {
        ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
        final ServiceManager serviceManager = appManager.getServiceManager("test").start();
        CountDownLatch uploadLatch = new CountDownLatch(1);
        // Create five concurrent uploads
        List<ListenableFuture<Integer>> completions = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
            completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
        }
        // Get the states; there should be six handler instances initialized:
        // five for the in-progress uploads, one for the getStates call
        Tasks.waitFor(6, new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return getStates(serviceManager).size();
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Finish the uploads
        uploadLatch.countDown();
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the results
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Get the states; there should still be six handler instances initialized
        final Multimap<Integer, String> states = getStates(serviceManager);
        Assert.assertEquals(6, states.size());
        // Do another round of six concurrent uploads. It should reuse all six existing contexts
        completions.clear();
        uploadLatch = new CountDownLatch(1);
        for (int i = 0; i < 6; i++) {
            completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
        }
        // Get the states; there should be seven handler instances initialized:
        // six for the in-progress uploads, one for the getStates call.
        // Out of the seven states, six should be the same as the old ones
        Tasks.waitFor(true, new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                Multimap<Integer, String> newStates = getStates(serviceManager);
                if (newStates.size() != 7) {
                    return false;
                }
                for (Map.Entry<Integer, String> entry : states.entries()) {
                    if (!newStates.containsEntry(entry.getKey(), entry.getValue())) {
                        return false;
                    }
                }
                return true;
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Complete the uploads
        uploadLatch.countDown();
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the results
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Query the queue size metric and expect the maximum to be 6. Only the six contexts
        // from the concurrent uploads get captured and added back to the queue, while the one
        // created for the getStates call stays in the thread cache and never enters the queue.
        Tasks.waitFor(6L, new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                Map<String, String> context = ImmutableMap.of(
                        Constants.Metrics.Tag.NAMESPACE, Id.Namespace.DEFAULT.getId(),
                        Constants.Metrics.Tag.APP, ServiceLifecycleApp.class.getSimpleName(),
                        Constants.Metrics.Tag.SERVICE, "test");
                MetricDataQuery metricQuery = new MetricDataQuery(0, Integer.MAX_VALUE, Integer.MAX_VALUE,
                        "system.context.pool.size", AggregationFunction.MAX, context, ImmutableList.<String>of());
                Iterator<MetricTimeSeries> result = getMetricsManager().query(metricQuery).iterator();
                return result.hasNext() ? result.next().getTimeValues().get(0).getValue() : 0L;
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    } finally {
        System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
    }
}
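slowUpload is a test helper that does not appear in this snippet. A plausible minimal sketch, under the assumption that it streams a chunked HTTP request from a background ListeningExecutorService (an assumed executor field here), holds the connection open until the latch is released, and completes the future with the response code:

// Hypothetical sketch of the slowUpload helper; the real CDAP helper is not shown,
// and the endpoint and payload handling here are illustrative only.
private ListenableFuture<Integer> slowUpload(final ServiceManager serviceManager, final String method,
                                             final String endpoint, final CountDownLatch latch) {
    // 'executor' is assumed to be a ListeningExecutorService field of the test class
    return executor.submit(new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
            URL url = new URL(serviceManager.getServiceURL(), endpoint);
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            try {
                conn.setDoOutput(true);
                conn.setRequestMethod(method);
                // Chunked streaming keeps the request open while the latch blocks
                conn.setChunkedStreamingMode(1024);
                OutputStream out = conn.getOutputStream();
                out.write("begin".getBytes(StandardCharsets.UTF_8));
                out.flush();
                latch.await();
                out.write("end".getBytes(StandardCharsets.UTF_8));
                out.close();
                return conn.getResponseCode();
            } finally {
                conn.disconnect();
            }
        }
    });
}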
Use of com.google.common.util.concurrent.ListenableFuture in project cdap by caskdata.
From class ServiceLifeCycleTestRun, method testContentConsumerProducerLifecycle.
@Test
public void testContentConsumerProducerLifecycle() throws Exception {
    // Use a single thread so that context capture and release can be tested
    System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
    try {
        ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
        final ServiceManager serviceManager = appManager.getServiceManager("test").start();
        final DataSetManager<KeyValueTable> datasetManager = getDataset(ServiceLifecycleApp.HANDLER_TABLE_NAME);
        // Clean up the dataset first to avoid being affected by other tests
        datasetManager.get().delete(Bytes.toBytes("called"));
        datasetManager.get().delete(Bytes.toBytes("completed"));
        datasetManager.flush();
        CountDownLatch uploadLatch = new CountDownLatch(1);
        // Create five concurrent uploads
        List<ListenableFuture<Integer>> completions = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
            completions.add(slowUpload(serviceManager, "POST", "uploadDownload", uploadLatch));
        }
        // Get the states; there should be six handler instances initialized:
        // five for the in-progress uploads, one for the getStates call
        Tasks.waitFor(6, new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return getStates(serviceManager).size();
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Complete the uploads
        uploadLatch.countDown();
        // Make sure the download through the content producer has started
        Tasks.waitFor(true, new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                byte[] value = datasetManager.get().read("called");
                datasetManager.flush();
                if (value == null || value.length != Bytes.SIZEOF_LONG) {
                    return false;
                }
                return Bytes.toLong(value) > 5;
            }
        }, 10L, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Get the states; there should still be six handler instances, since the ContentConsumer
        // should pass its captured context to the ContentProducer without creating a new one
        Multimap<Integer, String> states = getStates(serviceManager);
        Assert.assertEquals(6, states.size());
        // Set the completion flag in the dataset
        datasetManager.get().write("completed", Bytes.toBytes(true));
        datasetManager.flush();
        // Wait for completion
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the upload results
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Get the states again; they should still be the same six instances
        Assert.assertEquals(states, getStates(serviceManager));
    } finally {
        System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
    }
}
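Both CDAP tests wait with Futures.successfulAsList and then assert on each future individually. Unlike Futures.allAsList, successfulAsList does not fail fast when one input fails, which is why the per-future assertions are still needed. A small illustration of the difference:

ListenableFuture<Integer> ok = Futures.immediateFuture(200);
ListenableFuture<Integer> bad = Futures.immediateFailedFuture(new IOException("upload failed"));
// allAsList fails as soon as any input future fails...
Futures.allAsList(ok, bad).get();        // throws ExecutionException wrapping the IOException
// ...while successfulAsList waits for all inputs and maps failures to null.
Futures.successfulAsList(ok, bad).get(); // returns [200, null]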