use of com.google.common.util.concurrent.ListenableFuture in project cdap by caskdata.
the class SparkRuntimeService method startUp.
@Override
protected void startUp() throws Exception {
  // Additional spark job initialization at run-time
  // This context is for calling initialize and onFinish on the Spark program

  // Fields injection for the Spark program.
  // It has to be done in here instead of in SparkProgramRunner for the @UseDataset injection,
  // since the dataset cache being used in Spark is a MultiThreadDatasetCache.
  // The AbstractExecutionThreadService guarantees that startUp(), run() and shutDown() all happen in the same thread.
  Reflections.visit(spark, spark.getClass(),
                    new PropertyFieldSetter(runtimeContext.getSparkSpecification().getProperties()),
                    new DataSetFieldSetter(runtimeContext.getDatasetCache()),
                    new MetricsFieldSetter(runtimeContext));

  // Creates a temporary directory locally for storing all generated files.
  File tempDir = DirUtils.createTempDir(new File(cConf.get(Constants.CFG_LOCAL_DATA_DIR),
                                                 cConf.get(Constants.AppFabric.TEMP_DIR)).getAbsoluteFile());
  tempDir.mkdirs();
  this.cleanupTask = createCleanupTask(tempDir, System.getProperties());
  try {
    initialize();
    SparkRuntimeContextConfig contextConfig = new SparkRuntimeContextConfig(runtimeContext.getConfiguration());
    final File jobJar = generateJobJar(tempDir, contextConfig.isLocal(), cConf);
    final List<LocalizeResource> localizeResources = new ArrayList<>();
    String metricsConfPath;
    String classpath = "";
    if (contextConfig.isLocal()) {
      // In local mode, always copy (or link if local) user requested resources
      copyUserResources(context.getLocalizeResources(), tempDir);
      File metricsConf = SparkMetricsSink.writeConfig(new File(tempDir, CDAP_METRICS_PROPERTIES));
      metricsConfPath = metricsConf.getAbsolutePath();
    } else {
      // Localize all user requested files in distributed mode
      distributedUserResources(context.getLocalizeResources(), localizeResources);

      // Localize both the unexpanded and the expanded program jar
      File programJar = Locations.linkOrCopy(runtimeContext.getProgram().getJarLocation(),
                                             new File(tempDir, SparkRuntimeContextProvider.PROGRAM_JAR_NAME));
      File expandedProgramJar = Locations.linkOrCopy(runtimeContext.getProgram().getJarLocation(),
                                                     new File(tempDir, SparkRuntimeContextProvider.PROGRAM_JAR_EXPANDED_NAME));
      localizeResources.add(new LocalizeResource(programJar));
      localizeResources.add(new LocalizeResource(expandedProgramJar, true));

      // Localize plugins
      if (pluginArchive != null) {
        localizeResources.add(new LocalizeResource(pluginArchive, true));
      }

      // Create and localize the launcher jar, which sets up services and the classloader for spark containers
      localizeResources.add(new LocalizeResource(createLauncherJar(tempDir)));

      // Create the metrics conf file in the current directory since the same value for the
      // "spark.metrics.conf" config needs to be used for both driver and executor processes.
      // Also localize the metrics conf file to the executor nodes.
      File metricsConf = SparkMetricsSink.writeConfig(new File(CDAP_METRICS_PROPERTIES));
      metricsConfPath = metricsConf.getName();
      localizeResources.add(new LocalizeResource(metricsConf));

      // Localize the cConf file
      localizeResources.add(new LocalizeResource(saveCConf(cConf, tempDir)));

      // Preserve and localize runtime information in the hConf
      Configuration hConf = contextConfig.set(runtimeContext, pluginArchive).getConfiguration();
      localizeResources.add(new LocalizeResource(saveHConf(hConf, tempDir)));

      // Joiner for creating the classpath for spark containers
      Joiner joiner = Joiner.on(File.pathSeparator).skipNulls();

      // Localize the spark.jar archive, which contains all CDAP and dependency jars
      File sparkJar = new File(tempDir, CDAP_SPARK_JAR);
      classpath = joiner.join(Iterables.transform(buildDependencyJar(sparkJar), new Function<String, String>() {
        @Override
        public String apply(String name) {
          return Paths.get("$PWD", CDAP_SPARK_JAR, name).toString();
        }
      }));
      localizeResources.add(new LocalizeResource(sparkJar, true));

      // Localize logback if there is one. It is placed at the beginning of the classpath
      File logbackJar = ProgramRunners.createLogbackJar(new File(tempDir, "logback.xml.jar"));
      if (logbackJar != null) {
        localizeResources.add(new LocalizeResource(logbackJar));
        classpath = joiner.join(Paths.get("$PWD", logbackJar.getName()), classpath);
      }

      // Localize extra jars and append them to the end of the classpath
      List<String> extraJars = new ArrayList<>();
      for (URI jarURI : CConfigurationUtil.getExtraJars(cConf)) {
        extraJars.add(Paths.get("$PWD", LocalizationUtils.getLocalizedName(jarURI)).toString());
        localizeResources.add(new LocalizeResource(jarURI, false));
      }
      classpath = joiner.join(classpath, joiner.join(extraJars));
    }

    final Map<String, String> configs = createSubmitConfigs(tempDir, metricsConfPath, classpath,
                                                            context.getLocalizeResources(), contextConfig.isLocal());
    submitSpark = new Callable<ListenableFuture<RunId>>() {
      @Override
      public ListenableFuture<RunId> call() throws Exception {
        // This happens when stop() was called while the service was still starting
        if (!isRunning()) {
          return immediateCancelledFuture();
        }
        return sparkSubmitter.submit(runtimeContext, configs, localizeResources, jobJar, runtimeContext.getRunId());
      }
    };
  } catch (LinkageError e) {
    // A LinkageError is thrown if the user program is missing dependencies (CDAP-2543)
    throw new Exception(e.getMessage(), e);
  } catch (Throwable t) {
    cleanupTask.run();
    throw t;
  }
}
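The notable ListenableFuture usage here is that the submission is wrapped in a Callable<ListenableFuture<RunId>> rather than submitted eagerly, so the state check happens at submission time and a cancelled future is returned if stop() arrived first. Below is a minimal, self-contained sketch of that deferral pattern; the class name DeferredSubmitExample and the running flag are hypothetical stand-ins, not CDAP code.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public final class DeferredSubmitExample {

  public static void main(String[] args) throws Exception {
    final ListeningExecutorService executor =
      MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    final boolean running = true;  // hypothetical stand-in for the service's isRunning() check

    // Wrapping the submission in a Callable defers it until the caller decides to submit,
    // so the state check happens at submission time, not at construction time.
    Callable<ListenableFuture<String>> submit = new Callable<ListenableFuture<String>>() {
      @Override
      public ListenableFuture<String> call() throws Exception {
        if (!running) {
          // Mirrors the immediateCancelledFuture() short-circuit in startUp() above
          return Futures.immediateCancelledFuture();
        }
        return executor.submit(new Callable<String>() {
          @Override
          public String call() {
            return "job-run-id";
          }
        });
      }
    };

    System.out.println("Run id: " + submit.call().get());
    executor.shutdownNow();
  }
}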
use of com.google.common.util.concurrent.ListenableFuture in project cdap by caskdata.
the class ServiceLifeCycleTestRun method testContentConsumerLifecycle.
@Test
public void testContentConsumerLifecycle() throws Exception {
  // Set to have one thread only for testing context capture and release
  System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    final ServiceManager serviceManager = appManager.getServiceManager("test").start();
    CountDownLatch uploadLatch = new CountDownLatch(1);

    // Create five concurrent uploads
    List<ListenableFuture<Integer>> completions = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }

    // Get the states; there should be six handler instances initialized:
    // five for the in-progress uploads, one for the getStates call
    Tasks.waitFor(6, new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return getStates(serviceManager).size();
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

    // Finish the uploads
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);

    // Verify the results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }

    // Get the states; there should still be six handler instances initialized
    final Multimap<Integer, String> states = getStates(serviceManager);
    Assert.assertEquals(6, states.size());

    // Do another round of six concurrent uploads. It should reuse all of the existing six contexts
    completions.clear();
    uploadLatch = new CountDownLatch(1);
    for (int i = 0; i < 6; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }

    // Get the states; there should be seven handler instances initialized:
    // six for the in-progress uploads, one for the getStates call.
    // Out of the seven states, six of them should be the same as the old ones
    Tasks.waitFor(true, new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        Multimap<Integer, String> newStates = getStates(serviceManager);
        if (newStates.size() != 7) {
          return false;
        }
        for (Map.Entry<Integer, String> entry : states.entries()) {
          if (!newStates.containsEntry(entry.getKey(), entry.getValue())) {
            return false;
          }
        }
        return true;
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

    // Complete the uploads
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);

    // Verify the results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }

    // Query the queue size metrics. Expect the maximum to be 6.
    // This is because only the six contexts from the concurrent uploads get captured and added back
    // to the queue, while the one created for the getStates() call stays in the thread cache,
    // but not in the queue.
    Tasks.waitFor(6L, new Callable<Long>() {
      @Override
      public Long call() throws Exception {
        Map<String, String> context = ImmutableMap.of(
          Constants.Metrics.Tag.NAMESPACE, Id.Namespace.DEFAULT.getId(),
          Constants.Metrics.Tag.APP, ServiceLifecycleApp.class.getSimpleName(),
          Constants.Metrics.Tag.SERVICE, "test");
        MetricDataQuery metricQuery = new MetricDataQuery(0, Integer.MAX_VALUE, Integer.MAX_VALUE,
                                                          "system.context.pool.size", AggregationFunction.MAX,
                                                          context, ImmutableList.<String>of());
        Iterator<MetricTimeSeries> result = getMetricsManager().query(metricQuery).iterator();
        return result.hasNext() ? result.next().getTimeValues().get(0).getValue() : 0L;
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  } finally {
    System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
  }
}
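The slowUpload and getStates helpers are not shown on this page. Purely as an illustration of how such a helper could hand back a ListenableFuture<Integer> for the eventual HTTP status, here is a hypothetical sketch built on Guava's SettableFuture; the body contents, chunk size, and URL handling are guesses, not the actual test utility.

// Hypothetical sketch only: streams a chunked request body, holds the connection open
// until the latch is released, then completes the future with the response code.
private ListenableFuture<Integer> slowUpload(final ServiceManager serviceManager, final String method,
                                             final String endpoint, final CountDownLatch latch) {
  final SettableFuture<Integer> completion = SettableFuture.create();
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        URL url = new URL(serviceManager.getServiceURL(), endpoint);
        HttpURLConnection urlConn = (HttpURLConnection) url.openConnection();
        urlConn.setDoOutput(true);
        urlConn.setChunkedStreamingMode(5);
        urlConn.setRequestMethod(method);
        try (OutputStream os = urlConn.getOutputStream()) {
          os.write("0123456789".getBytes(StandardCharsets.UTF_8));  // first chunk
          os.flush();
          latch.await();  // keep the handler context captured until the test releases it
        }
        completion.set(urlConn.getResponseCode());
        urlConn.disconnect();
      } catch (Exception e) {
        completion.setException(e);
      }
    }
  };
  t.start();
  return completion;
}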
use of com.google.common.util.concurrent.ListenableFuture in project cdap by caskdata.
the class ServiceLifeCycleTestRun method testContentConsumerProducerLifecycle.
@Test
public void testContentConsumerProducerLifecycle() throws Exception {
  // Set to have one thread only for testing context capture and release
  System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    final ServiceManager serviceManager = appManager.getServiceManager("test").start();
    final DataSetManager<KeyValueTable> datasetManager = getDataset(ServiceLifecycleApp.HANDLER_TABLE_NAME);

    // Clean up the dataset first to avoid being affected by other tests
    datasetManager.get().delete(Bytes.toBytes("called"));
    datasetManager.get().delete(Bytes.toBytes("completed"));
    datasetManager.flush();
    CountDownLatch uploadLatch = new CountDownLatch(1);

    // Create five concurrent uploads
    List<ListenableFuture<Integer>> completions = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      completions.add(slowUpload(serviceManager, "POST", "uploadDownload", uploadLatch));
    }

    // Get the states; there should be six handler instances initialized:
    // five for the in-progress uploads, one for the getStates call
    Tasks.waitFor(6, new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return getStates(serviceManager).size();
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

    // Complete the uploads
    uploadLatch.countDown();

    // Make sure the download through the content producer has started
    Tasks.waitFor(true, new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        byte[] value = datasetManager.get().read("called");
        datasetManager.flush();
        if (value == null || value.length != Bytes.SIZEOF_LONG) {
          return false;
        }
        return Bytes.toLong(value) > 5;
      }
    }, 10L, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

    // Get the states; there should still be six handler instances, since the ContentConsumer should
    // be passing its captured context to the ContentProducer without creating a new one
    Multimap<Integer, String> states = getStates(serviceManager);
    Assert.assertEquals(6, states.size());

    // Set the complete flag in the dataset
    datasetManager.get().write("completed", Bytes.toBytes(true));
    datasetManager.flush();

    // Wait for completion
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);

    // Verify the upload results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }

    // Get the states again; they should still be the same six instances
    Assert.assertEquals(states, getStates(serviceManager));
  } finally {
    System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
  }
}
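Both tests wait on Futures.successfulAsList(...) and then still check each future individually. That matters because successfulAsList never fails as a whole: failed or cancelled inputs are replaced by null in the result list, unlike allAsList, which fails fast. A small standalone illustration of the difference (the class name is mine):

import java.util.List;
import java.util.concurrent.ExecutionException;

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public final class SuccessfulAsListExample {

  public static void main(String[] args) throws Exception {
    ListenableFuture<Integer> ok = Futures.immediateFuture(200);
    ListenableFuture<Integer> failed = Futures.immediateFailedFuture(new RuntimeException("boom"));

    // successfulAsList substitutes null for failed inputs instead of failing the aggregate,
    // which is why the tests above still call get() on every individual future afterwards.
    List<Integer> results = Futures.successfulAsList(ImmutableList.of(ok, failed)).get();
    System.out.println(results);  // prints [200, null]

    // allAsList, by contrast, fails as soon as any input fails.
    try {
      Futures.allAsList(ImmutableList.of(ok, failed)).get();
    } catch (ExecutionException e) {
      System.out.println("allAsList failed: " + e.getCause().getMessage());
    }
  }
}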
use of com.google.common.util.concurrent.ListenableFuture in project cdap-ingest by caskdata.
the class FileTailerSinkTest method getDummyConcurrentWriter.
private StreamWriter getDummyConcurrentWriter(final AtomicInteger count) {
  StreamWriter writerMock = Mockito.mock(StreamWriter.class);
  Mockito.doAnswer(new Answer<ListenableFuture<Void>>() {
    @Override
    public ListenableFuture<Void> answer(InvocationOnMock invocationOnMock) throws Throwable {
      count.incrementAndGet();
      return Futures.immediateFuture((Void) null);
    }
  }).when(writerMock).write("test", Charset.defaultCharset());
  return writerMock;
}
use of com.google.common.util.concurrent.ListenableFuture in project cdap by caskdata.
the class HBaseTestBase method parallelRun.
/**
 * Executes the given list of Runnables in parallel using a fixed thread pool executor. This method blocks
 * until all runnables have finished.
 */
private void parallelRun(List<? extends Runnable> runnables) {
  ListeningExecutorService executor = MoreExecutors.listeningDecorator(
    Executors.newFixedThreadPool(runnables.size()));
  try {
    List<ListenableFuture<?>> futures = new ArrayList<>(runnables.size());
    for (Runnable r : runnables) {
      futures.add(executor.submit(r));
    }
    Futures.getUnchecked(Futures.allAsList(futures));
  } finally {
    executor.shutdownNow();
    try {
      executor.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      LOG.error("Interrupted", e);
    }
  }
}
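Futures.getUnchecked blocks on the combined allAsList future and rethrows any task failure wrapped in an UncheckedExecutionException, so one failing Runnable fails the whole call. A minimal hypothetical caller, assuming it runs inside HBaseTestBase or a subclass:

List<Runnable> tasks = new ArrayList<>();
for (int i = 0; i < 4; i++) {
  final int id = i;
  tasks.add(new Runnable() {
    @Override
    public void run() {
      // e.g. each task could issue writes against the same HBase table
      System.out.println("task " + id + " on " + Thread.currentThread().getName());
    }
  });
}
parallelRun(tasks);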