Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by Apache.
The class AppLevelTimelineCollector, method serviceStart.
@Override
protected void serviceStart() throws Exception {
  // Launch the aggregation thread
  appAggregationExecutor = new ScheduledThreadPoolExecutor(
      AppLevelTimelineCollector.AGGREGATION_EXECUTOR_NUM_THREADS,
      new ThreadFactoryBuilder()
          .setNameFormat("TimelineCollector Aggregation thread #%d")
          .build());
  appAggregator = new AppLevelAggregator();
  appAggregationExecutor.scheduleAtFixedRate(appAggregator,
      AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
      AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
      TimeUnit.SECONDS);
  super.serviceStart();
}
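Every example on this page follows the same core pattern: build a ThreadFactory with a readable setNameFormat (often combined with setDaemon(true)), then hand it to an executor so the resulting threads are identifiable in thread dumps and logs. A minimal, self-contained sketch of just that pattern (class and thread names here are illustrative, not taken from any of the projects above):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class NamedExecutorSketch {
  public static void main(String[] args) {
    // %d in the name format is replaced with a per-factory counter,
    // so threads come out as my-worker-0, my-worker-1, ...
    ThreadFactory factory = new ThreadFactoryBuilder()
        .setNameFormat("my-worker-%d")  // illustrative name, not from any project above
        .setDaemon(true)                // daemon threads will not keep the JVM alive
        .build();
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1, factory);
    pool.submit(() -> System.out.println(Thread.currentThread().getName()));
    pool.shutdown();
  }
}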
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project storm by Apache.
The class HiveState, method prepare.
public void prepare(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
  try {
    if (options.getKerberosPrincipal() == null && options.getKerberosKeytab() == null) {
      kerberosEnabled = false;
    } else if (options.getKerberosPrincipal() != null && options.getKerberosKeytab() != null) {
      kerberosEnabled = true;
    } else {
      throw new IllegalArgumentException(
          "To enable Kerberos, need to set both KerberosPrincipal & KerberosKeytab");
    }
    if (kerberosEnabled) {
      try {
        ugi = HiveUtils.authenticate(options.getKerberosKeytab(), options.getKerberosPrincipal());
      } catch (HiveUtils.AuthenticationFailed ex) {
        LOG.error("Hive kerberos authentication failed " + ex.getMessage(), ex);
        throw new IllegalArgumentException(ex);
      }
    }
    allWriters = new ConcurrentHashMap<HiveEndPoint, HiveWriter>();
    String timeoutName = "hive-bolt-%d";
    this.callTimeoutPool = Executors.newFixedThreadPool(1,
        new ThreadFactoryBuilder().setNameFormat(timeoutName).build());
    heartBeatTimer = new Timer();
    setupHeartBeatTimer();
  } catch (Exception e) {
    LOG.warn("unable to make connection to hive ", e);
  }
}
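A state object like this should also release the executor and the timer it creates when the topology shuts down. The sketch below is only a hypothetical teardown mirroring the resources allocated in prepare(); it is not the actual storm-hive cleanup code:

// Hypothetical teardown; HiveState's real shutdown path may differ.
public void cleanup() {
  heartBeatTimer.cancel();      // stop the scheduled heartbeat task
  callTimeoutPool.shutdown();   // accept no new tasks, let in-flight calls finish
  try {
    if (!callTimeoutPool.awaitTermination(10, TimeUnit.SECONDS)) {
      callTimeoutPool.shutdownNow();  // interrupt calls that are hanging
    }
  } catch (InterruptedException e) {
    callTimeoutPool.shutdownNow();
    Thread.currentThread().interrupt();
  }
}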
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project storm by Apache.
The class Localizer, method startCleaner.
public void startCleaner() {
  _cacheCleanupService = new ScheduledThreadPoolExecutor(1,
      new ThreadFactoryBuilder().setNameFormat("Localizer Cache Cleanup").build());
  _cacheCleanupService.scheduleWithFixedDelay(new Runnable() {
    @Override
    public void run() {
      handleCacheCleanup();
    }
  }, _cacheCleanupPeriod, _cacheCleanupPeriod, TimeUnit.MILLISECONDS);
}
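Note the contrast with the Hadoop example above: this one uses scheduleWithFixedDelay rather than scheduleAtFixedRate. With a fixed delay there is always a full _cacheCleanupPeriod pause between the end of one cleanup pass and the start of the next, so a slow pass simply pushes the schedule back; at a fixed rate the executor would instead start the next pass immediately after a long one in order to catch up.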
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hive by Apache.
The class StatsUtils, method getFileSizeForPartitions.
/**
 * Find the bytes on disk occupied by a list of partitions.
 * @param conf - hive conf
 * @param parts - partition list
 * @return the size of each partition, in the same order as parts
 */
public static List<Long> getFileSizeForPartitions(final HiveConf conf, List<Partition> parts) {
  LOG.info("Number of partitions : " + parts.size());
  ArrayList<Future<Long>> futures = new ArrayList<>();
  int threads = Math.max(1, conf.getIntVar(ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT));
  final ExecutorService pool = Executors.newFixedThreadPool(threads,
      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Get-Partitions-Size-%d").build());
  final ArrayList<Long> sizes = new ArrayList<>(parts.size());
  for (final Partition part : parts) {
    final Path path = part.getDataLocation();
    futures.add(pool.submit(new Callable<Long>() {
      @Override
      public Long call() throws Exception {
        try {
          LOG.debug("Partition path : " + path);
          FileSystem fs = path.getFileSystem(conf);
          return fs.getContentSummary(path).getLength();
        } catch (IOException e) {
          // Treat an unreadable partition as empty rather than failing the whole query.
          return 0L;
        }
      }
    }));
  }
  try {
    for (int i = 0; i < futures.size(); i++) {
      sizes.add(i, futures.get(i).get());
    }
  } catch (InterruptedException | ExecutionException e) {
    LOG.warn("Exception in processing files ", e);
  } finally {
    pool.shutdownNow();
  }
  return sizes;
}
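Two details carry the error-handling story here: the factory marks its threads as daemons, so even a leaked pool cannot keep the JVM from exiting, and the finally block calls shutdownNow() so that any filesystem lookups still in flight are interrupted as soon as one future fails or the calling thread is interrupted.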
Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hive by Apache.
The class Utilities, method getInputPaths.
/**
 * Computes a list of all input paths needed to compute the given MapWork. All aliases
 * are considered and a merged list of input paths is returned. If any input path points
 * to an empty table or partition, a dummy file in the scratch dir is created instead and
 * added to the list. This is needed to avoid special-casing the operator pipeline for
 * these cases.
 *
 * @param job JobConf used to run the job
 * @param work MapWork encapsulating the info about the task
 * @param hiveScratchDir The tmp dir used to create dummy files if needed
 * @param ctx Context object
 * @return List of paths to process for the given MapWork
 * @throws Exception
 */
public static List<Path> getInputPaths(JobConf job, MapWork work, Path hiveScratchDir,
    Context ctx, boolean skipDummy) throws Exception {
  Set<Path> pathsProcessed = new HashSet<Path>();
  List<Path> pathsToAdd = new LinkedList<Path>();
  // AliasToWork contains all the aliases
  for (String alias : work.getAliasToWork().keySet()) {
    LOG.info("Processing alias " + alias);
    // The alias may not have any path
    boolean isEmptyTable = true;
    boolean hasLogged = false;
    // Note: this copies the list because createDummyFileForEmptyPartition may modify the map.
    for (Path file : new LinkedList<Path>(work.getPathToAliases().keySet())) {
      List<String> aliases = work.getPathToAliases().get(file);
      if (aliases.contains(alias)) {
        if (file != null) {
          isEmptyTable = false;
        } else {
          LOG.warn("Found a null path for alias " + alias);
          continue;
        }
        // processed only once
        if (pathsProcessed.contains(file)) {
          continue;
        }
        StringInternUtils.internUriStringsInPath(file);
        pathsProcessed.add(file);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding input file " + file);
        } else if (!hasLogged) {
          hasLogged = true;
          LOG.info("Adding " + work.getPathToAliases().size() + " inputs; the first input is " + file);
        }
        pathsToAdd.add(file);
      }
    }
    // An empty table still needs a (dummy) input path so the operator pipeline produces rows.
    if (isEmptyTable && !skipDummy) {
      pathsToAdd.add(createDummyFileForEmptyTable(job, work, hiveScratchDir, alias));
    }
  }
  ExecutorService pool = null;
  int numExecutors = getMaxExecutorsForInputListing(job, pathsToAdd.size());
  if (numExecutors > 1) {
    pool = Executors.newFixedThreadPool(numExecutors,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Get-Input-Paths-%d").build());
  }
  List<Path> finalPathsToAdd = new LinkedList<>();
  List<Future<Path>> futures = new LinkedList<>();
  for (final Path path : pathsToAdd) {
    if (pool == null) {
      finalPathsToAdd.add(new GetInputPathsCallable(path, job, work, hiveScratchDir, ctx, skipDummy).call());
    } else {
      futures.add(pool.submit(new GetInputPathsCallable(path, job, work, hiveScratchDir, ctx, skipDummy)));
    }
  }
  if (pool != null) {
    for (Future<Path> future : futures) {
      finalPathsToAdd.add(future.get());
    }
  }
  return finalPathsToAdd;
}
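Unlike getFileSizeForPartitions above, this method only builds a pool when getMaxExecutorsForInputListing reports more than one executor; with a single executor each GetInputPathsCallable is invoked inline, avoiding thread-creation overhead for trivial inputs. The pool is never explicitly shut down, which is why the setDaemon(true) flag matters: daemon threads cannot block JVM exit.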