Use of org.apache.flink.shaded.guava30.com.google.common.util.concurrent.ThreadFactoryBuilder in project che by eclipse.
Example: class WorkspaceRuntimes, method shutdown().
/**
 * Terminates the workspace runtimes service: from this point on no workspace may be
 * started or stopped directly, every currently running workspace is stopped, and any
 * in-flight start tasks will eventually be interrupted.
 *
 * @throws IllegalStateException
 *         if this shutdown method has already been invoked once
 */
public void shutdown() throws InterruptedException {
    // Flip the shutdown flag exactly once; a second caller gets an error.
    if (!isShutdown.compareAndSet(false, true)) {
        throw new IllegalStateException("Workspace runtimes service shutdown has been already called");
    }

    // Under the global write lock, snapshot the ids of every workspace that is not
    // already stopping, then wipe the state map so no new operations are accepted.
    final List<String> runningIds;
    try (@SuppressWarnings("unused") Unlocker u = locks.writeAllLock()) {
        runningIds = states.entrySet()
                           .stream()
                           .filter(entry -> entry.getValue().status != WorkspaceStatus.STOPPING)
                           .map(Map.Entry::getKey)
                           .collect(Collectors.toList());
        states.clear();
    }

    if (runningIds.isEmpty()) {
        return;
    }

    LOG.info("Shutdown running environments, environments to stop: '{}'", runningIds.size());

    // Stop environments concurrently; pool is sized to 2x the available processors.
    ExecutorService stopPool =
        Executors.newFixedThreadPool(2 * Runtime.getRuntime().availableProcessors(),
                                     new ThreadFactoryBuilder().setNameFormat("StopEnvironmentsPool-%d")
                                                               .setDaemon(false)
                                                               .build());
    for (String workspaceId : runningIds) {
        stopPool.execute(() -> {
            try {
                envEngine.stop(workspaceId);
            } catch (EnvironmentNotRunningException ignored) {
                // might be already stopped
            } catch (Exception x) {
                LOG.error(x.getMessage(), x);
            }
        });
    }

    // Two-phase shutdown: wait politely, then force, then give up with an error.
    stopPool.shutdown();
    try {
        if (!stopPool.awaitTermination(30, TimeUnit.SECONDS)) {
            stopPool.shutdownNow();
            if (!stopPool.awaitTermination(60, TimeUnit.SECONDS)) {
                LOG.error("Unable to stop runtimes termination pool");
            }
        }
    } catch (InterruptedException e) {
        stopPool.shutdownNow();
        // Preserve the caller's interrupt status.
        Thread.currentThread().interrupt();
    }
}
Use of org.apache.flink.shaded.guava30.com.google.common.util.concurrent.ThreadFactoryBuilder in project che by eclipse.
Example: class SystemManager, method stopServices().
/**
 * Stops some of the system services, preparing the system for a lighter shutdown.
 * Transitions the system status from {@link SystemStatus#RUNNING} to
 * {@link SystemStatus#PREPARING_TO_SHUTDOWN}.
 *
 * @throws ConflictException
 *         when the system status is anything other than running
 */
public void stopServices() throws ConflictException {
    // Only one caller may initiate shutdown; all others observe the current status.
    if (!statusRef.compareAndSet(RUNNING, PREPARING_TO_SHUTDOWN)) {
        throw new ConflictException("System shutdown has been already called, system status: " + statusRef.get());
    }
    // Run the actual stop work on a dedicated single non-daemon thread so the JVM
    // stays alive until services are stopped; uncaught errors go to the logger.
    ExecutorService shutdownExec =
        Executors.newSingleThreadExecutor(
            new ThreadFactoryBuilder()
                .setDaemon(false)
                .setNameFormat("ShutdownSystemServicesPool")
                .setUncaughtExceptionHandler(LoggingUncaughtExceptionHandler.getInstance())
                .build());
    shutdownExec.execute(ThreadLocalPropagateContext.wrap(this::doStopServices));
    // No further tasks are expected; let the executor terminate after the stop task.
    shutdownExec.shutdown();
}
Use of org.apache.flink.shaded.guava30.com.google.common.util.concurrent.ThreadFactoryBuilder in project che by eclipse.
Example: class WorkspaceRuntimeIntegrationTest, method setUp().
@BeforeMethod
public void setUp() throws Exception {
// Builds a real CheEnvironmentEngine wired with the test's mocked collaborators
// (DAOs, providers, registries) and a local API endpoint; 2000 is presumably the
// default machine memory size in MB — TODO confirm against the constructor signature.
CheEnvironmentEngine environmentEngine = new CheEnvironmentEngine(snapshotDao, machineInstanceProviders, "/tmp", 2000, eventService, environmentParser, new DefaultServicesStartStrategy(), instanceProvider, infrastructureProvisioner, "http://localhost:8080/api", recipeDownloader, containerNameGenerator, agentRegistry, sharedPool);
// System under test: runtimes backed by the engine built above.
runtimes = new WorkspaceRuntimes(eventService, environmentEngine, agentSorter, launcherFactory, agentRegistry, snapshotDao, sharedPool);
// Single-thread pool used by the test for async workspace operations; threads are
// named after this test class for easier debugging of stuck tests.
executor = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setNameFormat(this.getClass().toString() + "-%d").build());
// Authenticate the current thread's environment context with a fake subject.
EnvironmentContext.getCurrent().setSubject(new SubjectImpl("name", "id", "token", false));
}
Use of org.apache.flink.shaded.guava30.com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.
Example: class EntityGroupFSTimelineStore, method serviceStart().
@Override
protected void serviceStart() throws Exception {
    LOG.info("Starting {}", getName());
    summaryStore.start();

    Configuration conf = getConfig();
    // Wire the summary store behind an ACL-checking data manager.
    aclManager = new TimelineACLsManager(conf);
    aclManager.setTimelineStore(summaryStore);
    summaryTdm = new TimelineDataManager(summaryStore, aclManager);
    summaryTdm.init(conf);
    addService(summaryTdm);
    // Start child services that aren't already started. NOTE: the original code also
    // invoked super.serviceStart() at the very top of this method; that first call was
    // redundant and ran before summaryTdm was registered via addService, so only this
    // single call (after registration) is kept.
    super.serviceStart();

    // Ensure the active/done root directories exist with the expected permissions.
    if (!fs.exists(activeRootPath)) {
        fs.mkdirs(activeRootPath);
        fs.setPermission(activeRootPath, ACTIVE_DIR_PERMISSION);
    }
    if (!fs.exists(doneRootPath)) {
        fs.mkdirs(doneRootPath);
        fs.setPermission(doneRootPath, DONE_DIR_PERMISSION);
    }

    // JSON machinery for reading entity log files (JAXB-annotated types).
    objMapper = new ObjectMapper();
    objMapper.setAnnotationIntrospector(new JaxbAnnotationIntrospector(TypeFactory.defaultInstance()));
    jsonFactory = new MappingJsonFactory(objMapper);

    final long scanIntervalSecs = conf.getLong(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS, YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS_DEFAULT);
    final long cleanerIntervalSecs = conf.getLong(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CLEANER_INTERVAL_SECONDS, YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CLEANER_INTERVAL_SECONDS_DEFAULT);
    final int numThreads = conf.getInt(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_THREADS, YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_THREADS_DEFAULT);
    LOG.info("Scanning active directory {} every {} seconds", activeRootPath, scanIntervalSecs);
    LOG.info("Cleaning logs every {} seconds", cleanerIntervalSecs);

    // Background workers: scanner starts immediately; cleaner waits one full interval.
    executor = new ScheduledThreadPoolExecutor(numThreads, new ThreadFactoryBuilder().setNameFormat("EntityLogPluginWorker #%d").build());
    executor.scheduleAtFixedRate(new EntityLogScanner(), 0, scanIntervalSecs, TimeUnit.SECONDS);
    executor.scheduleAtFixedRate(new EntityLogCleaner(), cleanerIntervalSecs, cleanerIntervalSecs, TimeUnit.SECONDS);
}
Use of org.apache.flink.shaded.guava30.com.google.common.util.concurrent.ThreadFactoryBuilder in project hive by apache.
Example: class HiveMetaStoreChecker, method checkPartitionDirs().
/**
 * Recursively collects the leaf partition directories under {@code basePath}.
 *
 * Assume that depth is 2, i.e., partition columns are a and b
 * tblPath/a=1 => throw exception
 * tblPath/a=1/file => throw exception
 * tblPath/a=1/b=2/file => return a=1/b=2
 * tblPath/a=1/b=2/c=3 => return a=1/b=2
 * tblPath/a=1/b=2/c=3/file => return a=1/b=2
 *
 * @param basePath
 *          Start directory
 * @param allDirs
 *          This set will contain the leaf paths at the end.
 * @param maxDepth
 *          Specify how deep the search goes.
 * @throws IOException
 *           Thrown if we can't get lists from the fs.
 * @throws HiveException
 *           Thrown if an unexpected partition layout is found.
 */
private void checkPartitionDirs(Path basePath, Set<Path> allDirs, int maxDepth) throws IOException, HiveException {
    // Here we just reuse the THREAD_COUNT configuration for
    // METASTORE_FS_HANDLER_THREADS_COUNT since this results in better performance
    // The number of missing partitions discovered are later added by metastore using a
    // threadpool of size METASTORE_FS_HANDLER_THREADS_COUNT. If we have different sized
    // pool here the smaller sized pool of the two becomes a bottleneck
    int poolSize = conf.getInt(ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT.varname, 15);
    ExecutorService executor;
    if (poolSize <= 1) {
        LOG.debug("Using single-threaded version of MSCK-GetPaths");
        executor = MoreExecutors.sameThreadExecutor();
    } else {
        LOG.debug("Using multi-threaded version of MSCK-GetPaths with number of threads " + poolSize);
        ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("MSCK-GetPaths-%d").build();
        // Note: the previous cast to ThreadPoolExecutor was unnecessary — the variable
        // is declared as ExecutorService, which newFixedThreadPool already returns.
        executor = Executors.newFixedThreadPool(poolSize, threadFactory);
    }
    try {
        checkPartitionDirs(executor, basePath, allDirs, basePath.getFileSystem(conf), maxDepth);
    } finally {
        // Always release the pool, even when the recursive scan throws; otherwise the
        // worker threads leak on the IOException/HiveException path.
        executor.shutdown();
    }
}
Aggregations of ThreadFactoryBuilder usages end here.