
Example 76 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hive by apache.

The class HiveClientCache, method createCleanupThread.

private ScheduledFuture<?> createCleanupThread(long interval) {
    // Add a maintenance thread that will attempt to trigger a cache clean continuously
    Runnable cleanupThread = new Runnable() {

        @Override
        public void run() {
            cleanup();
        }
    };
    /**
     * Create the cleanup handle. In addition to cleaning up every cleanupInterval, we add
     * a slight offset, so that the very first time it runs, it runs with a slight delay, so
     * as to catch any other connections that were closed when the first timeout happened.
     * As a result, the time we can expect an unused connection to be reaped is
     * 5 seconds after the first timeout, and then after that, it'll check for whether or not
     * it can be cleaned every max(DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS,timeout) seconds
     */
    ThreadFactory daemonThreadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HiveClientCache-cleaner-%d").build();
    return Executors.newScheduledThreadPool(1, daemonThreadFactory).scheduleWithFixedDelay(cleanupThread, timeout + 5, interval, TimeUnit.SECONDS);
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder)
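
A minimal, self-contained sketch of the same pattern (daemon threads plus a readable name format driving a scheduled cleanup). Guava's ThreadFactoryBuilder is the only dependency; the class name, intervals, and cleanup body below are illustrative, not taken from Hive:

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class CleanupSchedulerSketch {

    public static void main(String[] args) throws InterruptedException {
        // Daemon threads never block JVM shutdown; the name format makes the
        // worker easy to spot in thread dumps.
        ThreadFactory factory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("cache-cleaner-%d").build();
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, factory);
        // First run after a 5 second offset, then a fixed delay between runs,
        // mirroring the Hive snippet above.
        scheduler.scheduleWithFixedDelay(() -> System.out.println(Thread.currentThread().getName() + ": cleanup pass"), 5, 30, TimeUnit.SECONDS);
        // Keep the demo JVM alive long enough to observe a couple of runs.
        Thread.sleep(70_000);
    }
}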

Example 77 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hive by apache.

The class LlapZookeeperRegistryImpl, method checkPathChildrenCache.

private synchronized void checkPathChildrenCache(long clusterReadyTimeoutMs) throws IOException {
    Preconditions.checkArgument(zooKeeperClient != null && zooKeeperClient.getState() == CuratorFrameworkState.STARTED, "client is not started");
    // lazily create PathChildrenCache
    if (instancesCache != null)
        return;
    ExecutorService tp = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("StateChangeNotificationHandler").build());
    long startTimeNs = System.nanoTime(), deltaNs = clusterReadyTimeoutMs * 1000000L;
    long sleepTimeMs = Math.min(16, clusterReadyTimeoutMs);
    while (true) {
        PathChildrenCache instancesCache = new PathChildrenCache(zooKeeperClient, workersPath, true);
        instancesCache.getListenable().addListener(new InstanceStateChangeListener(), tp);
        try {
            instancesCache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
            this.instancesCache = instancesCache;
            break;
        } catch (InvalidACLException e) {
            // PathChildrenCache tried to mkdir when the znode wasn't there, and failed.
            CloseableUtils.closeQuietly(instancesCache);
            long elapsedNs = System.nanoTime() - startTimeNs;
            if (deltaNs == 0 || deltaNs <= elapsedNs) {
                LOG.error("Unable to start curator PathChildrenCache", e);
                throw new IOException(e);
            }
            LOG.warn("The cluster is not started yet (InvalidACL); will retry");
            try {
                Thread.sleep(Math.min(sleepTimeMs, (deltaNs - elapsedNs) / 1000000L));
            } catch (InterruptedException e1) {
                LOG.error("Interrupted while retrying the PathChildrenCache startup");
                throw new IOException(e1);
            }
            sleepTimeMs = sleepTimeMs << 1;
        } catch (Exception e) {
            CloseableUtils.closeQuietly(instancesCache);
            LOG.error("Unable to start curator PathChildrenCache", e);
            throw new IOException(e);
        }
    }
}
Also used : ServiceInstanceStateChangeListener(org.apache.hadoop.hive.llap.registry.ServiceInstanceStateChangeListener) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) ExecutorService(java.util.concurrent.ExecutorService) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) IOException(java.io.IOException) InvalidACLException(org.apache.zookeeper.KeeperException.InvalidACLException) URISyntaxException(java.net.URISyntaxException) MalformedURLException(java.net.MalformedURLException) UnknownHostException(java.net.UnknownHostException)
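
The retry loop above is a deadline-bounded exponential backoff around the cache startup call. A plain-JDK sketch of that shape with the Curator specifics stripped out (the Action interface and the class and method names are hypothetical):

import java.io.IOException;
import java.util.concurrent.TimeUnit;

public final class BackoffRetrySketch {

    // Hypothetical stand-in for the fallible startup call (instancesCache.start above).
    interface Action {
        void run() throws Exception;
    }

    // Retries the action with doubling sleeps until it succeeds or the deadline
    // passes, mirroring the structure of checkPathChildrenCache.
    static void retryUntilDeadline(Action action, long timeoutMs) throws IOException {
        long startNs = System.nanoTime();
        long deadlineNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs);
        long sleepMs = Math.min(16, timeoutMs);
        while (true) {
            try {
                action.run();
                return;
            } catch (Exception e) {
                long elapsedNs = System.nanoTime() - startNs;
                if (deadlineNs <= elapsedNs) {
                    throw new IOException("gave up after " + timeoutMs + " ms", e);
                }
                try {
                    // Never sleep past the remaining budget.
                    Thread.sleep(Math.min(sleepMs, TimeUnit.NANOSECONDS.toMillis(deadlineNs - elapsedNs)));
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new IOException(ie);
                }
                // Exponential backoff: 16 ms, 32 ms, 64 ms, ...
                sleepMs <<= 1;
            }
        }
    }
}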

Example 78 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hive by apache.

The class Hive, method loadDynamicPartitions.

/**
   * Given a source directory name of the load path, load all dynamically generated
   * partitions into the specified table and return a map keyed by the full partition
   * spec of each loaded partition.
   * @param loadPath
   * @param tableName
   * @param partSpec
   * @param replace
   * @param numDP number of dynamic partitions
   * @param listBucketingEnabled
   * @param isAcid true if this is an ACID operation
   * @param txnId transaction id; can be 0 unless isAcid == true
   * @param hasFollowingStatsTask true if a stats task runs after this load
   * @param operation the ACID write operation being performed
   * @return partition map details (PartitionSpec and Partition)
   * @throws HiveException
   */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath, final String tableName, final Map<String, String> partSpec, final boolean replace, final int numDP, final boolean listBucketingEnabled, final boolean isAcid, final long txnId, final boolean hasFollowingStatsTask, final AcidUtils.Operation operation) throws HiveException {
    final Map<Map<String, String>, Partition> partitionsMap = Collections.synchronizedMap(new LinkedHashMap<Map<String, String>, Partition>());
    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
    final ExecutorService pool = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build());
    // Get all valid partition paths and existing partitions for them (if any)
    final Table tbl = getTable(tableName);
    final Set<Path> validPartitions = getValidPartitionsInPath(numDP, loadPath);
    final int partsToLoad = validPartitions.size();
    final AtomicInteger partitionsLoaded = new AtomicInteger(0);
    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
    final SessionState parentSession = SessionState.get();
    final List<Future<Void>> futures = Lists.newLinkedList();
    try {
        // for each valid partition path, build the full partition spec and load the partition based on that
        for (final Path partPath : validPartitions) {
            // generate a full partition specification
            final LinkedHashMap<String, String> fullPartSpec = Maps.newLinkedHashMap(partSpec);
            Warehouse.makeSpecFromName(fullPartSpec, partPath);
            futures.add(pool.submit(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    try {
                        // move file would require session details (needCopy() invokes SessionState.get)
                        SessionState.setCurrentSessionState(parentSession);
                        LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
                        // load the partition
                        Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true, listBucketingEnabled, false, isAcid, hasFollowingStatsTask);
                        partitionsMap.put(fullPartSpec, newPartition);
                        if (inPlaceEligible) {
                            synchronized (ps) {
                                InPlaceUpdate.rePositionCursor(ps);
                                partitionsLoaded.incrementAndGet();
                                InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + partsToLoad + " partitions.");
                            }
                        }
                        return null;
                    } catch (Exception t) {
                        LOG.error("Exception when loading partition with parameters " + " partPath=" + partPath + ", " + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", " + " replace=" + replace + ", " + " listBucketingEnabled=" + listBucketingEnabled + ", " + " isAcid=" + isAcid + ", " + " hasFollowingStatsTask=" + hasFollowingStatsTask, t);
                        throw t;
                    }
                }
            }));
        }
        pool.shutdown();
        LOG.debug("Number of partitions to be added is " + futures.size());
        for (Future<Void> future : futures) {
            future.get();
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
        //cancel other futures
        for (Future<Void> future : futures) {
            future.cancel(true);
        }
        throw new HiveException("Exception when loading " + partsToLoad + " in table " + tbl.getTableName() + " with loadPath=" + loadPath, e);
    }
    try {
        if (isAcid) {
            List<String> partNames = new ArrayList<>(partitionsMap.size());
            for (Partition p : partitionsMap.values()) {
                partNames.add(p.getName());
            }
            getMSC().addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation));
        }
        LOG.info("Loaded " + partitionsMap.size() + " partitions");
        return partitionsMap;
    } catch (TException te) {
        throw new HiveException("Exception updating metastore for acid table " + tableName + " with partitions " + partitionsMap.values(), te);
    }
}
Also used : TException(org.apache.thrift.TException) SessionState(org.apache.hadoop.hive.ql.session.SessionState) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ExecutionException(java.util.concurrent.ExecutionException) Path(org.apache.hadoop.fs.Path) PrintStream(java.io.PrintStream) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) IOException(java.io.IOException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) FileNotFoundException(java.io.FileNotFoundException) JDODataStoreException(javax.jdo.JDODataStoreException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap)
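
The pattern worth lifting from this example: submit every task to a daemon pool, call shutdown so no new work is accepted, then drain the futures, cancelling the remainder on the first failure. A stripped-down sketch under those assumptions (pool size, task count, and class name are illustrative):

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelLoadSketch {

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-sketch-%d").build());
        List<Future<Void>> futures = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            final int item = i;
            futures.add(pool.submit(() -> {
                System.out.println("loading item " + item);
                return null;
            }));
        }
        // No new tasks after this point; already-submitted tasks still run.
        pool.shutdown();
        try {
            for (Future<Void> future : futures) {
                // Surfaces the first task failure as ExecutionException.
                future.get();
            }
        } catch (InterruptedException | ExecutionException e) {
            // Best-effort cancellation of everything still pending or running.
            for (Future<Void> future : futures) {
                future.cancel(true);
            }
            throw new RuntimeException("parallel load failed", e);
        }
    }
}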

Example 79 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project bazel by bazelbuild.

The class ExecutorUtil, method newSlackPool.

/**
   * Create a "slack" thread pool which has the following properties:
   * 1. the worker count shrinks as the threads go unused
   * 2. the rejection policy is caller-runs
   *
   * @param threads maximum number of threads in the pool
   * @param name name of the pool
   * @return the new ThreadPoolExecutor
   */
public static ThreadPoolExecutor newSlackPool(int threads, String name) {
    // A synchronous queue with a bounded thread pool means tasks are rejected
    // once all threads are busy. The caller-runs rejection handler below then
    // absorbs that saturation by running the task in the submitting thread.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(threads, threads, 3L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
    // Do not consume threads when not in use.
    pool.allowCoreThreadTimeOut(true);
    pool.setThreadFactory(new ThreadFactoryBuilder().setNameFormat(name + " %d").build());
    pool.setRejectedExecutionHandler(new RejectedExecutionHandler() {

        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            r.run();
        }
    });
    return pool;
}
Also used : RejectedExecutionHandler(java.util.concurrent.RejectedExecutionHandler) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
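
A runnable demo of the slack-pool behavior, substituting the JDK's built-in ThreadPoolExecutor.CallerRunsPolicy for the equivalent anonymous handler above (class name, pool size, and task count are illustrative):

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SlackPoolDemo {

    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 3L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());
        // Idle workers die after 3 seconds, so the pool consumes no threads when unused.
        pool.allowCoreThreadTimeOut(true);
        pool.setThreadFactory(new ThreadFactoryBuilder().setNameFormat("slack %d").build());
        pool.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 8; i++) {
            pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        }
        // With 2 workers and a SynchronousQueue handoff, some of the 8 tasks
        // typically print "main": saturation falls back on the submitting thread.
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}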

Example 80 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project bazel by bazelbuild.

The class FilesystemValueChecker, method getDirtyValues.

private BatchDirtyResult getDirtyValues(ValueFetcher fetcher, Iterable<SkyKey> keys, final SkyValueDirtinessChecker checker, final boolean checkMissingValues) throws InterruptedException {
    ExecutorService executor = Executors.newFixedThreadPool(DIRTINESS_CHECK_THREADS, new ThreadFactoryBuilder().setNameFormat("FileSystem Value Invalidator %d").build());
    final BatchDirtyResult batchResult = new BatchDirtyResult();
    ThrowableRecordingRunnableWrapper wrapper = new ThrowableRecordingRunnableWrapper("FilesystemValueChecker#getDirtyValues");
    final AtomicInteger numKeysScanned = new AtomicInteger(0);
    final AtomicInteger numKeysChecked = new AtomicInteger(0);
    ElapsedTimeReceiver elapsedTimeReceiver = new ElapsedTimeReceiver() {

        @Override
        public void accept(long elapsedTimeNanos) {
            if (elapsedTimeNanos > 0) {
                LOG.info(String.format("Spent %d ms checking %d filesystem nodes (%d scanned)", TimeUnit.MILLISECONDS.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS), numKeysChecked.get(), numKeysScanned.get()));
            }
        }
    };
    try (AutoProfiler prof = AutoProfiler.create(elapsedTimeReceiver)) {
        for (final SkyKey key : keys) {
            numKeysScanned.incrementAndGet();
            if (!checker.applies(key)) {
                continue;
            }
            final SkyValue value = fetcher.get(key);
            if (!checkMissingValues && value == null) {
                continue;
            }
            executor.execute(wrapper.wrap(new Runnable() {

                @Override
                public void run() {
                    numKeysChecked.incrementAndGet();
                    DirtyResult result = checker.check(key, value, tsgm);
                    if (result.isDirty()) {
                        batchResult.add(key, value, result.getNewValue());
                    }
                }
            }));
        }
        boolean interrupted = ExecutorUtil.interruptibleShutdown(executor);
        Throwables.propagateIfPossible(wrapper.getFirstThrownError());
        if (interrupted) {
            throw new InterruptedException();
        }
    }
    return batchResult;
}
Also used : SkyKey(com.google.devtools.build.skyframe.SkyKey) AutoProfiler(com.google.devtools.build.lib.profiler.AutoProfiler) DirtyResult(com.google.devtools.build.lib.skyframe.SkyValueDirtinessChecker.DirtyResult) ElapsedTimeReceiver(com.google.devtools.build.lib.profiler.AutoProfiler.ElapsedTimeReceiver) SkyValue(com.google.devtools.build.skyframe.SkyValue) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ThrowableRecordingRunnableWrapper(com.google.devtools.build.lib.concurrent.ThrowableRecordingRunnableWrapper)
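
ExecutorUtil.interruptibleShutdown and ThrowableRecordingRunnableWrapper are Bazel-internal helpers. A plain-JDK approximation of the interruptible-shutdown discipline (the real Bazel implementation may differ; the class name here is an assumption):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public final class ShutdownSketch {

    // Waits for the executor to drain. If the waiting thread is interrupted,
    // cancels outstanding work via shutdownNow and reports the interrupt to
    // the caller, which can then rethrow InterruptedException as
    // getDirtyValues does above.
    static boolean interruptibleShutdown(ExecutorService executor) {
        executor.shutdown();
        boolean interrupted = false;
        while (true) {
            try {
                // Effectively "wait forever"; Long.MAX_VALUE seconds saturates internally.
                executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
                break;
            } catch (InterruptedException e) {
                interrupted = true;
                // Interrupt running tasks, then resume waiting for termination.
                executor.shutdownNow();
            }
        }
        return interrupted;
    }
}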

Aggregations

ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 143
ExecutorService (java.util.concurrent.ExecutorService): 49
ThreadFactory (java.util.concurrent.ThreadFactory): 46
IOException (java.io.IOException): 23
Future (java.util.concurrent.Future): 19
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 19
ExecutionException (java.util.concurrent.ExecutionException): 17
ArrayList (java.util.ArrayList): 15
Callable (java.util.concurrent.Callable): 12
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 12
HashMap (java.util.HashMap): 11
Path (org.apache.hadoop.fs.Path): 11
LinkedList (java.util.LinkedList): 10
Map (java.util.Map): 10
HashSet (java.util.HashSet): 9
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 9
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 9
Test (org.junit.Test): 9
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 8
Before (org.junit.Before): 8