Example 26 with ThreadPoolExecutor

use of java.util.concurrent.ThreadPoolExecutor in project hadoop by apache.

the class TestLogAggregationService method testFixedSizeThreadPool.

@Test(timeout = 30000)
public void testFixedSizeThreadPool() throws Exception {
    // store configured thread pool size temporarily for restoration
    int initThreadPoolSize = conf.getInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, YarnConfiguration.DEFAULT_NM_LOG_AGGREGATION_THREAD_POOL_SIZE);
    int threadPoolSize = 3;
    conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, threadPoolSize);
    DeletionService delSrvc = mock(DeletionService.class);
    LocalDirsHandlerService dirSvc = mock(LocalDirsHandlerService.class);
    when(dirSvc.getLogDirs()).thenThrow(new RuntimeException());
    LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, delSrvc, dirSvc);
    logAggregationService.init(this.conf);
    logAggregationService.start();
    ExecutorService executorService = logAggregationService.threadPool;
    // used to block threads in the thread pool: the main thread acquires the
    // write lock first, so every pool thread parks on the read lock.
    final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    final Lock rLock = rwLock.readLock();
    final Lock wLock = rwLock.writeLock();
    try {
        wLock.lock();
        Runnable runnable = new Runnable() {

            @Override
            public void run() {
                boolean acquired = false;
                try {
                    // threads in the thread pool running this will be blocked
                    acquired = rLock.tryLock(35000, TimeUnit.MILLISECONDS);
                } catch (InterruptedException e) {
                    // restore the interrupt status rather than swallow it
                    Thread.currentThread().interrupt();
                } finally {
                    // unlock only if the read lock was actually acquired; an
                    // unconditional unlock() could throw IllegalMonitorStateException
                    if (acquired) {
                        rLock.unlock();
                    }
                }
            }
        };
        // submit (threadPoolSize + 1) tasks; at most threadPoolSize threads are
        // created in the thread pool, each of which is blocked on the read lock.
        for (int i = 0; i < threadPoolSize + 1; i++) {
            executorService.submit(runnable);
        }
        // count the number of currently running LogAggregationService threads
        int runningThread = ((ThreadPoolExecutor) executorService).getActiveCount();
        assertEquals(threadPoolSize, runningThread);
    } finally {
        wLock.unlock();
    }
    logAggregationService.stop();
    logAggregationService.close();
    // restore the original configurations to avoid side effects
    conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, initThreadPoolSize);
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Lock(java.util.concurrent.locks.Lock) DeletionService(org.apache.hadoop.yarn.server.nodemanager.DeletionService) ExecutorService(java.util.concurrent.ExecutorService) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
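
A note on the technique: holding the write lock on the main thread pins every pool thread on the read lock, so ThreadPoolExecutor.getActiveCount() can be asserted against a stable value. Below is a minimal, self-contained sketch of the same idea; the class and variable names are illustrative, not from Hadoop.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FixedPoolSizeCheck {
    public static void main(String[] args) throws InterruptedException {
        int poolSize = 3;
        ExecutorService pool = Executors.newFixedThreadPool(poolSize);
        ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
        rwLock.writeLock().lock(); // main thread holds the write lock
        try {
            // one more task than the pool has threads
            for (int i = 0; i < poolSize + 1; i++) {
                pool.submit(() -> {
                    boolean acquired = false;
                    try {
                        // parks while main still holds the write lock
                        acquired = rwLock.readLock().tryLock(35, TimeUnit.SECONDS);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } finally {
                        if (acquired) {
                            rwLock.readLock().unlock();
                        }
                    }
                });
            }
            Thread.sleep(500); // give the pool time to start its workers
            // only poolSize threads can be active; the extra task waits in the queue
            System.out.println(((ThreadPoolExecutor) pool).getActiveCount()); // prints 3
        } finally {
            rwLock.writeLock().unlock();
        }
        pool.shutdown();
    }
}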

Example 27 with ThreadPoolExecutor

use of java.util.concurrent.ThreadPoolExecutor in project hadoop by apache.

the class S3AFileSystem method initialize.

/** Called after a new FileSystem instance is constructed.
   * @param name a uri whose authority section names the host, port, etc.
   *   for this FileSystem
   * @param originalConf the configuration to use for the FS. The
   * bucket-specific options are patched over the base ones before any use is
   * made of the config.
   */
public void initialize(URI name, Configuration originalConf) throws IOException {
    uri = S3xLoginHelper.buildFSURI(name);
    // get the host; this is guaranteed to be non-null, non-empty
    bucket = name.getHost();
    // clone the configuration into one with propagated bucket options
    Configuration conf = propagateBucketOptions(originalConf, bucket);
    patchSecurityCredentialProviders(conf);
    super.initialize(name, conf);
    setConf(conf);
    try {
        instrumentation = new S3AInstrumentation(name);
        // Username is the current user at the time the FS was instantiated.
        username = UserGroupInformation.getCurrentUser().getShortUserName();
        workingDir = new Path("/user", username).makeQualified(this.uri, this.getWorkingDirectory());
        Class<? extends S3ClientFactory> s3ClientFactoryClass = conf.getClass(S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL, S3ClientFactory.class);
        s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf).createS3Client(name, uri);
        maxKeys = intOption(conf, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS, 1);
        listing = new Listing(this);
        partSize = getMultipartSizeProperty(conf, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
        multiPartThreshold = getMultipartSizeProperty(conf, MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
        // check but do not store the block size
        longBytesOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
        enableMultiObjectsDelete = conf.getBoolean(ENABLE_MULTI_DELETE, true);
        readAhead = longBytesOption(conf, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE, 0);
        storageStatistics = (S3AStorageStatistics) GlobalStorageStatistics.INSTANCE.put(S3AStorageStatistics.NAME, new GlobalStorageStatistics.StorageStatisticsProvider() {

            @Override
            public StorageStatistics provide() {
                return new S3AStorageStatistics();
            }
        });
        int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
        if (maxThreads < 2) {
            LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
            maxThreads = 2;
        }
        int totalTasks = intOption(conf, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS, 1);
        long keepAliveTime = longOption(conf, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME, 0);
        boundedThreadPool = BlockingThreadPoolExecutorService.newInstance(maxThreads, maxThreads + totalTasks, keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");
        unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));
        initTransferManager();
        initCannedAcls(conf);
        verifyBucketExists();
        initMultipartUploads(conf);
        serverSideEncryptionAlgorithm = S3AEncryptionMethods.getMethod(conf.getTrimmed(SERVER_SIDE_ENCRYPTION_ALGORITHM));
        if (S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm) && StringUtils.isBlank(getServerSideEncryptionKey(getConf()))) {
            throw new IOException(Constants.SSE_C_NO_KEY_ERROR);
        }
        if (S3AEncryptionMethods.SSE_S3.equals(serverSideEncryptionAlgorithm) && StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
            throw new IOException(Constants.SSE_S3_WITH_KEY_ERROR);
        }
        LOG.debug("Using encryption {}", serverSideEncryptionAlgorithm);
        inputPolicy = S3AInputPolicy.getPolicy(conf.getTrimmed(INPUT_FADVISE, INPUT_FADV_NORMAL));
        blockUploadEnabled = conf.getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD);
        if (blockUploadEnabled) {
            blockOutputBuffer = conf.getTrimmed(FAST_UPLOAD_BUFFER, DEFAULT_FAST_UPLOAD_BUFFER);
            partSize = ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
            blockFactory = S3ADataBlocks.createFactory(this, blockOutputBuffer);
            blockOutputActiveBlocks = intOption(conf, FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS, 1);
            LOG.debug("Using S3ABlockOutputStream with buffer = {}; block={};" + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks);
        } else {
            LOG.debug("Using S3AOutputStream");
        }
    } catch (AmazonClientException e) {
        throw translateException("initializing ", new Path(name), e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) TransferManagerConfiguration(com.amazonaws.services.s3.transfer.TransferManagerConfiguration) GlobalStorageStatistics(org.apache.hadoop.fs.GlobalStorageStatistics) StorageStatistics(org.apache.hadoop.fs.StorageStatistics) AmazonClientException(com.amazonaws.AmazonClientException) PathIOException(org.apache.hadoop.fs.PathIOException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) ObjectListing(com.amazonaws.services.s3.model.ObjectListing) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
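
One subtlety in the "unbounded" pool above: a ThreadPoolExecutor only creates threads beyond corePoolSize when the work queue rejects a task, so with an unbounded LinkedBlockingQueue the Integer.MAX_VALUE maximum is never reached and the pool stays capped at maxThreads threads over an unbounded backlog. The standalone sketch below (illustrative, not S3A code) contrasts the two common queue choices.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class QueueChoiceDemo {
    public static void main(String[] args) throws InterruptedException {
        // Unbounded queue: the pool never grows past its core size (2 here),
        // because new threads are only added when the queue rejects an offer.
        ThreadPoolExecutor queued = new ThreadPoolExecutor(
                2, Integer.MAX_VALUE, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // SynchronousQueue: every offer is rejected unless a worker is waiting,
        // so the pool grows toward maximumPoolSize instead of queueing.
        ThreadPoolExecutor direct = new ThreadPoolExecutor(
                2, Integer.MAX_VALUE, 60, TimeUnit.SECONDS, new SynchronousQueue<>());
        Runnable sleepy = () -> {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        for (int i = 0; i < 8; i++) {
            queued.execute(sleepy);
            direct.execute(sleepy);
        }
        Thread.sleep(200);
        System.out.println(queued.getPoolSize()); // 2: extra tasks wait in the queue
        System.out.println(direct.getPoolSize()); // 8: each task got its own thread
        queued.shutdown();
        direct.shutdown();
    }
}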

Example 28 with ThreadPoolExecutor

use of java.util.concurrent.ThreadPoolExecutor in project hadoop by apache.

the class BlockingThreadPoolExecutorService method newInstance.

/**
   * A thread pool that blocks clients submitting additional tasks if
   * there are already {@code activeTasks} running threads and {@code
   * waitingTasks} tasks waiting in its queue.
   *
   * @param activeTasks maximum number of active tasks
   * @param waitingTasks maximum number of waiting tasks
   * @param keepAliveTime time until threads are cleaned up in {@code unit}
   * @param unit time unit
   * @param prefixName prefix of name for threads
   */
public static BlockingThreadPoolExecutorService newInstance(int activeTasks, int waitingTasks, long keepAliveTime, TimeUnit unit, String prefixName) {
    /* Although we generally only expect up to waitingTasks tasks in the
    queue, we need to be able to buffer all tasks in case dequeueing is
    slower than enqueueing. */
    final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(waitingTasks + activeTasks);
    ThreadPoolExecutor eventProcessingExecutor = new ThreadPoolExecutor(activeTasks, activeTasks, keepAliveTime, unit, workQueue, newDaemonThreadFactory(prefixName), new RejectedExecutionHandler() {

        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            // This is not expected to happen.
            LOG.error("Could not submit task to executor {}", executor.toString());
        }
    });
    eventProcessingExecutor.allowCoreThreadTimeOut(true);
    return new BlockingThreadPoolExecutorService(waitingTasks + activeTasks, eventProcessingExecutor);
}
Also used : RejectedExecutionHandler(java.util.concurrent.RejectedExecutionHandler) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
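
The rejection handler above only logs because rejection is not expected: the queue already has activeTasks + waitingTasks slots, and the wrapping BlockingThreadPoolExecutorService throttles submitters before tasks ever reach the executor. A minimal sketch of that throttling idea, using a Semaphore, follows; this is a hypothetical illustration, not the Hadoop implementation.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

// Sketch: a semaphore with (activeTasks + waitingTasks) permits makes
// submit() block instead of rejecting when the pool is saturated.
public class BlockingSubmitter {
    private final ExecutorService delegate;
    private final Semaphore permits;

    public BlockingSubmitter(int activeTasks, int waitingTasks) {
        this.delegate = Executors.newFixedThreadPool(activeTasks);
        this.permits = new Semaphore(activeTasks + waitingTasks);
    }

    public void submit(Runnable task) throws InterruptedException {
        permits.acquire(); // blocks the caller when the pool is saturated
        try {
            delegate.execute(() -> {
                try {
                    task.run();
                } finally {
                    permits.release(); // free a slot once the task finishes
                }
            });
        } catch (RuntimeException e) {
            permits.release(); // don't leak a permit if execute() fails
            throw e;
        }
    }

    public void shutdown() {
        delegate.shutdown();
    }
}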

Example 29 with ThreadPoolExecutor

use of java.util.concurrent.ThreadPoolExecutor in project hadoop by apache.

the class NMClientAsyncImpl method serviceStart.

@Override
protected void serviceStart() throws Exception {
    client.start();
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(this.getClass().getName() + " #%d").setDaemon(true).build();
    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventDispatcherThread = new Thread() {

        @Override
        public void run() {
            ContainerEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = events.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, thread interrupted", e);
                    }
                    return;
                }
                allNodes.add(event.getNodeId().toString());
                int threadPoolSize = threadPool.getCorePoolSize();
                // we can grow the pool only if we haven't hit the maximum limit yet.
                if (threadPoolSize != maxThreadPoolSize) {
                    // the ideal pool size is the number of nodes where containers
                    // will run at *this* point of time. This is *not* the cluster
                    // size and doesn't need to be.
                    int nodeNum = allNodes.size();
                    int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);
                    if (threadPoolSize < idealThreadPoolSize) {
                        // Bump the pool size up to idealThreadPoolSize +
                        // INITIAL_THREAD_POOL_SIZE; the latter is a buffer so we
                        // are not constantly increasing the pool size.
                        int newThreadPoolSize = Math.min(maxThreadPoolSize, idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
                        LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize + " as the number of nodes to talk to is " + nodeNum);
                        threadPool.setCorePoolSize(newThreadPoolSize);
                    }
                }
                // the events from the queue are handled in parallel with a thread
                // pool
                threadPool.execute(getContainerEventProcessor(event));
            // TODO: Group launching of multiple containers to a single
            // NodeManager into a single connection
            }
        }
    };
    eventDispatcherThread.setName("Container Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();
    super.serviceStart();
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) Set(java.util.Set) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
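
The dispatcher relies on ThreadPoolExecutor.setCorePoolSize(), which can be called on a live pool to grow (or shrink) it at runtime. A condensed sketch of the same grow-toward-node-count policy is shown below, with illustrative names and an assumed initial size.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch of growing a live pool with setCorePoolSize(), as the dispatcher
// above does. The constant mirrors INITIAL_THREAD_POOL_SIZE but its value
// here is assumed for illustration.
public class GrowablePool {
    private static final int INITIAL_SIZE = 10;
    private final int maxSize;
    private final ThreadPoolExecutor pool;

    public GrowablePool(int maxSize) {
        this.maxSize = maxSize;
        int init = Math.min(INITIAL_SIZE, maxSize);
        this.pool = new ThreadPoolExecutor(init, Integer.MAX_VALUE,
                1, TimeUnit.HOURS, new LinkedBlockingQueue<>());
    }

    /** Grow the core size toward one thread per known node, plus a buffer. */
    public void resizeFor(int knownNodes) {
        int current = pool.getCorePoolSize();
        if (current == maxSize) {
            return; // already at the limit
        }
        int ideal = Math.min(maxSize, knownNodes);
        if (current < ideal) {
            // the buffer avoids resizing on every newly seen node
            pool.setCorePoolSize(Math.min(maxSize, ideal + INITIAL_SIZE));
        }
    }

    public void execute(Runnable task) {
        pool.execute(task);
    }
}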

Example 30 with ThreadPoolExecutor

use of java.util.concurrent.ThreadPoolExecutor in project hbase by apache.

the class HBaseInterClusterReplicationEndpoint method init.

@Override
public void init(Context context) throws IOException {
    super.init(context);
    this.conf = HBaseConfiguration.create(ctx.getConfiguration());
    decorateConf();
    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
    this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier);
    // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator
    // tasks to terminate when doStop() is called.
    long maxTerminationWaitMultiplier = this.conf.getLong("replication.source.maxterminationmultiplier", DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER);
    this.maxTerminationWait = maxTerminationWaitMultiplier * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    // TODO: This connection is replication-specific; we should make it particular
    // to replication, with replication-specific settings such as the compression
    // codec to use when passing Cells.
    this.conn = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
    this.metrics = context.getMetrics();
    // ReplicationQueueInfo parses the peerId out of the znode for us
    this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
    // per-sink thread pool
    this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
    this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    this.exec.allowCoreThreadTimeOut(true);
    this.abortable = ctx.getAbortable();
    this.replicationBulkLoadDataEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
    if (this.replicationBulkLoadDataEnabled) {
        replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID);
    }
    // Construct base namespace directory and hfile archive directory path
    Path rootDir = FSUtils.getRootDir(conf);
    Path baseNSDir = new Path(HConstants.BASE_NAMESPACE_DIR);
    baseNamespaceDir = new Path(rootDir, baseNSDir);
    hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY, baseNSDir));
}
Also used : Path(org.apache.hadoop.fs.Path) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
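
The combination used here, corePoolSize == maximumPoolSize plus allowCoreThreadTimeOut(true), yields a pool that runs up to maxThreads threads under load but releases all of them once the keep-alive elapses, rather than pinning idle threads for the life of the process. A small standalone sketch (illustrative names, not HBase code):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class IdleShrinkingPool {
    public static void main(String[] args) throws InterruptedException {
        // core == max, unbounded queue: a fixed-size pool of up to 4 threads ...
        ThreadPoolExecutor exec = new ThreadPoolExecutor(
                4, 4, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // ... that releases all of its threads after 60s of inactivity
        exec.allowCoreThreadTimeOut(true);
        for (int i = 0; i < 4; i++) {
            exec.execute(() -> System.out.println(Thread.currentThread().getName()));
        }
        Thread.sleep(100);
        System.out.println("busy pool size: " + exec.getPoolSize()); // up to 4
        // after the keep-alive passes with no work, getPoolSize() drops to 0
        exec.shutdown();
    }
}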

Aggregations

ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor) 397
Test (org.junit.Test) 79
ExecutorService (java.util.concurrent.ExecutorService) 74
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue) 60
ThreadFactory (java.util.concurrent.ThreadFactory) 38
ArrayList (java.util.ArrayList) 34
IOException (java.io.IOException) 33
ScheduledThreadPoolExecutor (java.util.concurrent.ScheduledThreadPoolExecutor) 30
SynchronousQueue (java.util.concurrent.SynchronousQueue) 29
ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue) 23
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 23
RejectedExecutionHandler (java.util.concurrent.RejectedExecutionHandler) 22
ExecutionException (java.util.concurrent.ExecutionException) 21
Future (java.util.concurrent.Future) 20
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder) 18
CountDownLatch (java.util.concurrent.CountDownLatch) 18
Test (org.testng.annotations.Test) 18
RejectedExecutionException (java.util.concurrent.RejectedExecutionException) 16
SizedScheduledExecutorService (org.apache.camel.util.concurrent.SizedScheduledExecutorService) 16
HashMap (java.util.HashMap) 14