Example 11 with LinkedBlockingQueue

Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by Apache.

The class S3AFileSystem, method initialize.

/** Called after a new FileSystem instance is constructed.
   * @param name a uri whose authority section names the host, port, etc.
   *   for this FileSystem
   * @param originalConf the configuration to use for the FS. The
   * bucket-specific options are patched over the base ones before any use is
   * made of the config.
   */
public void initialize(URI name, Configuration originalConf) throws IOException {
    uri = S3xLoginHelper.buildFSURI(name);
    // get the host; this is guaranteed to be non-null, non-empty
    bucket = name.getHost();
    // clone the configuration into one with propagated bucket options
    Configuration conf = propagateBucketOptions(originalConf, bucket);
    patchSecurityCredentialProviders(conf);
    super.initialize(name, conf);
    setConf(conf);
    try {
        instrumentation = new S3AInstrumentation(name);
        // Username is the current user at the time the FS was instantiated.
        username = UserGroupInformation.getCurrentUser().getShortUserName();
        workingDir = new Path("/user", username).makeQualified(this.uri, this.getWorkingDirectory());
        Class<? extends S3ClientFactory> s3ClientFactoryClass = conf.getClass(S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL, S3ClientFactory.class);
        s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf).createS3Client(name, uri);
        maxKeys = intOption(conf, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS, 1);
        listing = new Listing(this);
        partSize = getMultipartSizeProperty(conf, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
        multiPartThreshold = getMultipartSizeProperty(conf, MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
        //check but do not store the block size
        longBytesOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
        enableMultiObjectsDelete = conf.getBoolean(ENABLE_MULTI_DELETE, true);
        readAhead = longBytesOption(conf, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE, 0);
        storageStatistics = (S3AStorageStatistics) GlobalStorageStatistics.INSTANCE.put(S3AStorageStatistics.NAME, new GlobalStorageStatistics.StorageStatisticsProvider() {

            @Override
            public StorageStatistics provide() {
                return new S3AStorageStatistics();
            }
        });
        int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
        if (maxThreads < 2) {
            LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
            maxThreads = 2;
        }
        int totalTasks = intOption(conf, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS, 1);
        long keepAliveTime = longOption(conf, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME, 0);
        boundedThreadPool = BlockingThreadPoolExecutorService.newInstance(maxThreads, maxThreads + totalTasks, keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");
        unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));
        initTransferManager();
        initCannedAcls(conf);
        verifyBucketExists();
        initMultipartUploads(conf);
        serverSideEncryptionAlgorithm = S3AEncryptionMethods.getMethod(conf.getTrimmed(SERVER_SIDE_ENCRYPTION_ALGORITHM));
        if (S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm) && StringUtils.isBlank(getServerSideEncryptionKey(getConf()))) {
            throw new IOException(Constants.SSE_C_NO_KEY_ERROR);
        }
        if (S3AEncryptionMethods.SSE_S3.equals(serverSideEncryptionAlgorithm) && StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
            throw new IOException(Constants.SSE_S3_WITH_KEY_ERROR);
        }
        LOG.debug("Using encryption {}", serverSideEncryptionAlgorithm);
        inputPolicy = S3AInputPolicy.getPolicy(conf.getTrimmed(INPUT_FADVISE, INPUT_FADV_NORMAL));
        blockUploadEnabled = conf.getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD);
        if (blockUploadEnabled) {
            blockOutputBuffer = conf.getTrimmed(FAST_UPLOAD_BUFFER, DEFAULT_FAST_UPLOAD_BUFFER);
            partSize = ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
            blockFactory = S3ADataBlocks.createFactory(this, blockOutputBuffer);
            blockOutputActiveBlocks = intOption(conf, FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS, 1);
            LOG.debug("Using S3ABlockOutputStream with buffer = {}; block={};" + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks);
        } else {
            LOG.debug("Using S3AOutputStream");
        }
    } catch (AmazonClientException e) {
        throw translateException("initializing ", new Path(name), e);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), TransferManagerConfiguration (com.amazonaws.services.s3.transfer.TransferManagerConfiguration), GlobalStorageStatistics (org.apache.hadoop.fs.GlobalStorageStatistics), StorageStatistics (org.apache.hadoop.fs.StorageStatistics), AmazonClientException (com.amazonaws.AmazonClientException), PathIOException (org.apache.hadoop.fs.PathIOException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue), ObjectListing (com.amazonaws.services.s3.model.ObjectListing), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)
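
A note on the two pools above: boundedThreadPool caps both threads and queued work, while the "unbounded" pool relies on a capacity-less LinkedBlockingQueue. Because such a queue never reports full, the executor never grows past its core size, so the Integer.MAX_VALUE maximum is effectively inert and extra submissions simply queue. A minimal standalone sketch of that behavior (class name and sizes are illustrative, not taken from the Hadoop code):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class UnboundedQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        // A LinkedBlockingQueue created without a capacity is unbounded, so
        // ThreadPoolExecutor never sees a "queue full" signal and therefore
        // never starts more than corePoolSize (4) threads, despite the
        // Integer.MAX_VALUE maximum.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                4, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 100; i++) {
            pool.execute(() -> { /* simulated transfer task */ });
        }
        System.out.println("threads in use: " + pool.getPoolSize()); // <= 4
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}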

Example 12 with LinkedBlockingQueue

Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by Apache.

The class BlockingThreadPoolExecutorService, method newInstance.

/**
   * A thread pool that blocks clients submitting additional tasks if
   * there are already {@code activeTasks} running threads and {@code
   * waitingTasks} tasks waiting in its queue.
   *
   * @param activeTasks maximum number of active tasks
   * @param waitingTasks maximum number of waiting tasks
   * @param keepAliveTime time until threads are cleaned up in {@code unit}
   * @param unit time unit
   * @param prefixName prefix of name for threads
   */
public static BlockingThreadPoolExecutorService newInstance(int activeTasks, int waitingTasks, long keepAliveTime, TimeUnit unit, String prefixName) {
    /* Although we generally only expect up to waitingTasks tasks in the
    queue, we need to be able to buffer all tasks in case dequeueing is
    slower than enqueueing. */
    final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(waitingTasks + activeTasks);
    ThreadPoolExecutor eventProcessingExecutor = new ThreadPoolExecutor(activeTasks, activeTasks, keepAliveTime, unit, workQueue, newDaemonThreadFactory(prefixName), new RejectedExecutionHandler() {

        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            // This is not expected to happen.
            LOG.error("Could not submit task to executor {}", executor.toString());
        }
    });
    eventProcessingExecutor.allowCoreThreadTimeOut(true);
    return new BlockingThreadPoolExecutorService(waitingTasks + activeTasks, eventProcessingExecutor);
}
Also used: RejectedExecutionHandler (java.util.concurrent.RejectedExecutionHandler), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue)
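
The blocking behavior documented in the javadoc comes from gating submissions before they reach the executor. As a rough sketch of the same technique (not the Hadoop implementation; the class and method names here are made up), a Semaphore with activeTasks + waitingTasks permits makes callers block instead of hitting the RejectedExecutionHandler:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BlockingSubmitter {
    private final ExecutorService delegate;
    private final Semaphore permits;

    public BlockingSubmitter(int activeTasks, int waitingTasks) {
        this.permits = new Semaphore(activeTasks + waitingTasks);
        this.delegate = new ThreadPoolExecutor(
                activeTasks, activeTasks, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(activeTasks + waitingTasks));
    }

    public void submit(Runnable task) throws InterruptedException {
        permits.acquire(); // blocks the caller once the pool and queue are full
        try {
            delegate.execute(() -> {
                try {
                    task.run();
                } finally {
                    permits.release(); // free a slot when the task completes
                }
            });
        } catch (RejectedExecutionException e) {
            permits.release(); // don't leak a permit if the executor rejects
            throw e;
        }
    }
}

Because the permits run out exactly when the pool and queue are saturated, the rejection handler in the Hadoop code "is not expected to happen" and exists only as a safety net.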

Example 13 with LinkedBlockingQueue

Use of java.util.concurrent.LinkedBlockingQueue in project hadoop by Apache.

The class NMClientAsyncImpl, method serviceStart.

@Override
protected void serviceStart() throws Exception {
    client.start();
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(this.getClass().getName() + " #%d").setDaemon(true).build();
    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventDispatcherThread = new Thread() {

        @Override
        public void run() {
            ContainerEvent event = null;
            Set<String> allNodes = new HashSet<String>();
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = events.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, thread interrupted", e);
                    }
                    return;
                }
                allNodes.add(event.getNodeId().toString());
                int threadPoolSize = threadPool.getCorePoolSize();
                // We can increase the pool size only if we haven't reached the
                // maximum limit yet.
                if (threadPoolSize != maxThreadPoolSize) {
                    // The ideal pool size tracks the number of distinct nodes
                    // where containers will run at *this* point of time. This
                    // is *not* the cluster size and doesn't need to be.
                    int nodeNum = allNodes.size();
                    int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);
                    if (threadPoolSize < idealThreadPoolSize) {
                        // Bump up the pool size to idealThreadPoolSize +
                        // INITIAL_THREAD_POOL_SIZE; the latter is just a buffer
                        // so we are not always increasing the pool size.
                        int newThreadPoolSize = Math.min(maxThreadPoolSize, idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
                        LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize + " as the number of nodes to talk to is " + nodeNum);
                        threadPool.setCorePoolSize(newThreadPoolSize);
                    }
                }
                // the events from the queue are handled in parallel with a thread
                // pool
                threadPool.execute(getContainerEventProcessor(event));
            // TODO: Group launching of multiple containers to a single
            // NodeManager into a single connection
            }
        }
    };
    eventDispatcherThread.setName("Container Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();
    super.serviceStart();
}
Also used: ThreadFactory (java.util.concurrent.ThreadFactory), HashSet (java.util.HashSet), EnumSet (java.util.EnumSet), Set (java.util.Set), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue)
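
Since the dispatcher's work queue is an unbounded LinkedBlockingQueue, maximumPoolSize never comes into play (see the note under Example 11), which is why this code grows the pool explicitly with setCorePoolSize rather than relying on the executor to expand itself. A condensed sketch of just the resizing step (the variable values are illustrative stand-ins for allNodes.size() and maxThreadPoolSize):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class GrowableCorePool {
    public static void main(String[] args) {
        // Start small; with an unbounded queue, only corePoolSize controls
        // how many worker threads exist, so it must be raised explicitly.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, Integer.MAX_VALUE, 1L, TimeUnit.HOURS,
                new LinkedBlockingQueue<Runnable>());

        int observedNodes = 10; // stand-in for allNodes.size()
        int maxPoolSize = 8;    // stand-in for maxThreadPoolSize
        int ideal = Math.min(maxPoolSize, observedNodes);
        if (pool.getCorePoolSize() < ideal) {
            // setCorePoolSize may be called while the pool is running; new
            // core threads are started as needed to serve queued tasks.
            pool.setCorePoolSize(ideal);
        }
        System.out.println("core pool size now " + pool.getCorePoolSize());
        pool.shutdown();
    }
}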

Example 14 with LinkedBlockingQueue

Use of java.util.concurrent.LinkedBlockingQueue in project hbase by Apache.

The class HBaseInterClusterReplicationEndpoint, method init.

@Override
public void init(Context context) throws IOException {
    super.init(context);
    this.conf = HBaseConfiguration.create(ctx.getConfiguration());
    decorateConf();
    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
    this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier);
    // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator
    // tasks to terminate when doStop() is called.
    long maxTerminationWaitMultiplier = this.conf.getLong("replication.source.maxterminationmultiplier", DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER);
    this.maxTerminationWait = maxTerminationWaitMultiplier * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    // TODO: This connection is replication specific or we should make it particular to
    // replication and make replication specific settings such as compression or codec to use
    // passing Cells.
    this.conn = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
    this.metrics = context.getMetrics();
    // ReplicationQueueInfo parses the peerId out of the znode for us
    this.replicationSinkMgr = new ReplicationSinkManager(conn, ctx.getPeerId(), this, this.conf);
    // per sink thread pool
    this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
    this.exec = new ThreadPoolExecutor(maxThreads, maxThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    this.exec.allowCoreThreadTimeOut(true);
    this.abortable = ctx.getAbortable();
    this.replicationBulkLoadDataEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
    if (this.replicationBulkLoadDataEnabled) {
        replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID);
    }
    // Construct base namespace directory and hfile archive directory path
    Path rootDir = FSUtils.getRootDir(conf);
    Path baseNSDir = new Path(HConstants.BASE_NAMESPACE_DIR);
    baseNamespaceDir = new Path(rootDir, baseNSDir);
    hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY, baseNSDir));
}
Also used: Path (org.apache.hadoop.fs.Path), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue)
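
The combination used for the per-sink pool here, core size equal to maximum size plus allowCoreThreadTimeOut(true), gives a pool with a hard upper bound of maxThreads that can still shrink all the way to zero threads after 60 seconds of idleness between replication bursts. A minimal sketch of that configuration (names and sizes are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ElasticFixedPool {
    public static void main(String[] args) throws InterruptedException {
        // core == max gives a fixed-size pool; allowCoreThreadTimeOut(true)
        // lets even core threads exit after 60s idle, so the pool releases
        // all its threads between bursts instead of pinning them forever.
        ThreadPoolExecutor exec = new ThreadPoolExecutor(
                4, 4, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        exec.allowCoreThreadTimeOut(true);

        exec.execute(() -> System.out.println("burst work"));
        exec.shutdown();
        exec.awaitTermination(5, TimeUnit.SECONDS);
    }
}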

Example 15 with LinkedBlockingQueue

Use of java.util.concurrent.LinkedBlockingQueue in project hbase by Apache.

The class RegionReplicaReplicationEndpoint, method getDefaultThreadPool.

/**
   * Returns a thread pool for the RPCs to region replicas. Similar to
   * Connection's thread pool.
   */
private ExecutorService getDefaultThreadPool(Configuration conf) {
    int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(maxThreads * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(maxThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory(this.getClass().getSimpleName() + "-rpc-shared-"));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
}
Also used: ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue), HBaseReplicationEndpoint (org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint)
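
Unlike the previous two examples, this queue is bounded (maxThreads * max-total-tasks entries). With core == max and a full bounded queue, a further execute() is rejected under ThreadPoolExecutor's default AbortPolicy, the failure mode this generous sizing is meant to keep out of reach. A small sketch of that saturation behavior (sizes are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueueRejection {
    public static void main(String[] args) {
        // core == max == 2 and a queue of capacity 2: once both threads are
        // busy and two tasks are waiting, the fifth execute() is rejected
        // by the default AbortPolicy.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 2, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(2));
        try {
            for (int i = 0; i < 10; i++) {
                pool.execute(() -> {
                    try {
                        Thread.sleep(1000); // keep the workers busy
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                });
            }
        } catch (RejectedExecutionException e) {
            System.out.println("saturated: " + e);
        } finally {
            pool.shutdownNow();
        }
    }
}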

Aggregations

LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 233
Test (org.junit.Test): 79
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 58
IOException (java.io.IOException): 22
Emitter (io.socket.emitter.Emitter): 19
ArrayList (java.util.ArrayList): 19
JSONObject (org.json.JSONObject): 19
CountDownLatch (java.util.concurrent.CountDownLatch): 18
ThreadFactory (java.util.concurrent.ThreadFactory): 14
ExecutorService (java.util.concurrent.ExecutorService): 13
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 13
URI (java.net.URI): 11
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 11
Intent (android.content.Intent): 9
BlockingQueue (java.util.concurrent.BlockingQueue): 9
HashMap (java.util.HashMap): 8
AtomicLong (java.util.concurrent.atomic.AtomicLong): 8
PendingIntent (android.app.PendingIntent): 7
ComponentName (android.content.ComponentName): 7
ServiceConnection (android.content.ServiceConnection): 7