
Example 51 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project mapdb by jankotek.

the class LinkedBlockingQueueTest method testRemove.

/**
 * remove removes the next element, or throws NoSuchElementException if empty.
 */
public void testRemove() {
    LinkedBlockingQueue q = populatedQueue(SIZE);
    for (int i = 0; i < SIZE; ++i) {
        assertEquals(i, q.remove());
    }
    try {
        q.remove();
        shouldThrow();
    } catch (NoSuchElementException success) {
    }
}
Also used : LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) NoSuchElementException(java.util.NoSuchElementException)
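
The assertion assertEquals(i, q.remove()) only holds if the helper fills the queue with the integers 0 through SIZE-1 in order. populatedQueue is defined elsewhere in the test class; a minimal sketch of what it plausibly looks like, under that ordering assumption:

// Hypothetical reconstruction of the populatedQueue helper, shown for
// context only; the real definition lives elsewhere in the test class.
// Builds a full queue holding the boxed integers 0..n-1 in order.
private LinkedBlockingQueue<Integer> populatedQueue(int n) {
    LinkedBlockingQueue<Integer> q = new LinkedBlockingQueue<>(n);
    for (int i = 0; i < n; i++) {
        assertTrue(q.offer(i));
    }
    assertEquals(0, q.remainingCapacity());
    assertEquals(n, q.size());
    return q;
}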

Example 52 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache.

the class TestAsyncDispatcher method testDrainDispatcherDrainEventsOnStop.

// Test if drain dispatcher drains events on stop.
@SuppressWarnings({ "rawtypes" })
@Test(timeout = 10000)
public void testDrainDispatcherDrainEventsOnStop() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 2000);
    BlockingQueue<Event> queue = new LinkedBlockingQueue<Event>();
    DrainDispatcher disp = new DrainDispatcher(queue);
    disp.init(conf);
    disp.register(DummyType.class, new DummyHandler());
    disp.setDrainEventsOnStop();
    disp.start();
    disp.waitForEventThreadToWait();
    dispatchDummyEvents(disp, 2);
    disp.close();
    assertEquals(0, queue.size());
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Test(org.junit.Test)
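
DummyType and DummyHandler are small test doubles defined elsewhere in TestAsyncDispatcher. A rough sketch of the shape they typically take (the names match the test above; the bodies are assumptions):

// Hypothetical reconstructions, shown for context; the real definitions
// live in TestAsyncDispatcher. The handler deliberately does nothing:
// the test only checks that queued events are drained on stop.
private enum DummyType {
    DUMMY
}

private static class DummyHandler implements EventHandler<Event> {
    @Override
    public void handle(Event event) {
        // no-op
    }
}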

Example 53 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache.

the class S3AFileSystem method initialize.

/**
 * Called after a new FileSystem instance is constructed.
 * @param name a URI whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param originalConf the configuration to use for the FS. The
 *   bucket-specific options are patched over the base ones before any use
 *   is made of the config.
 */
public void initialize(URI name, Configuration originalConf) throws IOException {
    uri = S3xLoginHelper.buildFSURI(name);
    // get the host; this is guaranteed to be non-null, non-empty
    bucket = name.getHost();
    // clone the configuration into one with propagated bucket options
    Configuration conf = propagateBucketOptions(originalConf, bucket);
    patchSecurityCredentialProviders(conf);
    super.initialize(name, conf);
    setConf(conf);
    try {
        instrumentation = new S3AInstrumentation(name);
        // Username is the current user at the time the FS was instantiated.
        username = UserGroupInformation.getCurrentUser().getShortUserName();
        workingDir = new Path("/user", username).makeQualified(this.uri, this.getWorkingDirectory());
        Class<? extends S3ClientFactory> s3ClientFactoryClass = conf.getClass(S3_CLIENT_FACTORY_IMPL, DEFAULT_S3_CLIENT_FACTORY_IMPL, S3ClientFactory.class);
        s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf).createS3Client(name, uri);
        maxKeys = intOption(conf, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS, 1);
        listing = new Listing(this);
        partSize = getMultipartSizeProperty(conf, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
        multiPartThreshold = getMultipartSizeProperty(conf, MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
        //check but do not store the block size
        longBytesOption(conf, FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE, 1);
        enableMultiObjectsDelete = conf.getBoolean(ENABLE_MULTI_DELETE, true);
        readAhead = longBytesOption(conf, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE, 0);
        storageStatistics = (S3AStorageStatistics) GlobalStorageStatistics.INSTANCE.put(S3AStorageStatistics.NAME, new GlobalStorageStatistics.StorageStatisticsProvider() {

            @Override
            public StorageStatistics provide() {
                return new S3AStorageStatistics();
            }
        });
        int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
        if (maxThreads < 2) {
            LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
            maxThreads = 2;
        }
        int totalTasks = intOption(conf, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS, 1);
        long keepAliveTime = longOption(conf, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME, 0);
        boundedThreadPool = BlockingThreadPoolExecutorService.newInstance(maxThreads, maxThreads + totalTasks, keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");
        unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));
        initTransferManager();
        initCannedAcls(conf);
        verifyBucketExists();
        initMultipartUploads(conf);
        serverSideEncryptionAlgorithm = S3AEncryptionMethods.getMethod(conf.getTrimmed(SERVER_SIDE_ENCRYPTION_ALGORITHM));
        if (S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm) && StringUtils.isBlank(getServerSideEncryptionKey(getConf()))) {
            throw new IOException(Constants.SSE_C_NO_KEY_ERROR);
        }
        if (S3AEncryptionMethods.SSE_S3.equals(serverSideEncryptionAlgorithm) && StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
            throw new IOException(Constants.SSE_S3_WITH_KEY_ERROR);
        }
        LOG.debug("Using encryption {}", serverSideEncryptionAlgorithm);
        inputPolicy = S3AInputPolicy.getPolicy(conf.getTrimmed(INPUT_FADVISE, INPUT_FADV_NORMAL));
        blockUploadEnabled = conf.getBoolean(FAST_UPLOAD, DEFAULT_FAST_UPLOAD);
        if (blockUploadEnabled) {
            blockOutputBuffer = conf.getTrimmed(FAST_UPLOAD_BUFFER, DEFAULT_FAST_UPLOAD_BUFFER);
            partSize = ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
            blockFactory = S3ADataBlocks.createFactory(this, blockOutputBuffer);
            blockOutputActiveBlocks = intOption(conf, FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS, 1);
            LOG.debug("Using S3ABlockOutputStream with buffer = {}; block={};" + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks);
        } else {
            LOG.debug("Using S3AOutputStream");
        }
    } catch (AmazonClientException e) {
        throw translateException("initializing ", new Path(name), e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) TransferManagerConfiguration(com.amazonaws.services.s3.transfer.TransferManagerConfiguration) GlobalStorageStatistics(org.apache.hadoop.fs.GlobalStorageStatistics) StorageStatistics(org.apache.hadoop.fs.StorageStatistics) AmazonClientException(com.amazonaws.AmazonClientException) PathIOException(org.apache.hadoop.fs.PathIOException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) ObjectListing(com.amazonaws.services.s3.model.ObjectListing) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
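
Worth noting is the contrast between the two executors created above: boundedThreadPool caps queued work (see Example 54), while unboundedThreadPool pairs an unbounded LinkedBlockingQueue with a core pool size of maxThreads. Because an unbounded LinkedBlockingQueue never rejects an offer, a ThreadPoolExecutor built this way never grows past its core size, so the Integer.MAX_VALUE maximum is effectively unreachable. A standalone sketch of that pattern (sizes are illustrative, not taken from the Hadoop configuration):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class UnboundedQueuePoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Threads beyond corePoolSize are only created when the queue
        // rejects a task; an unbounded LinkedBlockingQueue never does,
        // so this pool runs at most 4 threads no matter the backlog.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                4, Integer.MAX_VALUE,
                60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        for (int i = 0; i < 100; i++) {
            pool.execute(() ->
                    System.out.println(Thread.currentThread().getName()));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}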

Example 54 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project hadoop by apache.

the class BlockingThreadPoolExecutorService method newInstance.

/**
   * A thread pool that blocks clients submitting additional tasks if
   * there are already {@code activeTasks} running threads and {@code
   * waitingTasks} tasks waiting in its queue.
   *
   * @param activeTasks maximum number of active tasks
   * @param waitingTasks maximum number of waiting tasks
   * @param keepAliveTime time until threads are cleaned up in {@code unit}
   * @param unit time unit
   * @param prefixName prefix of name for threads
   */
public static BlockingThreadPoolExecutorService newInstance(int activeTasks, int waitingTasks, long keepAliveTime, TimeUnit unit, String prefixName) {
    /* Although we generally only expect up to waitingTasks tasks in the
    queue, we need to be able to buffer all tasks in case dequeueing is
    slower than enqueueing. */
    final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(waitingTasks + activeTasks);
    ThreadPoolExecutor eventProcessingExecutor = new ThreadPoolExecutor(activeTasks, activeTasks, keepAliveTime, unit, workQueue, newDaemonThreadFactory(prefixName), new RejectedExecutionHandler() {

        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            // This is not expected to happen.
            LOG.error("Could not submit task to executor {}", executor.toString());
        }
    });
    eventProcessingExecutor.allowCoreThreadTimeOut(true);
    return new BlockingThreadPoolExecutorService(waitingTasks + activeTasks, eventProcessingExecutor);
}
Also used : RejectedExecutionHandler(java.util.concurrent.RejectedExecutionHandler) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue)
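
A hedged usage sketch: with 2 active and 4 waiting slots, a caller's 7th concurrent submission blocks inside submit() until capacity frees up, rather than reaching the RejectedExecutionHandler (the parameter values and task body below are made up for illustration):

// Illustrative only: the sizes and the sleeping task are assumptions.
BlockingThreadPoolExecutorService service =
        BlockingThreadPoolExecutorService.newInstance(
                2, 4, 60L, TimeUnit.SECONDS, "example-pool");
for (int i = 0; i < 20; i++) {
    // Blocks here once 2 tasks are running and 4 are queued.
    service.submit(() -> {
        Thread.sleep(100);
        return null;
    });
}
service.shutdown();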

Example 55 with LinkedBlockingQueue

use of java.util.concurrent.LinkedBlockingQueue in project hbase by apache.

the class IntegrationTestSendTraceRequests method insertData.

private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
    LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
    BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
    byte[] value = new byte[300];
    for (int x = 0; x < 5000; x++) {
        TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
        try {
            for (int i = 0; i < 5; i++) {
                long rk = random.nextLong();
                rowKeys.add(rk);
                Put p = new Put(Bytes.toBytes(rk));
                for (int y = 0; y < 10; y++) {
                    random.nextBytes(value);
                    p.addColumn(familyName, Bytes.toBytes(random.nextLong()), value);
                }
                ht.mutate(p);
            }
            if ((x % 1000) == 0) {
                admin.flush(tableName);
            }
        } finally {
            traceScope.close();
        }
    }
    admin.flush(tableName);
    return rowKeys;
}
Also used : BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator) TraceScope(org.apache.htrace.TraceScope) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Put(org.apache.hadoop.hbase.client.Put)
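
The returned queue holds exactly 25,000 keys (5,000 outer iterations times 5 keys each), matching the capacity passed to the constructor, so none of the add() calls can fail. A hedged sketch of how a caller in the same test might drain it afterwards, assuming poll() is used so the loop ends once the queue is empty instead of blocking the way take() would:

// Hypothetical consumer for the queue returned by insertData();
// poll() returns null on an empty queue, which terminates the loop.
LinkedBlockingQueue<Long> rowKeys = insertData();
Table table = util.getConnection().getTable(tableName);
Long rk;
while ((rk = rowKeys.poll()) != null) {
    // Read each inserted row back by its key.
    Result result = table.get(new Get(Bytes.toBytes(rk)));
    assertFalse(result.isEmpty());
}
table.close();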

Aggregations

LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 259
Test (org.junit.Test): 91
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 64
IOException (java.io.IOException): 26
ArrayList (java.util.ArrayList): 23
Emitter (io.socket.emitter.Emitter): 19
JSONObject (org.json.JSONObject): 19
CountDownLatch (java.util.concurrent.CountDownLatch): 18
ThreadFactory (java.util.concurrent.ThreadFactory): 16
ExecutorService (java.util.concurrent.ExecutorService): 14
BlockingQueue (java.util.concurrent.BlockingQueue): 13
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 13
List (java.util.List): 12
URI (java.net.URI): 11
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 11
Intent (android.content.Intent): 9
HashMap (java.util.HashMap): 9
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 8
Map (java.util.Map): 8
UUID (java.util.UUID): 8