Example 86 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project druid by druid-io.

From the class ZkCoordinatorTest, the method setUp:

@Before
public void setUp() throws Exception {
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    try {
        infoDir = new File(File.createTempFile("blah", "blah2").getParent(), "ZkCoordinatorTest");
        infoDir.mkdirs();
        for (File file : infoDir.listFiles()) {
            file.delete();
        }
        log.info("Creating tmp test files in [%s]", infoDir);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    scheduledRunnable = Lists.newArrayList();
    segmentLoader = new CacheTestSegmentLoader();
    serverManager = new ServerManager(segmentLoader, new NoopQueryRunnerFactoryConglomerate(), new NoopServiceEmitter(), MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor(), new DefaultObjectMapper(), new LocalCacheProvider().get(), new CacheConfig());
    final ZkPathsConfig zkPaths = new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "/druid";
        }
    };
    segmentsAnnouncedByMe = new ConcurrentSkipListSet<>();
    announceCount = new AtomicInteger(0);
    announcer = new DataSegmentAnnouncer() {

        private final DataSegmentAnnouncer delegate = new BatchDataSegmentAnnouncer(me, new BatchDataSegmentAnnouncerConfig(), zkPaths, new Announcer(curator, Execs.singleThreaded("blah")), jsonMapper);

        @Override
        public void announceSegment(DataSegment segment) throws IOException {
            segmentsAnnouncedByMe.add(segment);
            announceCount.incrementAndGet();
            delegate.announceSegment(segment);
        }

        @Override
        public void unannounceSegment(DataSegment segment) throws IOException {
            segmentsAnnouncedByMe.remove(segment);
            announceCount.decrementAndGet();
            delegate.unannounceSegment(segment);
        }

        @Override
        public void announceSegments(Iterable<DataSegment> segments) throws IOException {
            for (DataSegment segment : segments) {
                segmentsAnnouncedByMe.add(segment);
            }
            announceCount.addAndGet(Iterables.size(segments));
            delegate.announceSegments(segments);
        }

        @Override
        public void unannounceSegments(Iterable<DataSegment> segments) throws IOException {
            for (DataSegment segment : segments) {
                segmentsAnnouncedByMe.remove(segment);
            }
            announceCount.addAndGet(-Iterables.size(segments));
            delegate.unannounceSegments(segments);
        }

        @Override
        public boolean isAnnounced(DataSegment segment) {
            return segmentsAnnouncedByMe.contains(segment);
        }
    };
    zkCoordinator = new ZkCoordinator(jsonMapper, new SegmentLoaderConfig() {

        @Override
        public File getInfoDir() {
            return infoDir;
        }

        @Override
        public int getNumLoadingThreads() {
            return 5;
        }

        @Override
        public int getAnnounceIntervalMillis() {
            return 50;
        }

        @Override
        public int getDropSegmentDelayMillis() {
            return 0;
        }
    }, zkPaths, me, announcer, curator, serverManager, new ScheduledExecutorFactory() {

        @Override
        public ScheduledExecutorService create(int corePoolSize, String nameFormat) {
            /*
               Override normal behavior by adding the runnable to a list so that the test
               can make sure all the scheduled runnables are executed by explicitly calling
               run() on each item in the list.
             */
            return new ScheduledThreadPoolExecutor(corePoolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat(nameFormat).build()) {

                @Override
                public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
                    scheduledRunnable.add(command);
                    return null;
                }
            };
        }
    });
}
Also used: ScheduledThreadPoolExecutor (java.util.concurrent.ScheduledThreadPoolExecutor), BatchDataSegmentAnnouncerConfig (io.druid.server.initialization.BatchDataSegmentAnnouncerConfig), DataSegment (io.druid.timeline.DataSegment), Announcer (io.druid.curator.announcement.Announcer), ZkPathsConfig (io.druid.server.initialization.ZkPathsConfig), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), TimeUnit (java.util.concurrent.TimeUnit), SegmentLoaderConfig (io.druid.segment.loading.SegmentLoaderConfig), CacheConfig (io.druid.client.cache.CacheConfig), CacheTestSegmentLoader (io.druid.segment.loading.CacheTestSegmentLoader), NoopQueryRunnerFactoryConglomerate (io.druid.query.NoopQueryRunnerFactoryConglomerate), NoopServiceEmitter (io.druid.server.metrics.NoopServiceEmitter), IOException (java.io.IOException), ScheduledExecutorFactory (io.druid.java.util.common.concurrent.ScheduledExecutorFactory), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper), LocalCacheProvider (io.druid.client.cache.LocalCacheProvider), File (java.io.File), Before (org.junit.Before)
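
The ScheduledExecutorFactory override above is the point of the setup: schedule() records each command in scheduledRunnable instead of queuing it, so the test decides exactly when the coordinator's periodic work runs. A minimal sketch of how a test body might drain the captured runnables (this loop is illustrative, not part of the original test):

// Execute everything the coordinator scheduled, deterministically, on the test thread.
for (Runnable runnable : scheduledRunnable) {
    runnable.run();
}
scheduledRunnable.clear();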

Example 87 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class LoadIncrementalHFiles, the method createExecutorService:

// Initialize a thread pool
private ExecutorService createExecutorService() {
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
    return pool;
}
Also used: ExecutorService (java.util.concurrent.ExecutorService), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)
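
For comparison, a self-contained sketch of the same pattern: a fixed-size pool whose idle core threads time out, with named daemon threads from a ThreadFactoryBuilder. The class name, the name format, and the pool size of 4 are illustrative, not taken from HBase:

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {

    // Up to 4 threads; allowCoreThreadTimeOut(true) applies the 60s keep-alive
    // to core threads too, so a fully idle pool shrinks back to zero threads.
    static ThreadPoolExecutor newPool() {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                4, 4, 60, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder()
                        .setNameFormat("bounded-pool-%d") // %d becomes the thread index: bounded-pool-0, -1, ...
                        .setDaemon(true)                  // daemon threads do not block JVM shutdown
                        .build());
        pool.allowCoreThreadTimeOut(true);
        return pool;
    }
}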

Example 88 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hadoop by apache.

From the class DelegationTokenRenewer, the method createNewThreadPoolService:

protected ThreadPoolExecutor createNewThreadPoolService(Configuration conf) {
    int nThreads = conf.getInt(YarnConfiguration.RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT, YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT);
    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("DelegationTokenRenewer #%d").build();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(nThreads, nThreads, 3L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
    pool.setThreadFactory(tf);
    pool.allowCoreThreadTimeOut(true);
    return pool;
}
Also used: ThreadFactory (java.util.concurrent.ThreadFactory), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)
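
A note on the design choice here: the factory is attached with setThreadFactory after construction rather than passed to the constructor. That works because ThreadPoolExecutor creates its worker threads lazily, when tasks are first submitted, so every worker still gets the "DelegationTokenRenewer #%d" name as long as setThreadFactory runs before the first submission. Passing the factory as a constructor argument, as the HBase example above does, avoids depending on that ordering.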

Example 89 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class ReplicationTableBase, the method setUpExecutor:

/**
   * Sets up the thread pool executor used to build the Replication Table in the background
   * @return the configured executor
   */
private Executor setUpExecutor() {
    ThreadPoolExecutor tempExecutor = new ThreadPoolExecutor(NUM_INITIALIZE_WORKERS, NUM_INITIALIZE_WORKERS, 100, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());
    ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
    tfb.setNameFormat("ReplicationTableExecutor-%d");
    tfb.setDaemon(true);
    tempExecutor.setThreadFactory(tfb.build());
    return tempExecutor;
}
Also used: ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)
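
One subtlety worth noting (my reading of the ThreadPoolExecutor semantics, not something the HBase code comments on): the 100 ms keep-alive has no effect as written, because core and maximum pool size are both NUM_INITIALIZE_WORKERS and allowCoreThreadTimeOut is never called. The keep-alive only applies to threads above the core size, or to core threads after allowCoreThreadTimeOut(true) as in the LoadIncrementalHFiles example above, so these initializer threads live until the executor shuts down.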

Example 90 with ThreadFactoryBuilder

Use of com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class AsyncFSOutputHelper, the method createOutput:

/**
   * Create {@link FanOutOneBlockAsyncDFSOutput} for {@link DistributedFileSystem}, and a simple
   * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}.
   */
public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite, boolean createParent, short replication, long blockSize, final EventLoop eventLoop) throws IOException {
    if (fs instanceof DistributedFileSystem) {
        return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f, overwrite, createParent, replication, blockSize, eventLoop);
    }
    final FSDataOutputStream fsOut;
    int bufferSize = fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
    if (createParent) {
        fsOut = fs.create(f, overwrite, bufferSize, replication, blockSize, null);
    } else {
        fsOut = fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, null);
    }
    final ExecutorService flushExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("AsyncFSOutputFlusher-" + f.toString().replace("%", "%%")).build());
    return new AsyncFSOutput() {

        private final ByteArrayOutputStream out = new ByteArrayOutputStream();

        @Override
        public void write(final byte[] b, final int off, final int len) {
            if (eventLoop.inEventLoop()) {
                out.write(b, off, len);
            } else {
                eventLoop.submit(() -> out.write(b, off, len)).syncUninterruptibly();
            }
        }

        @Override
        public void write(byte[] b) {
            write(b, 0, b.length);
        }

        @Override
        public void recoverAndClose(CancelableProgressable reporter) throws IOException {
            fsOut.close();
        }

        @Override
        public DatanodeInfo[] getPipeline() {
            return new DatanodeInfo[0];
        }

        private void flush0(CompletableFuture<Long> future, boolean sync) {
            try {
                synchronized (out) {
                    fsOut.write(out.getBuffer(), 0, out.size());
                    out.reset();
                }
            } catch (IOException e) {
                eventLoop.execute(() -> future.completeExceptionally(e));
                return;
            }
            try {
                if (sync) {
                    fsOut.hsync();
                } else {
                    fsOut.hflush();
                }
                long pos = fsOut.getPos();
                eventLoop.execute(() -> future.complete(pos));
            } catch (IOException e) {
                eventLoop.execute(() -> future.completeExceptionally(e));
            }
        }

        @Override
        public CompletableFuture<Long> flush(boolean sync) {
            CompletableFuture<Long> future = new CompletableFuture<>();
            flushExecutor.execute(() -> flush0(future, sync));
            return future;
        }

        @Override
        public void close() throws IOException {
            try {
                flushExecutor.submit(() -> {
                    synchronized (out) {
                        fsOut.write(out.getBuffer(), 0, out.size());
                        out.reset();
                    }
                    return null;
                }).get();
            } catch (InterruptedException e) {
                throw new InterruptedIOException();
            } catch (ExecutionException e) {
                Throwables.propagateIfPossible(e.getCause(), IOException.class);
                throw new IOException(e.getCause());
            } finally {
                flushExecutor.shutdown();
            }
            fsOut.close();
        }

        @Override
        public int buffered() {
            return out.size();
        }

        @Override
        public void writeInt(int i) {
            out.writeInt(i);
        }

        @Override
        public void write(ByteBuffer bb) {
            out.write(bb, bb.position(), bb.remaining());
        }
    };
}
Also used: InterruptedIOException (java.io.InterruptedIOException), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ByteArrayOutputStream (org.apache.hadoop.hbase.io.ByteArrayOutputStream), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CancelableProgressable (org.apache.hadoop.hbase.util.CancelableProgressable), ByteBuffer (java.nio.ByteBuffer), CompletableFuture (java.util.concurrent.CompletableFuture), ExecutorService (java.util.concurrent.ExecutorService), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), ExecutionException (java.util.concurrent.ExecutionException)
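
The replace("%", "%%") in the flush executor's name is easy to miss: setNameFormat takes a String.format pattern, so any literal percent sign in the embedded file path must be doubled, or formatting the name would fail. A minimal, runnable sketch of why the escaping matters; the class name, path, and thread-name prefix are made up:

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class NameFormatEscapingSketch {

    public static void main(String[] args) throws Exception {
        // A path containing a literal '%'; left unescaped, String.format would
        // try to interpret it as a format specifier and throw.
        String path = "/tmp/100%-done";
        ExecutorService flusher = Executors.newSingleThreadExecutor(
                new ThreadFactoryBuilder()
                        .setDaemon(true)
                        .setNameFormat("flusher-" + path.replace("%", "%%") + "-%d")
                        .build());
        // Prints something like: flusher-/tmp/100%-done-0
        flusher.submit(() -> System.out.println(Thread.currentThread().getName())).get();
        flusher.shutdown();
    }
}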

Aggregations

Classes most often used together with ThreadFactoryBuilder across these examples, with occurrence counts:

ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 143
ExecutorService (java.util.concurrent.ExecutorService): 49
ThreadFactory (java.util.concurrent.ThreadFactory): 46
IOException (java.io.IOException): 23
Future (java.util.concurrent.Future): 19
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 19
ExecutionException (java.util.concurrent.ExecutionException): 17
ArrayList (java.util.ArrayList): 15
Callable (java.util.concurrent.Callable): 12
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 12
HashMap (java.util.HashMap): 11
Path (org.apache.hadoop.fs.Path): 11
LinkedList (java.util.LinkedList): 10
Map (java.util.Map): 10
HashSet (java.util.HashSet): 9
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 9
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 9
Test (org.junit.Test): 9
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 8
Before (org.junit.Before): 8