Example 6 with Executor

Use of java.util.concurrent.Executor in project hadoop by apache.

The class FsDatasetImpl, method cacheBlock.

/**
 * Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
 */
private void cacheBlock(String bpid, long blockId) {
    FsVolumeImpl volume;
    String blockFileName;
    long length, genstamp;
    Executor volumeExecutor;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo info = volumeMap.get(bpid, blockId);
        boolean success = false;
        try {
            if (info == null) {
                LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": ReplicaInfo not found.");
                return;
            }
            if (info.getState() != ReplicaState.FINALIZED) {
                LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": replica is not finalized; it is in state " + info.getState());
                return;
            }
            try {
                volume = (FsVolumeImpl) info.getVolume();
                if (volume == null) {
                    LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": volume not found.");
                    return;
                }
            } catch (ClassCastException e) {
                LOG.warn("Failed to cache block with id " + blockId + ": volume was not an instance of FsVolumeImpl.");
                return;
            }
            if (volume.isTransientStorage()) {
                LOG.warn("Caching not supported on block with id " + blockId + " since the volume is backed by RAM.");
                return;
            }
            success = true;
        } finally {
            if (!success) {
                cacheManager.numBlocksFailedToCache.incrementAndGet();
            }
        }
        blockFileName = info.getBlockURI().toString();
        length = info.getVisibleLength();
        genstamp = info.getGenerationStamp();
        volumeExecutor = volume.getCacheExecutor();
    }
    cacheManager.cacheBlock(blockId, bpid, blockFileName, length, genstamp, volumeExecutor);
}
Also used: Executor (java.util.concurrent.Executor), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
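
The shape of this method is deliberate: every piece of replica state the task needs is validated and copied into locals while the dataset lock is held, and the actual caching work is handed to the volume's Executor only after the lock is released. Below is a minimal, self-contained sketch of that snapshot-then-dispatch pattern; the class and field names are illustrative, not Hadoop's.

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.ReentrantLock;

public class SnapshotThenDispatch {

    private final ReentrantLock lock = new ReentrantLock();
    private final Executor executor = Executors.newSingleThreadExecutor();
    // Mutable state guarded by the lock (illustrative stand-ins).
    private String path = "/data/blk_1001";
    private long length = 4096;

    void cacheAsync() {
        final String snapshotPath;
        final long snapshotLength;
        lock.lock();
        try {
            // Validate and copy everything the task needs while locked...
            snapshotPath = path;
            snapshotLength = length;
        } finally {
            lock.unlock();
        }
        // ...then do the slow work outside the lock, on the executor,
        // so other threads are not blocked while the block is cached.
        executor.execute(() -> System.out.println(
                "caching " + snapshotPath + " (" + snapshotLength + " bytes)"));
    }
}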

Example 7 with Executor

Use of java.util.concurrent.Executor in project hive by apache.

The class TestLowLevelCacheImpl, method testMTTWithCleanup.

@Test
public void testMTTWithCleanup() {
    final LowLevelCacheImpl cache = new LowLevelCacheImpl(LlapDaemonCacheMetrics.create("test", "1"), new DummyCachePolicy(), new DummyAllocator(), true, 1);
    final long fn1 = 1, fn2 = 2;
    final int offsetsToUse = 8;
    final CountDownLatch cdlIn = new CountDownLatch(4), cdlOut = new CountDownLatch(1);
    final AtomicInteger rdmsDone = new AtomicInteger(0);
    Callable<Long> rdmCall = new Callable<Long>() {

        public Long call() {
            int gets = 0, puts = 0;
            try {
                Random rdm = new Random(1234 + Thread.currentThread().getId());
                syncThreadStart(cdlIn, cdlOut);
                for (int i = 0; i < 20000; ++i) {
                    boolean isGet = rdm.nextBoolean(), isFn1 = rdm.nextBoolean();
                    long fileName = isFn1 ? fn1 : fn2;
                    int fileIndex = isFn1 ? 1 : 2;
                    int count = rdm.nextInt(offsetsToUse);
                    if (isGet) {
                        int[] offsets = new int[count];
                        count = generateOffsets(offsetsToUse, rdm, offsets);
                        CreateHelper list = new CreateHelper();
                        for (int j = 0; j < count; ++j) {
                            list.addOrMerge(offsets[j], offsets[j] + 1, true, false);
                        }
                        DiskRangeList iter = cache.getFileData(fileName, list.get(), 0, testFactory, null, null);
                        int j = -1;
                        while (iter != null) {
                            ++j;
                            if (!(iter instanceof CacheChunk)) {
                                iter = iter.next;
                                continue;
                            }
                            ++gets;
                            LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) iter).getBuffer();
                            assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), result.arenaIndex);
                            cache.decRefBuffer(result);
                            iter = iter.next;
                        }
                    } else {
                        DiskRange[] ranges = new DiskRange[count];
                        int[] offsets = new int[count];
                        for (int j = 0; j < count; ++j) {
                            int next = rdm.nextInt(offsetsToUse);
                            ranges[j] = dr(next, next + 1);
                            offsets[j] = next;
                        }
                        MemoryBuffer[] buffers = new MemoryBuffer[count];
                        for (int j = 0; j < offsets.length; ++j) {
                            LlapDataBuffer buf = LowLevelCacheImpl.allocateFake();
                            buf.arenaIndex = makeFakeArenaIndex(fileIndex, offsets[j]);
                            buffers[j] = buf;
                        }
                        long[] mask = cache.putFileData(fileName, ranges, buffers, 0, Priority.NORMAL, null);
                        puts += buffers.length;
                        long maskVal = 0;
                        if (mask != null) {
                            assertEquals(1, mask.length);
                            maskVal = mask[0];
                        }
                        for (int j = 0; j < offsets.length; ++j) {
                            LlapDataBuffer buf = (LlapDataBuffer) (buffers[j]);
                            if ((maskVal & 1) == 1) {
                                assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), buf.arenaIndex);
                            }
                            maskVal >>= 1;
                            cache.decRefBuffer(buf);
                        }
                    }
                }
            } finally {
                rdmsDone.incrementAndGet();
            }
            return (((long) gets) << 32) | puts;
        }

        private int makeFakeArenaIndex(int fileIndex, long offset) {
            return (int) ((fileIndex << 16) + offset);
        }
    };
    FutureTask<Integer> evictionTask = new FutureTask<Integer>(new Callable<Integer>() {

        public Integer call() {
            boolean isFirstFile = false;
            Random rdm = new Random(1234 + Thread.currentThread().getId());
            int evictions = 0;
            syncThreadStart(cdlIn, cdlOut);
            while (rdmsDone.get() < 3) {
                DiskRangeList head = new DiskRangeList(0, offsetsToUse + 1);
                isFirstFile = !isFirstFile;
                long fileId = isFirstFile ? fn1 : fn2;
                head = cache.getFileData(fileId, head, 0, testFactory, null, null);
                DiskRange[] results = head.listToArray();
                int startIndex = rdm.nextInt(results.length), index = startIndex;
                LlapDataBuffer victim = null;
                do {
                    DiskRange r = results[index];
                    if (r instanceof CacheChunk) {
                        LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) r).getBuffer();
                        cache.decRefBuffer(result);
                        if (victim == null && result.invalidate()) {
                            ++evictions;
                            victim = result;
                        }
                    }
                    ++index;
                    if (index == results.length)
                        index = 0;
                } while (index != startIndex);
                if (victim == null)
                    continue;
                cache.notifyEvicted(victim);
            }
            return evictions;
        }
    });
    FutureTask<Long> rdmTask1 = new FutureTask<Long>(rdmCall), rdmTask2 = new FutureTask<Long>(rdmCall), rdmTask3 = new FutureTask<Long>(rdmCall);
    Executor threadPool = Executors.newFixedThreadPool(4);
    threadPool.execute(rdmTask1);
    threadPool.execute(rdmTask2);
    threadPool.execute(rdmTask3);
    threadPool.execute(evictionTask);
    try {
        cdlIn.await();
        cdlOut.countDown();
        long result1 = rdmTask1.get(), result2 = rdmTask2.get(), result3 = rdmTask3.get();
        int evictions = evictionTask.get();
        LOG.info("MTT test: task 1: " + descRdmTask(result1) + ", task 2: " + descRdmTask(result2) + ", task 3: " + descRdmTask(result3) + "; " + evictions + " evictions");
    } catch (Throwable t) {
        throw new RuntimeException(t);
    }
}
Also used: DiskRangeList (org.apache.hadoop.hive.common.io.DiskRangeList), Callable (java.util.concurrent.Callable), CreateHelper (org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper), Executor (java.util.concurrent.Executor), Random (java.util.Random), FutureTask (java.util.concurrent.FutureTask), CacheChunk (org.apache.hadoop.hive.ql.io.orc.encoded.CacheChunk), CountDownLatch (java.util.concurrent.CountDownLatch), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), MemoryBuffer (org.apache.hadoop.hive.common.io.encoded.MemoryBuffer), DiskRange (org.apache.hadoop.hive.common.io.DiskRange), Test (org.junit.Test)
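
Two helpers referenced above, syncThreadStart and descRdmTask, are not shown in the snippet. Judging from how the latches and the packed long result are used, they presumably look something like the following reconstruction (not the verbatim Hive source):

// Presumed start barrier: each worker announces readiness on cdlIn, then
// blocks until the driver opens the gate. The driver side is visible in
// the test: cdlIn.await() followed by cdlOut.countDown().
private void syncThreadStart(CountDownLatch cdlIn, CountDownLatch cdlOut) {
    cdlIn.countDown();
    try {
        cdlOut.await();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}

// Presumed decoder for the worker result, which packs the get count into
// the high 32 bits and the put count into the low 32 bits
// (see "return (((long) gets) << 32) | puts" above).
private String descRdmTask(long result) {
    return (result >>> 32) + " gets, " + (result & 0xFFFFFFFFL) + " puts";
}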

Example 8 with Executor

Use of java.util.concurrent.Executor in project tomcat by apache.

The class ThreadLocalLeakPreventionListener, method stopIdleThreads.

/**
 * Updates each ThreadPoolExecutor with the current time, which is the time
 * when a context is being stopped.
 *
 * @param context the context being stopped, used to discover all the
 *                Connectors of its parent Service.
 */
private void stopIdleThreads(Context context) {
    if (serverStopping)
        return;
    if (!(context instanceof StandardContext) || !((StandardContext) context).getRenewThreadsWhenStoppingContext()) {
        log.debug("Not renewing threads when the context is stopping. " + "It is not configured to do it.");
        return;
    }
    Engine engine = (Engine) context.getParent().getParent();
    Service service = engine.getService();
    Connector[] connectors = service.findConnectors();
    if (connectors != null) {
        for (Connector connector : connectors) {
            ProtocolHandler handler = connector.getProtocolHandler();
            Executor executor = null;
            if (handler != null) {
                executor = handler.getExecutor();
            }
            if (executor instanceof ThreadPoolExecutor) {
                ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) executor;
                threadPoolExecutor.contextStopping();
            } else if (executor instanceof StandardThreadExecutor) {
                StandardThreadExecutor stdThreadExecutor = (StandardThreadExecutor) executor;
                stdThreadExecutor.contextStopping();
            }
        }
    }
}
Also used: ProtocolHandler (org.apache.coyote.ProtocolHandler), Connector (org.apache.catalina.connector.Connector), Executor (java.util.concurrent.Executor), ThreadPoolExecutor (org.apache.tomcat.util.threads.ThreadPoolExecutor), Service (org.apache.catalina.Service), Engine (org.apache.catalina.Engine)
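
A note on the two branches: ThreadPoolExecutor here is Tomcat's org.apache.tomcat.util.threads.ThreadPoolExecutor, not the java.util.concurrent class, and contextStopping() tells the pool to renew its worker threads so that ThreadLocal values pinning the stopped context's classloader can be garbage collected. StandardThreadExecutor is the component configured via an <Executor> element in server.xml; its contextStopping() triggers the same renewal on the pool it wraps.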

Example 9 with Executor

Use of java.util.concurrent.Executor in project tomcat by apache.

The class SocketWrapperBase, method execute.

/**
 * Transfers processing to a container thread.
 *
 * @param runnable The actions to process on a container thread
 *
 * @throws RejectedExecutionException If the runnable cannot be executed
 */
public void execute(Runnable runnable) {
    Executor executor = endpoint.getExecutor();
    if (!endpoint.isRunning() || executor == null) {
        throw new RejectedExecutionException();
    }
    executor.execute(runnable);
}
Also used: Executor (java.util.concurrent.Executor), RejectedExecutionException (java.util.concurrent.RejectedExecutionException)
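
Since execute() throws rather than silently dropping the task when the endpoint is stopped or has no executor, callers have to be prepared for RejectedExecutionException. A minimal caller-side sketch, using only java.util.concurrent types (the Dispatcher class and its names are illustrative, not Tomcat code):

import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;

public class Dispatcher {

    private final Executor executor;

    public Dispatcher(Executor executor) {
        this.executor = executor;
    }

    /** Tries to hand the task to the pool; reports rather than blocking. */
    public boolean tryDispatch(Runnable task) {
        try {
            executor.execute(task);
            return true;
        } catch (RejectedExecutionException e) {
            // Pool is saturated or shutting down; the caller decides what
            // to do next, e.g. close the connection instead of processing it.
            System.err.println("dispatch rejected: " + e);
            return false;
        }
    }
}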

Example 10 with Executor

Use of java.util.concurrent.Executor in project tomcat by apache.

The class AbstractEndpoint, method setMaxThreads.

public void setMaxThreads(int maxThreads) {
    this.maxThreads = maxThreads;
    Executor executor = this.executor;
    if (internalExecutor && executor instanceof java.util.concurrent.ThreadPoolExecutor) {
        // The internal executor should always be an instance of
        // j.u.c.ThreadPoolExecutor but it may be null if the endpoint is
        // not running.
        // This check also avoids various threading issues.
        ((java.util.concurrent.ThreadPoolExecutor) executor).setMaximumPoolSize(maxThreads);
    }
}
Also used: ResizableExecutor (org.apache.tomcat.util.threads.ResizableExecutor), Executor (java.util.concurrent.Executor), ThreadPoolExecutor (org.apache.tomcat.util.threads.ThreadPoolExecutor)
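
One detail worth knowing when writing similar code against a plain java.util.concurrent.ThreadPoolExecutor: setMaximumPoolSize() throws IllegalArgumentException if the new maximum is smaller than the current core pool size, so a general resize helper has to order the two setters. A sketch under that assumption (PoolResizer is an illustrative name, not part of Tomcat):

import java.util.concurrent.ThreadPoolExecutor;

public final class PoolResizer {

    private PoolResizer() {}

    /**
     * Resizes core and maximum pool sizes in an order that never
     * violates the invariant core <= max, whichever direction we move.
     */
    public static void resize(ThreadPoolExecutor pool, int core, int max) {
        if (core > max) {
            throw new IllegalArgumentException("core > max");
        }
        if (max >= pool.getCorePoolSize()) {
            pool.setMaximumPoolSize(max);  // grow (or keep) max first
            pool.setCorePoolSize(core);
        } else {
            pool.setCorePoolSize(core);    // shrink core first
            pool.setMaximumPoolSize(max);
        }
    }
}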

Aggregations

Executor (java.util.concurrent.Executor): 267
Test (org.junit.Test): 114
CountDownLatch (java.util.concurrent.CountDownLatch): 31
ArrayList (java.util.ArrayList): 30
IOException (java.io.IOException): 28
List (java.util.List): 22
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 21
Timeouts (org.neo4j.cluster.timeout.Timeouts): 15
Map (java.util.Map): 13
ExecutorService (java.util.concurrent.ExecutorService): 13
RejectedExecutionException (java.util.concurrent.RejectedExecutionException): 13
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 13
InstanceId (org.neo4j.cluster.InstanceId): 13
Config (org.neo4j.kernel.configuration.Config): 13
File (java.io.File): 12
HashMap (java.util.HashMap): 12
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 12
ObjectInputStreamFactory (org.neo4j.cluster.protocol.atomicbroadcast.ObjectInputStreamFactory): 12
ObjectOutputStreamFactory (org.neo4j.cluster.protocol.atomicbroadcast.ObjectOutputStreamFactory): 12
HeartbeatContext (org.neo4j.cluster.protocol.heartbeat.HeartbeatContext): 12