Use of java.util.concurrent.Executor in project hadoop by apache.
The class FsDatasetImpl, method cacheBlock.
/**
 * Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
 */
private void cacheBlock(String bpid, long blockId) {
  FsVolumeImpl volume;
  String blockFileName;
  long length, genstamp;
  Executor volumeExecutor;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ReplicaInfo info = volumeMap.get(bpid, blockId);
    boolean success = false;
    try {
      if (info == null) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": ReplicaInfo not found.");
        return;
      }
      if (info.getState() != ReplicaState.FINALIZED) {
        LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": replica is not finalized; it is in state " + info.getState());
        return;
      }
      try {
        volume = (FsVolumeImpl) info.getVolume();
        if (volume == null) {
          LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": volume not found.");
          return;
        }
      } catch (ClassCastException e) {
        LOG.warn("Failed to cache block with id " + blockId + ": volume was not an instance of FsVolumeImpl.");
        return;
      }
      if (volume.isTransientStorage()) {
        LOG.warn("Caching not supported on block with id " + blockId + " since the volume is backed by RAM.");
        return;
      }
      success = true;
    } finally {
      if (!success) {
        cacheManager.numBlocksFailedToCache.incrementAndGet();
      }
    }
    blockFileName = info.getBlockURI().toString();
    length = info.getVisibleLength();
    genstamp = info.getGenerationStamp();
    volumeExecutor = volume.getCacheExecutor();
  }
  cacheManager.cacheBlock(blockId, bpid, blockFileName, length, genstamp, volumeExecutor);
}
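Note that the dataset lock is only held long enough to resolve the replica; the caching work itself is handed to the Executor obtained from the volume. A minimal sketch of that hand-off pattern follows; CachingDispatcher and its body are illustrative assumptions, not the real FsDatasetCache.cacheBlock implementation.

// Hypothetical sketch: dispatching caching work to a per-volume Executor.
// Class and method names are illustrative, not Hadoop's actual API.
class CachingDispatcher {
  void cacheBlock(long blockId, String bpid, String blockFileName,
      long length, long genstamp, java.util.concurrent.Executor volumeExecutor) {
    Runnable task = () -> {
      // In the real datanode this would mmap the block file, verify the
      // checksum and mark the block as cached; all of that is omitted here.
      System.out.println("Caching block " + blockId + " in pool " + bpid
          + " from " + blockFileName + " (" + length + " bytes, genstamp " + genstamp + ")");
    };
    // Each volume gets its own executor, so a slow disk does not stall
    // caching work queued against the other volumes.
    volumeExecutor.execute(task);
  }
}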
Use of java.util.concurrent.Executor in project hive by apache.
The class TestLowLevelCacheImpl, method testMTTWithCleanup.
@Test
public void testMTTWithCleanup() {
  final LowLevelCacheImpl cache = new LowLevelCacheImpl(LlapDaemonCacheMetrics.create("test", "1"), new DummyCachePolicy(), new DummyAllocator(), true, 1);
  final long fn1 = 1, fn2 = 2;
  final int offsetsToUse = 8;
  final CountDownLatch cdlIn = new CountDownLatch(4), cdlOut = new CountDownLatch(1);
  final AtomicInteger rdmsDone = new AtomicInteger(0);
  Callable<Long> rdmCall = new Callable<Long>() {
    public Long call() {
      int gets = 0, puts = 0;
      try {
        Random rdm = new Random(1234 + Thread.currentThread().getId());
        syncThreadStart(cdlIn, cdlOut);
        for (int i = 0; i < 20000; ++i) {
          boolean isGet = rdm.nextBoolean(), isFn1 = rdm.nextBoolean();
          long fileName = isFn1 ? fn1 : fn2;
          int fileIndex = isFn1 ? 1 : 2;
          int count = rdm.nextInt(offsetsToUse);
          if (isGet) {
            int[] offsets = new int[count];
            count = generateOffsets(offsetsToUse, rdm, offsets);
            CreateHelper list = new CreateHelper();
            for (int j = 0; j < count; ++j) {
              list.addOrMerge(offsets[j], offsets[j] + 1, true, false);
            }
            DiskRangeList iter = cache.getFileData(fileName, list.get(), 0, testFactory, null, null);
            int j = -1;
            while (iter != null) {
              ++j;
              if (!(iter instanceof CacheChunk)) {
                iter = iter.next;
                continue;
              }
              ++gets;
              LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) iter).getBuffer();
              assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), result.arenaIndex);
              cache.decRefBuffer(result);
              iter = iter.next;
            }
          } else {
            DiskRange[] ranges = new DiskRange[count];
            int[] offsets = new int[count];
            for (int j = 0; j < count; ++j) {
              int next = rdm.nextInt(offsetsToUse);
              ranges[j] = dr(next, next + 1);
              offsets[j] = next;
            }
            MemoryBuffer[] buffers = new MemoryBuffer[count];
            for (int j = 0; j < offsets.length; ++j) {
              LlapDataBuffer buf = LowLevelCacheImpl.allocateFake();
              buf.arenaIndex = makeFakeArenaIndex(fileIndex, offsets[j]);
              buffers[j] = buf;
            }
            long[] mask = cache.putFileData(fileName, ranges, buffers, 0, Priority.NORMAL, null);
            puts += buffers.length;
            long maskVal = 0;
            if (mask != null) {
              assertEquals(1, mask.length);
              maskVal = mask[0];
            }
            for (int j = 0; j < offsets.length; ++j) {
              LlapDataBuffer buf = (LlapDataBuffer) (buffers[j]);
              if ((maskVal & 1) == 1) {
                assertEquals(makeFakeArenaIndex(fileIndex, offsets[j]), buf.arenaIndex);
              }
              maskVal >>= 1;
              cache.decRefBuffer(buf);
            }
          }
        }
      } finally {
        rdmsDone.incrementAndGet();
      }
      return (((long) gets) << 32) | puts;
    }

    private int makeFakeArenaIndex(int fileIndex, long offset) {
      return (int) ((fileIndex << 16) + offset);
    }
  };
  FutureTask<Integer> evictionTask = new FutureTask<Integer>(new Callable<Integer>() {
    public Integer call() {
      boolean isFirstFile = false;
      Random rdm = new Random(1234 + Thread.currentThread().getId());
      int evictions = 0;
      syncThreadStart(cdlIn, cdlOut);
      while (rdmsDone.get() < 3) {
        DiskRangeList head = new DiskRangeList(0, offsetsToUse + 1);
        isFirstFile = !isFirstFile;
        long fileId = isFirstFile ? fn1 : fn2;
        head = cache.getFileData(fileId, head, 0, testFactory, null, null);
        DiskRange[] results = head.listToArray();
        int startIndex = rdm.nextInt(results.length), index = startIndex;
        LlapDataBuffer victim = null;
        do {
          DiskRange r = results[index];
          if (r instanceof CacheChunk) {
            LlapDataBuffer result = (LlapDataBuffer) ((CacheChunk) r).getBuffer();
            cache.decRefBuffer(result);
            if (victim == null && result.invalidate()) {
              ++evictions;
              victim = result;
            }
          }
          ++index;
          if (index == results.length)
            index = 0;
        } while (index != startIndex);
        if (victim == null)
          continue;
        cache.notifyEvicted(victim);
      }
      return evictions;
    }
  });
  FutureTask<Long> rdmTask1 = new FutureTask<Long>(rdmCall), rdmTask2 = new FutureTask<Long>(rdmCall), rdmTask3 = new FutureTask<Long>(rdmCall);
  Executor threadPool = Executors.newFixedThreadPool(4);
  threadPool.execute(rdmTask1);
  threadPool.execute(rdmTask2);
  threadPool.execute(rdmTask3);
  threadPool.execute(evictionTask);
  try {
    cdlIn.await();
    cdlOut.countDown();
    long result1 = rdmTask1.get(), result2 = rdmTask2.get(), result3 = rdmTask3.get();
    int evictions = evictionTask.get();
    LOG.info("MTT test: task 1: " + descRdmTask(result1) + ", task 2: " + descRdmTask(result2) + ", task 3: " + descRdmTask(result3) + "; " + evictions + " evictions");
  } catch (Throwable t) {
    throw new RuntimeException(t);
  }
}
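The three reader/writer tasks and the eviction task all call syncThreadStart(cdlIn, cdlOut), a helper not shown in this excerpt, to line up on the two latches before the main thread releases them with cdlIn.await() followed by cdlOut.countDown(). A plausible sketch of that helper, assuming the conventional count-down-then-wait handshake, is:

// Assumed shape of the latch handshake; the actual helper in the Hive test
// may differ in detail.
private void syncThreadStart(CountDownLatch cdlIn, CountDownLatch cdlOut) {
  cdlIn.countDown();    // signal "this worker is ready"
  try {
    cdlOut.await();     // block until the main thread releases all workers at once
  } catch (InterruptedException e) {
    throw new RuntimeException(e);
  }
}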
Use of java.util.concurrent.Executor in project tomcat by apache.
The class ThreadLocalLeakPreventionListener, method stopIdleThreads.
/**
 * Updates each ThreadPoolExecutor with the current time, which is the time
 * when a context is being stopped.
 *
 * @param context
 *            the context being stopped, used to discover all the Connectors
 *            of its parent Service.
 */
private void stopIdleThreads(Context context) {
  if (serverStopping)
    return;
  if (!(context instanceof StandardContext) || !((StandardContext) context).getRenewThreadsWhenStoppingContext()) {
    log.debug("Not renewing threads when the context is stopping. " + "It is not configured to do it.");
    return;
  }
  Engine engine = (Engine) context.getParent().getParent();
  Service service = engine.getService();
  Connector[] connectors = service.findConnectors();
  if (connectors != null) {
    for (Connector connector : connectors) {
      ProtocolHandler handler = connector.getProtocolHandler();
      Executor executor = null;
      if (handler != null) {
        executor = handler.getExecutor();
      }
      if (executor instanceof ThreadPoolExecutor) {
        ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) executor;
        threadPoolExecutor.contextStopping();
      } else if (executor instanceof StandardThreadExecutor) {
        StandardThreadExecutor stdThreadExecutor = (StandardThreadExecutor) executor;
        stdThreadExecutor.contextStopping();
      }
    }
  }
}
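Renewing the pool threads matters because a ThreadLocal value set by a web application stays strongly reachable from a worker thread's thread-local map for as long as that thread lives; retiring the thread releases the value (and, with it, the web application's classloader). A small self-contained illustration of that mechanism, using plain java.util.concurrent rather than Tomcat's classes:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ThreadLocalLeakDemo {
  // Every pool thread that runs a task keeps a reference to the byte[]
  // in its own ThreadLocalMap until that thread terminates.
  private static final ThreadLocal<byte[]> LEAK = new ThreadLocal<>();

  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    for (int i = 0; i < 4; i++) {
      pool.execute(() -> LEAK.set(new byte[1024 * 1024]));
    }
    // Shutting the pool down (or, in Tomcat's case, renewing its threads)
    // lets the worker threads die, which releases the thread-local values.
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}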
Use of java.util.concurrent.Executor in project tomcat by apache.
The class SocketWrapperBase, method execute.
/**
 * Transfers processing to a container thread.
 *
 * @param runnable The actions to process on a container thread
 *
 * @throws RejectedExecutionException If the runnable cannot be executed
 */
public void execute(Runnable runnable) {
  Executor executor = endpoint.getExecutor();
  if (!endpoint.isRunning() || executor == null) {
    throw new RejectedExecutionException();
  }
  executor.execute(runnable);
}
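A caller has to be prepared for the RejectedExecutionException, since the endpoint may be stopping (or have no executor) at the moment of the hand-off. A hedged usage sketch; dispatchToContainer and the fallback comment are illustrative, not Tomcat API:

// Illustrative caller; socketWrapper stands in for a SocketWrapperBase instance.
void dispatchToContainer(SocketWrapperBase<?> socketWrapper) {
  try {
    socketWrapper.execute(() -> {
      // Process the socket event on a container thread.
    });
  } catch (java.util.concurrent.RejectedExecutionException e) {
    // The endpoint is stopping or has no executor; fall back to closing
    // the connection or handling the event on the current thread.
  }
}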
Use of java.util.concurrent.Executor in project tomcat by apache.
The class AbstractEndpoint, method setMaxThreads.
public void setMaxThreads(int maxThreads) {
  this.maxThreads = maxThreads;
  Executor executor = this.executor;
  if (internalExecutor && executor instanceof java.util.concurrent.ThreadPoolExecutor) {
    // The internal executor should always be an instance of
    // j.u.c.ThreadPoolExecutor but it may be null if the endpoint is
    // not running.
    // This check also avoids various threading issues.
    ((java.util.concurrent.ThreadPoolExecutor) executor).setMaximumPoolSize(maxThreads);
  }
}
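Resizing works here because java.util.concurrent.ThreadPoolExecutor allows its maximum pool size to be changed while the pool is running; excess threads above the new maximum are terminated when they next become idle. A small standalone sketch of that behaviour:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ResizeDemo {
  public static void main(String[] args) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        2, 8, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    // Adjust the pool at runtime; threads above the new maximum are
    // terminated when they next become idle.
    pool.setMaximumPoolSize(4);
    System.out.println("max pool size is now " + pool.getMaximumPoolSize());
    pool.shutdown();
  }
}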