Use of java.util.concurrent.locks.ReadWriteLock in project hadoop by apache.
The class TestLogAggregationService, method testFixedSizeThreadPool.
@Test(timeout = 30000)
public void testFixedSizeThreadPool() throws Exception {
  // store the configured thread pool size temporarily for later restoration
  int initThreadPoolSize = conf.getInt(
      YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE,
      YarnConfiguration.DEFAULT_NM_LOG_AGGREGATION_THREAD_POOL_SIZE);
  int threadPoolSize = 3;
  conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, threadPoolSize);
  DeletionService delSrvc = mock(DeletionService.class);
  LocalDirsHandlerService dirSvc = mock(LocalDirsHandlerService.class);
  when(dirSvc.getLogDirs()).thenThrow(new RuntimeException());
  LogAggregationService logAggregationService =
      new LogAggregationService(dispatcher, this.context, delSrvc, dirSvc);
  logAggregationService.init(this.conf);
  logAggregationService.start();
  ExecutorService executorService = logAggregationService.threadPool;
  // used to block threads in the thread pool, because the main thread always
  // acquires the write lock first
  final ReadWriteLock rwLock = new ReentrantReadWriteLock();
  final Lock rLock = rwLock.readLock();
  final Lock wLock = rwLock.writeLock();
  try {
    wLock.lock();
    Runnable runnable = new Runnable() {
      @Override
      public void run() {
        try {
          // threads in the thread pool running this will block until the
          // main thread releases the write lock; only unlock the read lock
          // if it was actually acquired, since calling unlock() without
          // ownership would throw IllegalMonitorStateException
          if (rLock.tryLock(35000, TimeUnit.MILLISECONDS)) {
            rLock.unlock();
          }
        } catch (InterruptedException e) {
          e.printStackTrace();
        }
      }
    };
    // submit threadPoolSize + 1 tasks so that at most threadPoolSize threads are
    // created in the thread pool, each of which is blocked on the read lock
    for (int i = 0; i < threadPoolSize + 1; i++) {
      executorService.submit(runnable);
    }
    // count the number of currently running LogAggregationService threads
    int runningThread = ((ThreadPoolExecutor) executorService).getActiveCount();
    assertEquals(threadPoolSize, runningThread);
  } finally {
    wLock.unlock();
  }
  logAggregationService.stop();
  logAggregationService.close();
  // restore the original configuration to avoid side effects on other tests
  conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, initThreadPoolSize);
}
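The blocking trick is easier to see without the YARN scaffolding. The following is a minimal, self-contained sketch of the same technique, not taken from Hadoop (the class name FixedPoolProbe, the pool size, and the sleep are illustrative choices): the main thread holds the write lock, every submitted task parks on the read lock, and ThreadPoolExecutor.getActiveCount() then shows that a fixed-size pool never created more than poolSize worker threads.

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FixedPoolProbe {
  public static void main(String[] args) throws Exception {
    int poolSize = 3;
    ThreadPoolExecutor pool =
        (ThreadPoolExecutor) Executors.newFixedThreadPool(poolSize);
    ReadWriteLock rwLock = new ReentrantReadWriteLock();
    rwLock.writeLock().lock();
    try {
      // submit one more task than there are threads; each task parks on the
      // read lock because the main thread is holding the write lock
      for (int i = 0; i < poolSize + 1; i++) {
        pool.submit(() -> {
          rwLock.readLock().lock();
          rwLock.readLock().unlock();
        });
      }
      Thread.sleep(500); // give the pool time to spin up its workers
      // prints 3: the pool never grows beyond its fixed size
      System.out.println("active threads: " + pool.getActiveCount());
    } finally {
      rwLock.writeLock().unlock();
      pool.shutdown();
    }
  }
}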
Use of java.util.concurrent.locks.ReadWriteLock in project hive by apache.
The class QueryTracker, method queryComplete.
/**
 * Register completion for a query.
 * @param queryIdentifier identifier of the query being completed
 * @param deleteDelay delay, in seconds, before the query's local directories are cleaned up; -1 means use the configured default
 * @param isInternal whether this is an internal call, in which case the permission check on the query is skipped
 * @return the QueryInfo for the completed query, or null if the query is unknown
 */
QueryInfo queryComplete(QueryIdentifier queryIdentifier, long deleteDelay,
    boolean isInternal) throws IOException {
  if (deleteDelay == -1) {
    deleteDelay = defaultDeleteDelaySeconds;
  }
  ReadWriteLock dagLock = getDagLock(queryIdentifier);
  dagLock.writeLock().lock();
  try {
    QueryInfo queryInfo = isInternal
        ? queryInfoMap.get(queryIdentifier)
        : checkPermissionsAndGetQuery(queryIdentifier);
    rememberCompletedDag(queryIdentifier);
    LOG.info("Processing queryComplete for queryIdentifier={} with deleteDelay={} seconds",
        queryIdentifier, deleteDelay);
    queryInfoMap.remove(queryIdentifier);
    if (queryInfo == null) {
      // Should not happen.
      LOG.warn("Ignoring query complete for unknown dag: {}", queryIdentifier);
      return null;
    }
    String[] localDirs = queryInfo.getLocalDirsNoCreate();
    if (localDirs != null) {
      for (String localDir : localDirs) {
        cleanupDir(localDir, deleteDelay);
        ShuffleHandler.get().unregisterDag(localDir, queryInfo.getAppIdString(),
            queryInfo.getDagIdentifier());
      }
    }
    if (routeBasedLoggingEnabled) {
      // Inform the routing purgePolicy.
      // Send out a fake log message at the ERROR level with the MDC for this
      // query set up. With an LLAP custom appender this message will not be logged.
      final String dagId = queryInfo.getDagIdString();
      final String queryId = queryInfo.getHiveQueryIdString();
      MDC.put("dagId", dagId);
      MDC.put("queryId", queryId);
      try {
        LOG.error(QUERY_COMPLETE_MARKER, "Ignore this. Log line to interact with logger."
            + " Query complete: " + queryInfo.getHiveQueryIdString() + ", "
            + queryInfo.getDagIdString());
      } finally {
        MDC.clear();
      }
    }
    // Clearing this before sending a kill is OK, since canFinish will change to false.
    // Ideally this should be a state machine where kills are issued to the executor,
    // and the structures are cleaned up once all tasks complete. New requests, however,
    // should not be allowed after a query complete is received.
    sourceCompletionMap.remove(queryIdentifier);
    String savedQueryId = queryIdentifierToHiveQueryId.remove(queryIdentifier);
    dagSpecificLocks.remove(queryIdentifier);
    if (savedQueryId != null) {
      ObjectCacheFactory.removeLlapQueryCache(savedQueryId);
    }
    return queryInfo;
  } finally {
    dagLock.writeLock().unlock();
  }
}
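The method takes a per-query write lock from getDagLock and later drops the entry from dagSpecificLocks, which implies a registry of per-key ReadWriteLocks. Below is a minimal sketch of that shape (the class PerKeyLocks and its method names are hypothetical, not Hive's API): completion takes the key's write lock, so it excludes concurrent readers of that one query's state instead of serializing all queries behind a single global lock.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class PerKeyLocks<K> {
  private final ConcurrentMap<K, ReadWriteLock> locks = new ConcurrentHashMap<>();

  // create the lock for a key lazily; computeIfAbsent is atomic per key
  ReadWriteLock get(K key) {
    return locks.computeIfAbsent(key, k -> new ReentrantReadWriteLock());
  }

  // drop the lock once the key's lifecycle ends, as queryComplete does
  // with dagSpecificLocks.remove(queryIdentifier)
  void remove(K key) {
    locks.remove(key);
  }
}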
Use of java.util.concurrent.locks.ReadWriteLock in project orientdb by orientechnologies.
The class OPartitionedLockManager, method acquireExclusiveLock.
@Override
public Lock acquireExclusiveLock(T value) {
  final int index;
  if (value == null)
    index = 0;
  else
    index = index(value.hashCode());

  if (useSpinLock) {
    OReadersWriterSpinLock spinLock = spinLocks[index];
    spinLock.acquireWriteLock();
    return new SpinLockWrapper(false, spinLock);
  }

  final ReadWriteLock rwLock = locks[index];
  final Lock lock = rwLock.writeLock();
  lock.lock();
  return lock;
}
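The method hashes the value onto one of a fixed array of lock stripes and locks only that stripe, so unrelated keys contend only when their hashes collide. A condensed sketch of the ReadWriteLock path is below; the power-of-two mask stands in for OrientDB's index() function (an assumption about its behavior), and the spin-lock branch is omitted.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class StripedLocks<T> {
  private final ReadWriteLock[] locks;
  private final int mask;

  StripedLocks(int stripes) { // stripes must be a power of two
    locks = new ReadWriteLock[stripes];
    for (int i = 0; i < stripes; i++) {
      locks[i] = new ReentrantReadWriteLock();
    }
    mask = stripes - 1;
  }

  // hash the value onto a stripe and take that stripe's write lock;
  // null keys map to stripe 0, as in the method above
  Lock acquireExclusiveLock(T value) {
    int index = value == null ? 0 : (value.hashCode() & mask);
    Lock lock = locks[index].writeLock();
    lock.lock();
    return lock; // returned so the caller can unlock() the same stripe
  }
}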
Use of java.util.concurrent.locks.ReadWriteLock in project orientdb by orientechnologies.
The class OPartitionedLockManager, method releaseSharedLock.
public void releaseSharedLock(final T value) {
  final int index;
  if (value == null)
    index = 0;
  else
    index = index(value.hashCode());

  if (useSpinLock) {
    OReadersWriterSpinLock spinLock = spinLocks[index];
    spinLock.releaseReadLock();
    return;
  }

  final ReadWriteLock rwLock = locks[index];
  final Lock lock = rwLock.readLock();
  lock.unlock();
}
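For symmetry, the shared-side pair on the StripedLocks sketch above would look like the following (assumed methods, not OrientDB's code). The property worth noting, visible in releaseSharedLock above, is that release re-derives the stripe from the value's hash, so the key's hashCode must be stable between acquire and release or the wrong stripe would be unlocked.

// additional methods for the StripedLocks sketch above
Lock acquireSharedLock(T value) {
  int index = value == null ? 0 : (value.hashCode() & mask);
  Lock lock = locks[index].readLock();
  lock.lock();
  return lock;
}

void releaseSharedLock(T value) {
  // re-derives the same stripe, assuming the value's hashCode is unchanged
  int index = value == null ? 0 : (value.hashCode() & mask);
  locks[index].readLock().unlock();
}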
Use of java.util.concurrent.locks.ReadWriteLock in project orientdb by orientechnologies.
The class OPartitionedLockManager, method tryAcquireExclusiveLock.
public boolean tryAcquireExclusiveLock(final T value, final long timeout) throws InterruptedException {
  if (useSpinLock)
    throw new IllegalStateException("Spin lock does not support try lock mode");

  final int index;
  if (value == null)
    index = 0;
  else
    index = index(value.hashCode());

  final ReadWriteLock rwLock = locks[index];
  final Lock lock = rwLock.writeLock();
  return lock.tryLock(timeout, TimeUnit.MILLISECONDS);
}
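A bounded-wait counterpart for the StripedLocks sketch, mirroring the method above, could look like this (an assumed method; it needs java.util.concurrent.TimeUnit imported). On true the caller owns the stripe's write lock and must eventually release it; on false nothing is held, which is why there is no lock object to return here.

// additional method for the StripedLocks sketch above
boolean tryAcquireExclusiveLock(T value, long timeoutMs) throws InterruptedException {
  int index = value == null ? 0 : (value.hashCode() & mask);
  // bounded wait on the stripe's write lock; on success the caller must
  // later call writeLock().unlock() on the same stripe
  return locks[index].writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS);
}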