
Example 1 with ReadWriteLock

Use of java.util.concurrent.locks.ReadWriteLock in project hadoop by apache.

The class TestLogAggregationService, method testFixedSizeThreadPool.

@Test(timeout = 30000)
public void testFixedSizeThreadPool() throws Exception {
    // store configured thread pool size temporarily for restoration
    int initThreadPoolSize = conf.getInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, YarnConfiguration.DEFAULT_NM_LOG_AGGREGATION_THREAD_POOL_SIZE);
    int threadPoolSize = 3;
    conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, threadPoolSize);
    DeletionService delSrvc = mock(DeletionService.class);
    LocalDirsHandlerService dirSvc = mock(LocalDirsHandlerService.class);
    when(dirSvc.getLogDirs()).thenThrow(new RuntimeException());
    LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, delSrvc, dirSvc);
    logAggregationService.init(this.conf);
    logAggregationService.start();
    ExecutorService executorService = logAggregationService.threadPool;
    // used to block threads in the thread pool because main thread always
    // acquires the write lock first.
    final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    final Lock rLock = rwLock.readLock();
    final Lock wLock = rwLock.writeLock();
    try {
        wLock.lock();
        Runnable runnable = new Runnable() {

            @Override
            public void run() {
                try {
                    // threads in the thread pool running this will be blocked
                    rLock.tryLock(35000, TimeUnit.MILLISECONDS);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    rLock.unlock();
                }
            }
        };
        // submit (threadPoolSize + 1) runnables; at most threadPoolSize threads are
        // created in the thread pool, each of which is blocked on the read lock.
        for (int i = 0; i < threadPoolSize + 1; i++) {
            executorService.submit(runnable);
        }
        // count the number of currently running LogAggregationService threads
        int runningThread = ((ThreadPoolExecutor) executorService).getActiveCount();
        assertEquals(threadPoolSize, runningThread);
    } finally {
        wLock.unlock();
    }
    logAggregationService.stop();
    logAggregationService.close();
    // restore the original configurations to avoid side effects
    conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, initThreadPoolSize);
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Lock(java.util.concurrent.locks.Lock) DeletionService(org.apache.hadoop.yarn.server.nodemanager.DeletionService) ExecutorService(java.util.concurrent.ExecutorService) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
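The same blocking trick can be reproduced without any YARN classes. Below is a minimal, self-contained sketch (hypothetical class name, illustrative pool size and sleep): the main thread holds the write lock so every submitted task parks on the read lock, and the pool's active count can then be inspected.

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FixedPoolBlockingSketch {
    public static void main(String[] args) throws Exception {
        int poolSize = 3; // illustrative size
        ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(poolSize);
        ReadWriteLock rwLock = new ReentrantReadWriteLock();
        Lock rLock = rwLock.readLock();
        Lock wLock = rwLock.writeLock();
        // the main thread takes the write lock first, so pool tasks block on the read lock
        wLock.lock();
        try {
            Runnable blocked = () -> {
                rLock.lock(); // parks until the main thread releases the write lock
                rLock.unlock();
            };
            // submit one more task than the pool can run concurrently
            for (int i = 0; i < poolSize + 1; i++) {
                pool.submit(blocked);
            }
            Thread.sleep(500); // crude wait for the worker threads to start
            System.out.println("active threads = " + pool.getActiveCount()); // expected: poolSize
        } finally {
            wLock.unlock(); // lets the parked tasks complete
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}

A CountDownLatch tripped by each task before it blocks would make the wait deterministic; the sleep only keeps the sketch short.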

Example 2 with ReadWriteLock

Use of java.util.concurrent.locks.ReadWriteLock in project hive by apache.

The class QueryTracker, method queryComplete.

/**
   * Register completion for a query.
   * @param queryIdentifier identifier of the completed query
   * @param deleteDelay delay, in seconds, before local directories are cleaned up; -1 uses the configured default
   * @param isInternal whether this is an internal call that skips permission checks
   * @return the QueryInfo for the query, or null if the query is unknown
   */
QueryInfo queryComplete(QueryIdentifier queryIdentifier, long deleteDelay, boolean isInternal) throws IOException {
    if (deleteDelay == -1) {
        deleteDelay = defaultDeleteDelaySeconds;
    }
    ReadWriteLock dagLock = getDagLock(queryIdentifier);
    dagLock.writeLock().lock();
    try {
        QueryInfo queryInfo = isInternal ? queryInfoMap.get(queryIdentifier) : checkPermissionsAndGetQuery(queryIdentifier);
        rememberCompletedDag(queryIdentifier);
        LOG.info("Processing queryComplete for queryIdentifier={} with deleteDelay={} seconds", queryIdentifier, deleteDelay);
        queryInfoMap.remove(queryIdentifier);
        if (queryInfo == null) {
            // Should not happen.
            LOG.warn("Ignoring query complete for unknown dag: {}", queryIdentifier);
            return null;
        }
        String[] localDirs = queryInfo.getLocalDirsNoCreate();
        if (localDirs != null) {
            for (String localDir : localDirs) {
                cleanupDir(localDir, deleteDelay);
                ShuffleHandler.get().unregisterDag(localDir, queryInfo.getAppIdString(), queryInfo.getDagIdentifier());
            }
        }
        if (routeBasedLoggingEnabled) {
            // Inform the routing purgePolicy.
            // Send out a fake log message at the ERROR level with the MDC for this query set up. With an
            // LLAP custom appender this message will not be logged.
            final String dagId = queryInfo.getDagIdString();
            final String queryId = queryInfo.getHiveQueryIdString();
            MDC.put("dagId", dagId);
            MDC.put("queryId", queryId);
            try {
                LOG.error(QUERY_COMPLETE_MARKER, "Ignore this. Log line to interact with logger." + " Query complete: " + queryInfo.getHiveQueryIdString() + ", " + queryInfo.getDagIdString());
            } finally {
                MDC.clear();
            }
        }
        // Clearing this before sending a kill is OK, since canFinish will change to false.
        // Ideally this should be a state machine where kills are issued to the executor,
        // and the structures are cleaned up once all tasks complete. New requests, however,
        // should not be allowed after a query complete is received.
        sourceCompletionMap.remove(queryIdentifier);
        String savedQueryId = queryIdentifierToHiveQueryId.remove(queryIdentifier);
        dagSpecificLocks.remove(queryIdentifier);
        if (savedQueryId != null) {
            ObjectCacheFactory.removeLlapQueryCache(savedQueryId);
        }
        return queryInfo;
    } finally {
        dagLock.writeLock().unlock();
    }
}
Also used : ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) ReadWriteLock(java.util.concurrent.locks.ReadWriteLock)
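The dagLock above comes from a per-query lock map (dagSpecificLocks), and completion takes the write lock so it cannot race with readers still submitting work for the same query. A minimal sketch of that per-key pattern, with hypothetical names and none of the LLAP-specific cleanup:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical per-key lock registry mirroring the dagSpecificLocks idea above.
public class PerQueryLocks<K> {

    private final ConcurrentMap<K, ReadWriteLock> locks = new ConcurrentHashMap<>();

    // Readers (submissions, lookups) take the shared lock for their key.
    public ReadWriteLock lockFor(K key) {
        return locks.computeIfAbsent(key, k -> new ReentrantReadWriteLock());
    }

    // Completion takes the exclusive lock, runs the cleanup, then drops the entry,
    // mirroring the ordering in queryComplete() above.
    public void complete(K key, Runnable cleanup) {
        ReadWriteLock lock = lockFor(key);
        lock.writeLock().lock();
        try {
            cleanup.run();
        } finally {
            locks.remove(key);
            lock.writeLock().unlock();
        }
    }
}

Removing the map entry before releasing the write lock means a late caller either finds the old lock and blocks until cleanup finishes, or gets a fresh lock for a key that is already gone.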

Example 3 with ReadWriteLock

Use of java.util.concurrent.locks.ReadWriteLock in project orientdb by orientechnologies.

The class OPartitionedLockManager, method acquireExclusiveLock.

@Override
public Lock acquireExclusiveLock(T value) {
    final int index;
    if (value == null)
        index = 0;
    else
        index = index(value.hashCode());
    if (useSpinLock) {
        OReadersWriterSpinLock spinLock = spinLocks[index];
        spinLock.acquireWriteLock();
        return new SpinLockWrapper(false, spinLock);
    }
    final ReadWriteLock rwLock = locks[index];
    final Lock lock = rwLock.writeLock();
    lock.lock();
    return lock;
}
Also used : ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Lock(java.util.concurrent.locks.Lock)
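OPartitionedLockManager spreads contention by hashing each value to one of a fixed set of locks via index(value.hashCode()). A simplified, self-contained sketch of that striping idea (hypothetical class, power-of-two stripe count, naive spread function; the real class also supports the spin-lock mode seen above):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical striped lock manager: one ReadWriteLock per stripe, chosen by hashing the key.
public class StripedRwLocks<T> {

    private final ReadWriteLock[] locks;
    private final int mask;

    public StripedRwLocks(int stripes) {
        // stripes must be a power of two for the mask-based index below
        locks = new ReadWriteLock[stripes];
        for (int i = 0; i < stripes; i++) {
            locks[i] = new ReentrantReadWriteLock();
        }
        mask = stripes - 1;
    }

    private int index(T value) {
        // null maps to stripe 0, like the examples above; the spread function is an assumption
        return value == null ? 0 : (value.hashCode() & mask);
    }

    public Lock acquireExclusiveLock(T value) {
        Lock lock = locks[index(value)].writeLock();
        lock.lock();
        return lock;
    }

    public Lock acquireSharedLock(T value) {
        Lock lock = locks[index(value)].readLock();
        lock.lock();
        return lock;
    }
}

Callers release through the Lock instance they were handed back, since two different values may hash to two different stripes.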

Example 4 with ReadWriteLock

Use of java.util.concurrent.locks.ReadWriteLock in project orientdb by orientechnologies.

The class OPartitionedLockManager, method releaseSharedLock.

public void releaseSharedLock(final T value) {
    final int index;
    if (value == null)
        index = 0;
    else
        index = index(value.hashCode());
    if (useSpinLock) {
        OReadersWriterSpinLock spinLock = spinLocks[index];
        spinLock.releaseReadLock();
        return;
    }
    final ReadWriteLock rwLock = locks[index];
    final Lock lock = rwLock.readLock();
    lock.unlock();
}
Also used : ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Lock(java.util.concurrent.locks.Lock)

Example 5 with ReadWriteLock

Use of java.util.concurrent.locks.ReadWriteLock in project orientdb by orientechnologies.

The class OPartitionedLockManager, method tryAcquireExclusiveLock.

public boolean tryAcquireExclusiveLock(final T value, final long timeout) throws InterruptedException {
    if (useSpinLock)
        throw new IllegalStateException("Spin lock does not support try lock mode");
    final int index;
    if (value == null)
        index = 0;
    else
        index = index(value.hashCode());
    final ReadWriteLock rwLock = locks[index];
    final Lock lock = rwLock.writeLock();
    return lock.tryLock(timeout, TimeUnit.MILLISECONDS);
}
Also used : ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Lock(java.util.concurrent.locks.Lock)
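tryAcquireExclusiveLock simply returns the boolean from Lock.tryLock, so the caller must only unlock when the acquisition actually succeeded. A small standalone sketch of that contract (hypothetical class name, illustrative timeout):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TryWriteLockSketch {
    public static void main(String[] args) throws InterruptedException {
        ReadWriteLock rwLock = new ReentrantReadWriteLock();
        Lock wLock = rwLock.writeLock();
        // Unlocking a lock that was never acquired throws IllegalMonitorStateException,
        // so the boolean result of the timed tryLock must be checked first.
        if (wLock.tryLock(100, TimeUnit.MILLISECONDS)) {
            try {
                // critical section
            } finally {
                wLock.unlock();
            }
        } else {
            // timed out: back off, retry, or report contention
        }
    }
}

The timed rLock.tryLock in Example 1 skips this check; it only works out there because the main thread releases the write lock well before the 35-second timeout expires.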

Aggregations

ReadWriteLock (java.util.concurrent.locks.ReadWriteLock) 45
ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock) 36
Lock (java.util.concurrent.locks.Lock) 21
Test (org.junit.Test) 8
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture) 4
Nullable (org.jetbrains.annotations.Nullable) 4
ArrayList (java.util.ArrayList) 3
ReentrantLock (java.util.concurrent.locks.ReentrantLock) 3
InternalErrorException (cz.metacentrum.perun.core.api.exceptions.InternalErrorException) 2
Method (java.lang.reflect.Method) 2
HashMap (java.util.HashMap) 2
List (java.util.List) 2
ExecutorService (java.util.concurrent.ExecutorService) 2
Ignore (org.junit.Ignore) 2
Member (cz.metacentrum.perun.core.api.Member) 1
InvocationTargetException (java.lang.reflect.InvocationTargetException) 1
Random (java.util.Random) 1
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue) 1
Phaser (java.util.concurrent.Phaser) 1
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor) 1