
Example 1 with ReentrantReadWriteLock

Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hadoop by apache.

The class TestInstrumentedReadWriteLock, method testReadLockLongHoldingReport.

/**
   * Tests the warning when the read lock is held longer than threshold.
   * @throws Exception
   */
@Test(timeout = 10000)
public void testReadLockLongHoldingReport() throws Exception {
    String testname = name.getMethodName();
    final AtomicLong time = new AtomicLong(0);
    Timer mclock = new Timer() {

        @Override
        public long monotonicNow() {
            return time.get();
        }
    };
    final AtomicLong wlogged = new AtomicLong(0);
    final AtomicLong wsuppresed = new AtomicLong(0);
    ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(true);
    InstrumentedReadLock readLock = new InstrumentedReadLock(testname, LOG, readWriteLock, 2000, 300, mclock) {

        @Override
        protected void logWarning(long lockHeldTime, long suppressed) {
            wlogged.incrementAndGet();
            wsuppresed.set(suppressed);
        }
    };
    // t = 0
    readLock.lock();
    time.set(100);
    // t = 100
    readLock.unlock();
    assertEquals(0, wlogged.get());
    assertEquals(0, wsuppresed.get());
    // t = 100
    readLock.lock();
    time.set(500);
    // t = 500
    readLock.unlock();
    assertEquals(1, wlogged.get());
    assertEquals(0, wsuppresed.get());
    // the suppressed count only changes when a warning is actually logged
    // t = 500
    readLock.lock();
    time.set(900);
    // t = 900
    readLock.unlock();
    assertEquals(1, wlogged.get());
    assertEquals(0, wsuppresed.get());
    // t = 900
    readLock.lock();
    time.set(3000);
    // t = 3000
    readLock.unlock();
    assertEquals(2, wlogged.get());
    assertEquals(1, wsuppresed.get());
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) Test(org.junit.Test)
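For context, the test drives the lock-hold time through an injectable Timer whose monotonicNow() reads an AtomicLong, so the warning threshold can be crossed without sleeping. Below is a minimal, self-contained sketch of that pattern; TimedReadLock and Clock are illustrative names of my own, not Hadoop classes, and the real InstrumentedReadLock also rate-limits warnings and counts suppressed ones, which this sketch omits.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TimedReadLockSketch {

    // Injectable clock, analogous to Hadoop's Timer#monotonicNow().
    interface Clock {
        long monotonicNow();
    }

    // Hypothetical wrapper that warns when the read lock is held too long.
    static class TimedReadLock {
        private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock(true);
        private final long warnThresholdMs;
        private final Clock clock;
        // single-threaded sketch; the real class tracks hold times per thread
        private long acquiredAt;

        TimedReadLock(long warnThresholdMs, Clock clock) {
            this.warnThresholdMs = warnThresholdMs;
            this.clock = clock;
        }

        void lock() {
            rwLock.readLock().lock();
            acquiredAt = clock.monotonicNow();
        }

        void unlock() {
            long heldMs = clock.monotonicNow() - acquiredAt;
            rwLock.readLock().unlock();
            if (heldMs > warnThresholdMs) {
                System.out.println("read lock held for " + heldMs + " ms");
            }
        }
    }

    public static void main(String[] args) {
        AtomicLong fakeTime = new AtomicLong(0);
        TimedReadLock lock = new TimedReadLock(300, fakeTime::get);
        lock.lock();
        fakeTime.set(500);   // simulate a 500 ms hold without sleeping
        lock.unlock();       // warns, since 500 > 300
    }
}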

Example 2 with ReentrantReadWriteLock

Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hadoop by apache.

The class NameNodeAdapter, method spyOnFsLock.

public static ReentrantReadWriteLock spyOnFsLock(FSNamesystem fsn) {
    ReentrantReadWriteLock spy = Mockito.spy(fsn.getFsLockForTests());
    fsn.setFsLockForTests(spy);
    return spy;
}
Also used : ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock)
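A spy like this wraps the namesystem's real lock, so locking still behaves normally while a test can later verify how the lock was used. The following self-contained sketch (plain Mockito, not Hadoop code) shows the same spy-and-verify idea on a bare ReentrantReadWriteLock:

import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockSpySketch {
    public static void main(String[] args) {
        // Wrap a real lock in a Mockito spy: calls delegate to the real
        // instance, but interactions are recorded for later verification.
        ReentrantReadWriteLock spyLock = spy(new ReentrantReadWriteLock());

        // Code under test would receive spyLock instead of the real lock.
        spyLock.writeLock().lock();
        spyLock.writeLock().unlock();

        // The test can then assert how the lock was used.
        verify(spyLock, atLeastOnce()).writeLock();
    }
}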

Example 3 with ReentrantReadWriteLock

Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hadoop by apache.

The class TestLogAggregationService, method testFixedSizeThreadPool.

@Test(timeout = 30000)
public void testFixedSizeThreadPool() throws Exception {
    // store configured thread pool size temporarily for restoration
    int initThreadPoolSize = conf.getInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, YarnConfiguration.DEFAULT_NM_LOG_AGGREGATION_THREAD_POOL_SIZE);
    int threadPoolSize = 3;
    conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, threadPoolSize);
    DeletionService delSrvc = mock(DeletionService.class);
    LocalDirsHandlerService dirSvc = mock(LocalDirsHandlerService.class);
    when(dirSvc.getLogDirs()).thenThrow(new RuntimeException());
    LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, delSrvc, dirSvc);
    logAggregationService.init(this.conf);
    logAggregationService.start();
    ExecutorService executorService = logAggregationService.threadPool;
    // this lock is used to block the threads in the thread pool: the main
    // thread acquires the write lock first, so the submitted readers park
    // until it is released.
    final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    final Lock rLock = rwLock.readLock();
    final Lock wLock = rwLock.writeLock();
    try {
        wLock.lock();
        Runnable runnable = new Runnable() {

            @Override
            public void run() {
                try {
                    // threads in the thread pool running this block here until
                    // the main thread releases the write lock
                    boolean acquired = rLock.tryLock(35000, TimeUnit.MILLISECONDS);
                    if (acquired) {
                        rLock.unlock();
                    }
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
        // submit one more runnable than the pool size; only threadPoolSize
        // threads will be created in the thread pool, each of which is
        // blocked on the read lock.
        for (int i = 0; i < threadPoolSize + 1; i++) {
            executorService.submit(runnable);
        }
        // count the number of current running LogAggregationService threads
        int runningThread = ((ThreadPoolExecutor) executorService).getActiveCount();
        assertEquals(threadPoolSize, runningThread);
    } finally {
        wLock.unlock();
    }
    logAggregationService.stop();
    logAggregationService.close();
    // restore the original configurations to avoid side effects
    conf.setInt(YarnConfiguration.NM_LOG_AGGREGATION_THREAD_POOL_SIZE, initThreadPoolSize);
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) DeletionService(org.apache.hadoop.yarn.server.nodemanager.DeletionService) ExecutorService(java.util.concurrent.ExecutorService) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) LocalDirsHandlerService(org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService) Lock(java.util.concurrent.locks.Lock) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
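Stripped of the NodeManager wiring, the trick the test relies on is to hold the write lock in the main thread so every submitted task parks on the read lock, then sample ThreadPoolExecutor#getActiveCount(). A minimal, self-contained sketch of that technique follows; the pool size, sleep duration, and class name are illustrative choices of mine, not Hadoop code.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class PinnedPoolSketch {
    public static void main(String[] args) throws Exception {
        int poolSize = 3;
        ExecutorService pool = Executors.newFixedThreadPool(poolSize);
        ReadWriteLock rwLock = new ReentrantReadWriteLock();
        Lock rLock = rwLock.readLock();
        Lock wLock = rwLock.writeLock();

        wLock.lock();   // readers submitted below will block on this
        try {
            for (int i = 0; i < poolSize + 1; i++) {
                pool.submit(() -> {
                    rLock.lock();          // blocks until the write lock is released
                    try {
                        // simulated work
                    } finally {
                        rLock.unlock();
                    }
                });
            }
            Thread.sleep(200);  // give the pool time to start its workers
            // Only poolSize tasks can be active, even though poolSize + 1 were submitted.
            System.out.println(((ThreadPoolExecutor) pool).getActiveCount());  // typically prints 3
        } finally {
            wLock.unlock();
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}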

Example 4 with ReentrantReadWriteLock

Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hadoop by apache.

The class ProportionalCapacityPreemptionPolicyMockFramework, method mockQueueHierarchy.

/**
   * Format is:
   * <pre>
   * root (<partition-name-1>=[guaranteed max used pending (reserved)],<partition-name-2>=..);
   * -A(...);
   * --A1(...);
   * --A2(...);
   * -B...
   * </pre>
   * ";" splits queues, and there should no empty lines, no extra spaces
   *
   * Each queue has configurations specifying its capacities (per partition);
   * the format is:
   * <pre>
   * -<queueName> (<labelName1>=[guaranteed max used pending], \
   *               <labelName2>=[guaranteed max used pending])
   *              {key1=value1,key2=value2};  // Additional configs
   * </pre>
   */
@SuppressWarnings({ "unchecked", "rawtypes" })
private ParentQueue mockQueueHierarchy(String queueExprs) {
    String[] queueExprArray = queueExprs.split(";");
    ParentQueue rootQueue = null;
    for (int idx = 0; idx < queueExprArray.length; idx++) {
        String q = queueExprArray[idx];
        CSQueue queue;
        // Initialize queue
        if (isParent(queueExprArray, idx)) {
            ParentQueue parentQueue = mock(ParentQueue.class);
            queue = parentQueue;
            List<CSQueue> children = new ArrayList<CSQueue>();
            when(parentQueue.getChildQueues()).thenReturn(children);
            QueueOrderingPolicy policy = mock(QueueOrderingPolicy.class);
            when(policy.getConfigName()).thenReturn(CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
            when(parentQueue.getQueueOrderingPolicy()).thenReturn(policy);
        } else {
            LeafQueue leafQueue = mock(LeafQueue.class);
            final TreeSet<FiCaSchedulerApp> apps = new TreeSet<>(new Comparator<FiCaSchedulerApp>() {

                @Override
                public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
                    if (a1.getPriority() != null && !a1.getPriority().equals(a2.getPriority())) {
                        return a1.getPriority().compareTo(a2.getPriority());
                    }
                    int res = a1.getApplicationId().compareTo(a2.getApplicationId());
                    return res;
                }
            });
            when(leafQueue.getApplications()).thenReturn(apps);
            when(leafQueue.getAllApplications()).thenReturn(apps);
            OrderingPolicy<FiCaSchedulerApp> so = mock(OrderingPolicy.class);
            when(so.getPreemptionIterator()).thenAnswer(new Answer() {

                public Object answer(InvocationOnMock invocation) {
                    return apps.descendingIterator();
                }
            });
            when(leafQueue.getOrderingPolicy()).thenReturn(so);
            Map<String, TreeSet<RMContainer>> ignorePartitionContainers = new HashMap<>();
            when(leafQueue.getIgnoreExclusivityRMContainers()).thenReturn(ignorePartitionContainers);
            queue = leafQueue;
        }
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        when(queue.getReadLock()).thenReturn(lock.readLock());
        setupQueue(queue, q, queueExprArray, idx);
        if (queue.getQueueName().equals(ROOT)) {
            rootQueue = (ParentQueue) queue;
        }
    }
    return rootQueue;
}
Also used : ParentQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) QueueOrderingPolicy(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.QueueOrderingPolicy) LeafQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue) CSQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Answer(org.mockito.stubbing.Answer) TreeSet(java.util.TreeSet) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp)
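The line when(queue.getReadLock()).thenReturn(lock.readLock()) is worth calling out: backing a mock's lock accessor with a real lock lets code under test lock and unlock the mock safely instead of receiving Mockito's default null. A self-contained sketch of that pattern, with a hypothetical Queue interface standing in for CSQueue:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class MockLockSketch {
    // Hypothetical interface standing in for CSQueue, which exposes a read lock.
    interface Queue {
        ReentrantReadWriteLock.ReadLock getReadLock();
    }

    public static void main(String[] args) {
        // Back the mock with a real lock so lock()/unlock() actually work.
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        Queue queue = mock(Queue.class);
        when(queue.getReadLock()).thenReturn(lock.readLock());

        // Code under test would typically do:
        queue.getReadLock().lock();
        try {
            // read queue state
        } finally {
            queue.getReadLock().unlock();
        }
    }
}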

Example 5 with ReentrantReadWriteLock

Use of java.util.concurrent.locks.ReentrantReadWriteLock in project hadoop by apache.

The class TestProportionalCapacityPreemptionPolicy, method mockLeafQueue.

@SuppressWarnings("rawtypes")
LeafQueue mockLeafQueue(ParentQueue p, Resource tot, int i, Resource[] abs, Resource[] used, Resource[] pending, Resource[] reserved, int[] apps, Resource[] gran) {
    LeafQueue lq = mock(LeafQueue.class);
    ResourceCalculator rc = mCS.getResourceCalculator();
    List<ApplicationAttemptId> appAttemptIdList = new ArrayList<ApplicationAttemptId>();
    when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(false))).thenReturn(pending[i]);
    when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(true))).thenReturn(Resources.componentwiseMax(Resources.subtract(pending[i], reserved[i] == null ? Resources.none() : reserved[i]), Resources.none()));
    // need to set pending resource in resource usage as well
    ResourceUsage ru = new ResourceUsage();
    ru.setPending(pending[i]);
    ru.setUsed(used[i]);
    ru.setReserved(reserved[i]);
    when(lq.getQueueResourceUsage()).thenReturn(ru);
    // consider moving this to where CapacityScheduler::comparator is accessible
    final NavigableSet<FiCaSchedulerApp> qApps = new TreeSet<FiCaSchedulerApp>(new Comparator<FiCaSchedulerApp>() {

        @Override
        public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
            return a1.getApplicationAttemptId().compareTo(a2.getApplicationAttemptId());
        }
    });
    // applications are added in global L->R order in queues
    if (apps[i] != 0) {
        Resource aUsed = Resources.divideAndCeil(rc, used[i], apps[i]);
        Resource aPending = Resources.divideAndCeil(rc, pending[i], apps[i]);
        Resource aReserve = Resources.divideAndCeil(rc, reserved[i], apps[i]);
        for (int a = 0; a < apps[i]; ++a) {
            FiCaSchedulerApp mockFiCaApp = mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i]);
            qApps.add(mockFiCaApp);
            ++appAlloc;
            appAttemptIdList.add(mockFiCaApp.getApplicationAttemptId());
        }
        when(mCS.getAppsInQueue("queue" + (char) ('A' + i - 1))).thenReturn(appAttemptIdList);
    }
    when(lq.getApplications()).thenReturn(qApps);
    @SuppressWarnings("unchecked") OrderingPolicy<FiCaSchedulerApp> so = mock(OrderingPolicy.class);
    when(so.getPreemptionIterator()).thenAnswer(new Answer() {

        public Object answer(InvocationOnMock invocation) {
            return qApps.descendingIterator();
        }
    });
    when(lq.getOrderingPolicy()).thenReturn(so);
    if (setAMResourcePercent != 0.0f) {
        when(lq.getMaxAMResourcePerQueuePercent()).thenReturn(setAMResourcePercent);
    }
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    when(lq.getReadLock()).thenReturn(lock.readLock());
    when(lq.getPriority()).thenReturn(Priority.newInstance(0));
    p.getChildQueues().add(lq);
    return lq;
}
Also used : ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage) ArrayList(java.util.ArrayList) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Matchers.anyString(org.mockito.Matchers.anyString) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) LeafQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue) DefaultResourceCalculator(org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator) DominantResourceCalculator(org.apache.hadoop.yarn.util.resource.DominantResourceCalculator) ResourceCalculator(org.apache.hadoop.yarn.util.resource.ResourceCalculator) Answer(org.mockito.stubbing.Answer) TreeSet(java.util.TreeSet) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp)
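One detail worth noting: getPreemptionIterator() is stubbed with thenAnswer rather than thenReturn. The Answer re-evaluates on every call, so each invocation returns a fresh descending iterator over the set's current contents, whereas thenReturn would capture a single iterator over the set while it was still empty. A self-contained sketch of the difference, with a hypothetical Policy interface standing in for OrderingPolicy:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Iterator;
import java.util.TreeSet;

public class LiveIteratorStubSketch {
    // Hypothetical interface standing in for OrderingPolicy<FiCaSchedulerApp>.
    interface Policy {
        Iterator<Integer> getPreemptionIterator();
    }

    public static void main(String[] args) {
        TreeSet<Integer> apps = new TreeSet<>();
        Policy policy = mock(Policy.class);

        // thenAnswer re-runs the lambda on every call, so the iterator always
        // reflects the set's current contents; thenReturn would not.
        when(policy.getPreemptionIterator())
            .thenAnswer(invocation -> apps.descendingIterator());

        apps.add(1);
        apps.add(2);
        Iterator<Integer> it = policy.getPreemptionIterator();
        System.out.println(it.next());  // prints 2 (descending order)
    }
}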

Aggregations

ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock) 52
ReadWriteLock (java.util.concurrent.locks.ReadWriteLock) 17
Test (org.junit.Test) 15
ArrayList (java.util.ArrayList) 6
Lock (java.util.concurrent.locks.Lock) 5
DatasetGraph (org.apache.jena.sparql.core.DatasetGraph) 5
HashMap (java.util.HashMap) 4
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture) 4
Nullable (org.jetbrains.annotations.Nullable) 4
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 3
InternalErrorException (cz.metacentrum.perun.core.api.exceptions.InternalErrorException) 2
IOException (java.io.IOException) 2
List (java.util.List) 2
TreeSet (java.util.TreeSet) 2
ExecutorService (java.util.concurrent.ExecutorService) 2
AtomicLong (java.util.concurrent.atomic.AtomicLong) 2
ReentrantLock (java.util.concurrent.locks.ReentrantLock) 2
PostLoad (javax.persistence.PostLoad) 2
Configuration (org.apache.hadoop.conf.Configuration) 2
Path (org.apache.hadoop.fs.Path) 2