Example 51 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hadoop by apache.

The class TestFairCallQueue, method assertCanPut.

// Assert that the queue accepts exactly numberOfPuts items
public void assertCanPut(BlockingQueue<Schedulable> cq, int numberOfPuts, int putAttempts) throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(numberOfPuts);
    Putter putter = new Putter(cq, putAttempts, null, latch);
    Thread t = new Thread(putter);
    t.start();
    latch.await();
    assertEquals(numberOfPuts, putter.callsAdded);
    t.interrupt();
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch)
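
The pattern here — hand a latch to a worker, await it, then assert — is easy to lift out of the test. Below is a minimal standalone sketch of the same idea; since the Putter helper is not shown, a plain Runnable stands in for it, and all names and sizes are illustrative rather than taken from the Hadoop source:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;

public class LatchedWorkerDemo {
    public static void main(String[] args) throws InterruptedException {
        final int expectedPuts = 3;
        final BlockingQueue<String> queue = new LinkedBlockingQueue<>(expectedPuts);
        final CountDownLatch latch = new CountDownLatch(expectedPuts);

        // Worker counts the latch down once per successful put, mirroring
        // the role the (unshown) Putter helper plays in the test above.
        Thread worker = new Thread(() -> {
            for (int i = 0; i < expectedPuts; i++) {
                try {
                    queue.put("item-" + i);
                    latch.countDown();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        });
        worker.start();

        // Block until the worker has completed exactly expectedPuts puts.
        latch.await();
        System.out.println("queue size after latch released: " + queue.size());
        worker.interrupt();
    }
}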

Example 52 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hadoop by apache.

The class TestLazyPersistFiles, method testConcurrentWrites.

/**
   * Concurrent write with eviction: RAM_DISK can hold 9 replicas,
   * while 4 threads each write 5 replicas.
   * @throws IOException
   * @throws InterruptedException
   */
@Test
public void testConcurrentWrites() throws IOException, InterruptedException {
    getClusterBuilder().setRamDiskReplicaCapacity(9).build();
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    final int SEED = 0xFADED;
    final int NUM_WRITERS = 4;
    final int NUM_WRITER_PATHS = 5;
    Path[][] paths = new Path[NUM_WRITERS][NUM_WRITER_PATHS];
    for (int i = 0; i < NUM_WRITERS; i++) {
        paths[i] = new Path[NUM_WRITER_PATHS];
        for (int j = 0; j < NUM_WRITER_PATHS; j++) {
            paths[i][j] = new Path("/" + METHOD_NAME + ".Writer" + i + ".File." + j + ".dat");
        }
    }
    final CountDownLatch latch = new CountDownLatch(NUM_WRITERS);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(THREADPOOL_SIZE);
    for (int i = 0; i < NUM_WRITERS; i++) {
        Runnable writer = new WriterRunnable(i, paths[i], SEED, latch, testFailed);
        executor.execute(writer);
    }
    // Give the lazy writer several intervals to persist replicas to disk.
    Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
    triggerBlockReport();
    // Wait for all writer threads to finish before checking for failures
    latch.await();
    assertThat(testFailed.get(), is(false));
}
Also used : Path(org.apache.hadoop.fs.Path) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ExecutorService(java.util.concurrent.ExecutorService) CountDownLatch(java.util.concurrent.CountDownLatch) Test(org.junit.Test)
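
A common shape in this test: each of N workers counts the latch down exactly once, and the main thread blocks until all have finished before inspecting a shared failure flag. Here is a minimal sketch of that shape with the HDFS write work stubbed out — WriterRunnable's internals are not shown above, so the task body below is a hypothetical stand-in:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConcurrentWritersDemo {
    public static void main(String[] args) throws InterruptedException {
        final int numWriters = 4;
        final CountDownLatch latch = new CountDownLatch(numWriters);
        final AtomicBoolean failed = new AtomicBoolean(false);
        ExecutorService executor = Executors.newFixedThreadPool(numWriters);

        for (int i = 0; i < numWriters; i++) {
            final int id = i;
            executor.execute(() -> {
                try {
                    // Stand-in for the real write work done by each writer.
                    System.out.println("writer " + id + " finished");
                } catch (Exception e) {
                    failed.set(true);
                } finally {
                    // Count down exactly once per writer, even on failure,
                    // so the main thread can never hang on await().
                    latch.countDown();
                }
            });
        }

        latch.await();            // wait for all writers to report in
        executor.shutdown();
        System.out.println("any failures? " + failed.get());
    }
}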

Example 53 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hadoop by apache.

The class TestBlockManager, method testBlockReportQueueing.

@Test
public void testBlockReportQueueing() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        final FSNamesystem fsn = cluster.getNamesystem();
        final BlockManager bm = fsn.getBlockManager();
        final ExecutorService executor = Executors.newCachedThreadPool();
        final CyclicBarrier startBarrier = new CyclicBarrier(2);
        final CountDownLatch endLatch = new CountDownLatch(3);
        final CountDownLatch doneLatch = new CountDownLatch(1);
        // create a task intended to block while processing, thus causing
        // the queue to back up.  Simulates how a full block report (BR)
        // is processed.
        FutureTask<?> blockingOp = new FutureTask<Void>(new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                bm.runBlockOp(new Callable<Void>() {

                    @Override
                    public Void call() throws InterruptedException, BrokenBarrierException {
                        // use a barrier to control the blocking.
                        startBarrier.await();
                        endLatch.countDown();
                        return null;
                    }
                });
                // signal that runBlockOp returned
                doneLatch.countDown();
                return null;
            }
        });
        // create an async task.  Simulates how an incremental
        // block report (IBR) is processed.
        Callable<?> asyncOp = new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                bm.enqueueBlockOp(new Runnable() {

                    @Override
                    public void run() {
                        // use the latch to signal if the op has run.
                        endLatch.countDown();
                    }
                });
                return null;
            }
        };
        // submit the blocking op; a timed get lets us test whether it blocks.
        Future<?> blockedFuture = executor.submit(blockingOp);
        boolean isBlocked = false;
        try {
            // wait 1s; if the op were not blocked it would complete almost instantly.
            blockedFuture.get(1, TimeUnit.SECONDS);
        } catch (TimeoutException te) {
            isBlocked = true;
        }
        assertTrue(isBlocked);
        // should effectively return immediately since calls are queued.
        // however they should be backed up in the queue behind the blocking
        // operation.
        executor.submit(asyncOp).get(1, TimeUnit.SECONDS);
        executor.submit(asyncOp).get(1, TimeUnit.SECONDS);
        // check the async calls are queued, and first is still blocked.
        assertEquals(2, bm.getBlockOpQueueLength());
        assertFalse(blockedFuture.isDone());
        // unblock the queue, wait for last op to complete, check the blocked
        // call has returned
        startBarrier.await(1, TimeUnit.SECONDS);
        assertTrue(endLatch.await(1, TimeUnit.SECONDS));
        assertEquals(0, bm.getBlockOpQueueLength());
        assertTrue(doneLatch.await(1, TimeUnit.SECONDS));
    } finally {
        cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) Callable(java.util.concurrent.Callable) CyclicBarrier(java.util.concurrent.CyclicBarrier) FutureTask(java.util.concurrent.FutureTask) ExecutorService(java.util.concurrent.ExecutorService) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
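
The interesting trick above is pairing a CyclicBarrier (to hold an operation open on demand) with CountDownLatches (to observe when the queued work drains). A stripped-down sketch of that barrier-plus-latch combination, independent of the BlockManager APIs and with illustrative names:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;

public class BarrierPlusLatchDemo {
    public static void main(String[] args) throws Exception {
        // Barrier of 2: the worker parks until the main thread also arrives.
        final CyclicBarrier startBarrier = new CyclicBarrier(2);
        // Latch of 1: flips once the worker gets past the barrier.
        final CountDownLatch endLatch = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
            try {
                startBarrier.await();   // blocks here: simulates the long op
                endLatch.countDown();   // signals the op has completed
            } catch (Exception e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();

        // The latch has not been counted down, so the op is still pending.
        System.out.println("op still pending? " + (endLatch.getCount() == 1));
        startBarrier.await();           // release the worker on demand
        endLatch.await();               // observe its completion
        System.out.println("worker released and finished");
    }
}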

Example 54 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hadoop by apache.

The class TestBlockManager, method testAsyncIBR.

// spam the block manager with IBRs to verify queuing is occurring.
@Test
public void testAsyncIBR() throws Exception {
    Logger.getRootLogger().setLevel(Level.WARN);
    // will create files with many small blocks.
    final int blkSize = 4 * 1024;
    final int fileSize = blkSize * 100;
    final byte[] buf = new byte[2 * blkSize];
    final int numWriters = 4;
    final int repl = 3;
    final CyclicBarrier barrier = new CyclicBarrier(numWriters);
    final CountDownLatch writeLatch = new CountDownLatch(numWriters);
    final AtomicBoolean failure = new AtomicBoolean();
    final Configuration conf = new HdfsConfiguration();
    // setLong (not getLong) is needed here: reading the key has no effect,
    // and the default minimum block size would reject the 4KB blocks below.
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, blkSize);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
    try {
        cluster.waitActive();
        // create multiple writer threads, each creating a file with many
        // blocks.  will test that concurrent writing causes IBR batching
        // in the NN
        Thread[] writers = new Thread[numWriters];
        for (int i = 0; i < writers.length; i++) {
            final Path p = new Path("/writer" + i);
            writers[i] = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        FileSystem fs = cluster.getFileSystem();
                        FSDataOutputStream os = fs.create(p, true, buf.length, (short) repl, blkSize);
                        // align writers for maximum chance of IBR batching.
                        barrier.await();
                        int remaining = fileSize;
                        while (remaining > 0) {
                            os.write(buf);
                            remaining -= buf.length;
                        }
                        os.close();
                    } catch (Exception e) {
                        e.printStackTrace();
                        failure.set(true);
                    }
                    // let main thread know we are done.
                    writeLatch.countDown();
                }
            });
            writers[i].start();
        }
        // when and how many IBRs are queued is indeterminate, so just watch
        // the metrics and verify something was queued during execution.
        boolean sawQueued = false;
        while (!writeLatch.await(10, TimeUnit.MILLISECONDS)) {
            assertFalse(failure.get());
            MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
            long queued = MetricsAsserts.getIntGauge("BlockOpsQueued", rb);
            sawQueued |= (queued > 0);
        }
        assertFalse(failure.get());
        assertTrue(sawQueued);
        // verify that batching of the IBRs occurred.
        MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
        long batched = MetricsAsserts.getLongCounter("BlockOpsBatched", rb);
        assertTrue(batched > 0);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) CountDownLatch(java.util.concurrent.CountDownLatch) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) BrokenBarrierException(java.util.concurrent.BrokenBarrierException) RemoteException(org.apache.hadoop.ipc.RemoteException) CyclicBarrier(java.util.concurrent.CyclicBarrier) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
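
Note how the main thread uses writeLatch.await with a short timeout as a combined wait-and-poll loop: every timed-out await is an opportunity to sample the metrics before checking completion again. A self-contained sketch of that polling idiom, with an AtomicLong standing in for the NameNode gauge (all names here are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class PollWhileWaitingDemo {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch done = new CountDownLatch(1);
        final AtomicLong gauge = new AtomicLong();   // stand-in for the metric

        Thread worker = new Thread(() -> {
            for (int i = 0; i < 50; i++) {
                gauge.incrementAndGet();             // simulated queue activity
            }
            done.countDown();
        });
        worker.start();

        boolean sawActivity = false;
        // await() with a short timeout doubles as the poll interval: each
        // failed wait is a chance to sample the gauge before trying again.
        while (!done.await(10, TimeUnit.MILLISECONDS)) {
            sawActivity |= gauge.get() > 0;
        }
        sawActivity |= gauge.get() > 0;   // final sample in case the worker
                                          // finished before the first poll
        System.out.println("saw activity: " + sawActivity);
    }
}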

Example 55 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hadoop by apache.

The class TestDecommissionWithStriped, method testDecommissionWithURBlockForSameBlockGroup.

@Test(timeout = 120000)
public void testDecommissionWithURBlockForSameBlockGroup() throws Exception {
    LOG.info("Starting test testDecommissionWithURBlocksForSameBlockGroup");
    final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
    int writeBytes = cellSize * dataBlocks * 2;
    writeStripedFile(dfs, ecFile, writeBytes);
    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
    final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0).get(0);
    DatanodeInfo[] dnLocs = lb.getLocations();
    assertEquals(dataBlocks + parityBlocks, dnLocs.length);
    int decommNodeIndex = dataBlocks - 1;
    int stopNodeIndex = 1;
    // add the nodes which will be decommissioning
    decommisionNodes.add(dnLocs[decommNodeIndex]);
    // stop excess dns to avoid immediate reconstruction.
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    List<DataNodeProperties> stoppedDns = new ArrayList<>();
    for (DatanodeInfo liveDn : info) {
        boolean usedNode = false;
        for (DatanodeInfo datanodeInfo : dnLocs) {
            if (liveDn.getXferAddr().equals(datanodeInfo.getXferAddr())) {
                usedNode = true;
                break;
            }
        }
        if (!usedNode) {
            DataNode dn = cluster.getDataNode(liveDn.getIpcPort());
            stoppedDns.add(cluster.stopDataNode(liveDn.getXferAddr()));
            cluster.setDataNodeDead(dn.getDatanodeId());
            LOG.info("stop datanode " + dn.getDatanodeId().getHostName());
        }
    }
    DataNode dn = cluster.getDataNode(dnLocs[stopNodeIndex].getIpcPort());
    cluster.stopDataNode(dnLocs[stopNodeIndex].getXferAddr());
    cluster.setDataNodeDead(dn.getDatanodeId());
    numDNs = numDNs - 1;
    // Decommission node in a new thread. Verify that node is decommissioned.
    // A count of 1 makes the timed await below actually wait for the signal;
    // new CountDownLatch(0) would be released before the thread even starts.
    final CountDownLatch decomStarted = new CountDownLatch(1);
    Thread decomTh = new Thread() {

        public void run() {
            try {
                decomStarted.countDown();
                decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
            } catch (Exception e) {
                LOG.error("Exception while decommissioning", e);
                Assert.fail("Shouldn't throw exception!");
            }
        }
    };
    int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
    int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
    decomTh.start();
    decomStarted.await(5, TimeUnit.SECONDS);
    // grace period to trigger decommissioning call
    Thread.sleep(3000);
    // start datanode so that decommissioning live node will be finished
    for (DataNodeProperties dnp : stoppedDns) {
        cluster.restartDataNode(dnp);
        LOG.info("Restarts stopped datanode:{} to trigger block reconstruction", dnp.datanode);
    }
    cluster.waitActive();
    LOG.info("Waiting to finish decommissioning node:{}", decommisionNodes);
    // wait up to 20 seconds for decommissioning to finish
    decomTh.join(20000);
    LOG.info("Finished decommissioning node:{}", decommisionNodes);
    assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
    assertEquals(liveDecomissioned + decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
    // Ensure decommissioned datanode is not automatically shutdown
    DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
    assertEquals("All datanodes must be alive", numDNs, client.datanodeReport(DatanodeReportType.LIVE).length);
    assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes, null, blockGroupSize);
    cleanupFile(dfs, ecFile);
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) IOException(java.io.IOException) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Test(org.junit.Test)
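
decomStarted is a start signal: the background thread counts it down as its first action, so the main thread knows decommissioning is underway before it starts restarting datanodes. A minimal sketch of that one-shot start-signal idiom (names and sleep durations are illustrative, not from the Hadoop source):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class StartSignalDemo {
    public static void main(String[] args) throws InterruptedException {
        // Count of 1, not 0: an already-open latch would make await()
        // a no-op and the start signal meaningless.
        final CountDownLatch started = new CountDownLatch(1);

        Thread slowTask = new Thread(() -> {
            started.countDown();        // announce that work has begun
            try {
                Thread.sleep(500);      // the long-running phase
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        slowTask.start();

        // Wait (bounded) for the signal before doing anything that depends
        // on the task being underway; then join with a deadline.
        boolean begun = started.await(5, TimeUnit.SECONDS);
        System.out.println("task started: " + begun);
        slowTask.join(2000);
    }
}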

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch): 5355
Test (org.junit.Test): 2594
IOException (java.io.IOException): 631
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 550
AtomicReference (java.util.concurrent.atomic.AtomicReference): 501
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 475
ArrayList (java.util.ArrayList): 471
QuickTest (com.hazelcast.test.annotation.QuickTest): 375
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 355
ExecutorService (java.util.concurrent.ExecutorService): 322
Test (org.testng.annotations.Test): 310
HazelcastInstance (com.hazelcast.core.HazelcastInstance): 251
List (java.util.List): 212
HashMap (java.util.HashMap): 207
HttpServletResponse (javax.servlet.http.HttpServletResponse): 207
ExecutionException (java.util.concurrent.ExecutionException): 203
HttpServletRequest (javax.servlet.http.HttpServletRequest): 189
Ignite (org.apache.ignite.Ignite): 188
ServletException (javax.servlet.ServletException): 183
TimeoutException (java.util.concurrent.TimeoutException): 168