Example 76 with CountDownLatch

use of java.util.concurrent.CountDownLatch in project hadoop by apache.

the class TestInMemorySCMStore method testAddResourceRefAddResourceConcurrency.

@Test
public void testAddResourceRefAddResourceConcurrency() throws Exception {
    startEmptyStore();
    final String key = "key1";
    final String fileName = "foo.jar";
    final String user = "user";
    final ApplicationId id = createAppId(1, 1L);
    // add the resource and add the resource ref at the same time
    ExecutorService exec = HadoopExecutors.newFixedThreadPool(2);
    final CountDownLatch start = new CountDownLatch(1);
    Callable<String> addKeyTask = new Callable<String>() {

        @Override
        public String call() throws Exception {
            // block until the main thread opens the start gate
            start.await();
            return store.addResource(key, fileName);
        }
    };
    Callable<String> addAppIdTask = new Callable<String>() {

        @Override
        public String call() throws Exception {
            start.await();
            return store.addResourceReference(key, new SharedCacheResourceReference(id, user));
        }
    };
    Future<String> addAppIdFuture = exec.submit(addAppIdTask);
    Future<String> addKeyFuture = exec.submit(addKeyTask);
    // start them at the same time
    start.countDown();
    // get the results
    String addKeyResult = addKeyFuture.get();
    String addAppIdResult = addAppIdFuture.get();
    assertEquals(fileName, addKeyResult);
    System.out.println("addAppId() result: " + addAppIdResult);
    // it may be null or the fileName depending on the timing
    assertTrue(addAppIdResult == null || addAppIdResult.equals(fileName));
    exec.shutdown();
}
Also used : java.util.concurrent.ExecutorService, org.apache.hadoop.yarn.api.records.ApplicationId, java.util.concurrent.CountDownLatch, java.util.concurrent.Callable, org.junit.Test
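
The latch here is a one-shot start gate: both callables block on start.await() until the test thread calls start.countDown(), so addResource() and addResourceReference() are released as close to simultaneously as possible. A minimal, self-contained sketch of the same gate using only java.util.concurrent (class and task names are illustrative, not Hadoop code):

import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class StartGateSketch {

    public static void main(String[] args) throws Exception {
        final CountDownLatch start = new CountDownLatch(1);
        ExecutorService exec = Executors.newFixedThreadPool(2);
        Callable<String> taskA = () -> {
            // both tasks block here until the gate opens
            start.await();
            return "A ran";
        };
        Callable<String> taskB = () -> {
            start.await();
            return "B ran";
        };
        Future<String> a = exec.submit(taskA);
        Future<String> b = exec.submit(taskB);
        // open the gate: both tasks are released together and race from here
        start.countDown();
        System.out.println(a.get() + ", " + b.get());
        exec.shutdown();
    }
}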

Example 77 with CountDownLatch

use of java.util.concurrent.CountDownLatch in project hbase by apache.

the class TestRowProcessorEndpoint method concurrentExec.

private void concurrentExec(final Runnable task, final int numThreads) throws Throwable {
    startSignal = new CountDownLatch(numThreads);
    doneSignal = new CountDownLatch(numThreads);
    for (int i = 0; i < numThreads; ++i) {
        new Thread(new Runnable() {

            @Override
            public void run() {
                try {
                    startSignal.countDown();
                    // each thread announces arrival, then waits until all have arrived
                    startSignal.await();
                    task.run();
                } catch (Throwable e) {
                    failures.incrementAndGet();
                    e.printStackTrace();
                }
                doneSignal.countDown();
            }
        }).start();
    }
    doneSignal.await();
}
Also used : java.util.concurrent.CountDownLatch
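
Note that startSignal is used as a barrier rather than a gate: it is initialized to numThreads, and each thread counts down and then awaits, so no thread proceeds until all have arrived; doneSignal then lets the caller block until every thread has finished. A standalone sketch of that choreography (names are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class StartDoneSketch {

    public static void main(String[] args) throws InterruptedException {
        final int numThreads = 4;
        final CountDownLatch startSignal = new CountDownLatch(numThreads);
        final CountDownLatch doneSignal = new CountDownLatch(numThreads);
        final AtomicInteger failures = new AtomicInteger();
        for (int i = 0; i < numThreads; i++) {
            new Thread(() -> {
                try {
                    startSignal.countDown(); // announce arrival
                    startSignal.await();     // wait until every thread has arrived
                    // ... the concurrent work under test would run here ...
                } catch (Throwable t) {
                    failures.incrementAndGet();
                } finally {
                    doneSignal.countDown();  // always report completion
                }
            }).start();
        }
        doneSignal.await(); // block until all threads are done
        System.out.println("failures: " + failures.get());
    }
}

Moving doneSignal.countDown() into a finally block is a small hardening over the original snippet: it guarantees the caller is released even if the task throws before reporting.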

Example 78 with CountDownLatch

use of java.util.concurrent.CountDownLatch in project hbase by apache.

the class TestZooKeeperTableArchiveClient method testArchivingOnSingleTable.

@Test(timeout = 300000)
public void testArchivingOnSingleTable() throws Exception {
    createArchiveDirectory();
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    Configuration conf = UTIL.getConfiguration();
    // setup the delegate
    Stoppable stop = new StoppableImplementation();
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<Region> regions = new ArrayList<>();
    regions.add(region);
    when(rss.getOnlineRegions()).thenReturn(regions);
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
    compactionCleaner.chore();
    // get the current hfiles in the archive directory
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
    runCleaner(cleaner, finished, stop);
    // we know the cleaner ran, so now check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
    // but we still have the archive directory
    assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
Also used : org.apache.hadoop.fs.Path, org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hbase.HColumnDescriptor, org.apache.hadoop.hbase.util.StoppableImplementation, java.util.ArrayList, org.apache.hadoop.hbase.Stoppable, org.apache.hadoop.hbase.master.cleaner.HFileCleaner, java.util.concurrent.CountDownLatch, org.apache.hadoop.hbase.regionserver.HRegion, org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger, org.apache.hadoop.fs.FileSystem, org.apache.hadoop.hbase.regionserver.Region, org.junit.Test
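
The latch returned by setupCleanerWatching is created with files.size(), so it is evidently counted down once per file the cleaner examines; runCleaner can then block until the chore has seen every file before the test re-reads the directory. The general shape, waiting on N asynchronous events before asserting, looks like this minimal sketch (the worker here is simulated):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class WaitForEventsSketch {

    public static void main(String[] args) throws InterruptedException {
        final int expectedEvents = 3; // e.g. one per archived file
        final CountDownLatch finished = new CountDownLatch(expectedEvents);
        // simulated asynchronous worker, standing in for the cleaner chore
        new Thread(() -> {
            for (int i = 0; i < expectedEvents; i++) {
                // ... examine one file ...
                finished.countDown(); // signal that one event was handled
            }
        }).start();
        // bound the wait so a broken worker fails fast instead of hanging the test
        if (!finished.await(30, TimeUnit.SECONDS)) {
            throw new AssertionError("worker did not finish in time");
        }
        // now it is safe to assert on the worker's side effects
    }
}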

Example 79 with CountDownLatch

use of java.util.concurrent.CountDownLatch in project hbase by apache.

the class TestRemoteBackup method testFullBackupRemote.

/**
   * Verify that a remote full backup of a single table with data is created correctly.
   * @throws Exception
   */
@Test
public void testFullBackupRemote() throws Exception {
    LOG.info("test remote full backup on a single table");
    final CountDownLatch latch = new CountDownLatch(1);
    final int NB_ROWS_IN_FAM3 = 6;
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] fam2Name = Bytes.toBytes("f2");
    final Connection conn = ConnectionFactory.createConnection(conf1);
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                latch.await();
            } catch (InterruptedException ie) {
                // interrupt is ignored here; fall through and write the rows anyway
            }
            try {
                HTable t1 = (HTable) conn.getTable(table1);
                Put p1;
                for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
                    p1 = new Put(Bytes.toBytes("row-t1" + i));
                    p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
                    t1.put(p1);
                }
                LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
                t1.close();
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    t.start();
    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
    // family 2 is MOB enabled
    HColumnDescriptor hcd = new HColumnDescriptor(fam2Name);
    hcd.setMobEnabled(true);
    hcd.setMobThreshold(0L);
    table1Desc.addFamily(hcd);
    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
    SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
    HTable t1 = (HTable) conn.getTable(table1);
    int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    latch.countDown();
    String backupId = backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete " + backupId);
    Assert.assertEquals(TEST_UTIL.countRows(t1, famName), NB_ROWS_IN_BATCH);
    t.join();
    Assert.assertEquals(TEST_UTIL.countRows(t1, fam3Name), NB_ROWS_IN_FAM3);
    t1.close();
    TableName[] tablesRestoreFull = new TableName[] { table1 };
    TableName[] tablesMapFull = new TableName[] { table1_restore };
    BackupAdmin client = getBackupAdmin();
    client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tablesRestoreFull, tablesMapFull, false));
    // check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    // #5.2 - checking row count of tables for full restore
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
    int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
    Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
    int rows1 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    Assert.assertEquals(rows0, rows1);
    hTable.close();
    hAdmin.close();
}
Also used : org.apache.hadoop.hbase.HColumnDescriptor, org.apache.hadoop.hbase.client.Connection, java.io.IOException, java.util.concurrent.CountDownLatch, org.apache.hadoop.hbase.client.HTable, org.apache.hadoop.hbase.client.Put, org.apache.hadoop.hbase.TableName, org.apache.hadoop.hbase.client.HBaseAdmin, org.junit.Test
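
The latch usage in this test is inverted relative to the earlier examples: the background writer parks on latch.await() as soon as it starts, and the main thread calls latch.countDown() right before kicking off the backup, so the NB_ROWS_IN_FAM3 writes race the full backup; that is exactly why the restored count cnt3 is only asserted to fall between 0 and NB_ROWS_IN_FAM3. A distilled sketch of that handoff with no HBase types (all names illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class ConcurrentWriterSketch {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch go = new CountDownLatch(1);
        final AtomicInteger rowsWritten = new AtomicInteger();
        Thread writer = new Thread(() -> {
            try {
                go.await(); // park until the main thread is ready
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                return;
            }
            for (int i = 0; i < 6; i++) {
                rowsWritten.incrementAndGet(); // stands in for t1.put(...)
            }
        });
        writer.start();
        // ... main thread does its setup (schema changes, initial load) ...
        go.countDown(); // release the writer...
        // ... and immediately start the long-running operation (the backup)
        writer.join();  // later, join the writer before the final assertions
        System.out.println("rows written concurrently: " + rowsWritten.get());
    }
}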

Example 80 with CountDownLatch

use of java.util.concurrent.CountDownLatch in project hbase by apache.

the class TestWeakObjectPool method testCongestion.

@Test(timeout = 1000)
public void testCongestion() throws Exception {
    final int THREAD_COUNT = 100;
    final AtomicBoolean assertionFailed = new AtomicBoolean();
    final AtomicReference<Object> expectedObjRef = new AtomicReference<>();
    final CountDownLatch prepareLatch = new CountDownLatch(THREAD_COUNT);
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch endLatch = new CountDownLatch(THREAD_COUNT);
    for (int i = 0; i < THREAD_COUNT; i++) {
        new Thread() {

            @Override
            public void run() {
                prepareLatch.countDown();
                try {
                    startLatch.await();
                    Object obj = pool.get("a");
                    if (!expectedObjRef.compareAndSet(null, obj)) {
                        if (expectedObjRef.get() != obj) {
                            assertionFailed.set(true);
                        }
                    }
                } catch (Exception e) {
                    assertionFailed.set(true);
                } finally {
                    endLatch.countDown();
                }
            }
        }.start();
    }
    prepareLatch.await();
    startLatch.countDown();
    endLatch.await();
    if (assertionFailed.get()) {
        Assert.fail();
    }
}
Also used : java.util.concurrent.atomic.AtomicBoolean, java.util.concurrent.atomic.AtomicReference, java.util.concurrent.CountDownLatch, org.junit.Test
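
This test composes three latches: prepareLatch makes the main thread wait until every worker has been scheduled, startLatch then releases them all at once to maximize contention on pool.get("a"), and endLatch lets the main thread wait for all workers before checking the failure flag. That prepare/start/end trio is the canonical CountDownLatch choreography; a minimal self-contained version (names illustrative):

import java.util.concurrent.CountDownLatch;

public class PrepareStartEndSketch {

    public static void main(String[] args) throws InterruptedException {
        final int threadCount = 8;
        final CountDownLatch prepare = new CountDownLatch(threadCount);
        final CountDownLatch start = new CountDownLatch(1);
        final CountDownLatch end = new CountDownLatch(threadCount);
        for (int i = 0; i < threadCount; i++) {
            new Thread(() -> {
                prepare.countDown(); // this worker is ready
                try {
                    start.await();   // wait for the starting gun
                    // ... contended work goes here ...
                } catch (InterruptedException ignored) {
                    // a real test would record this as a failure
                } finally {
                    end.countDown(); // done, pass or fail
                }
            }).start();
        }
        prepare.await();   // all workers exist and are ready
        start.countDown(); // fire the starting gun
        end.await();       // wait for every worker to finish
        System.out.println("all " + threadCount + " threads completed");
    }
}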

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch): 5355, Test (org.junit.Test): 2594, IOException (java.io.IOException): 631, AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 550, AtomicReference (java.util.concurrent.atomic.AtomicReference): 501, AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 475, ArrayList (java.util.ArrayList): 471, QuickTest (com.hazelcast.test.annotation.QuickTest): 375, ParallelTest (com.hazelcast.test.annotation.ParallelTest): 355, ExecutorService (java.util.concurrent.ExecutorService): 322, Test (org.testng.annotations.Test): 310, HazelcastInstance (com.hazelcast.core.HazelcastInstance): 251, List (java.util.List): 212, HashMap (java.util.HashMap): 207, HttpServletResponse (javax.servlet.http.HttpServletResponse): 207, ExecutionException (java.util.concurrent.ExecutionException): 203, HttpServletRequest (javax.servlet.http.HttpServletRequest): 189, Ignite (org.apache.ignite.Ignite): 188, ServletException (javax.servlet.ServletException): 183, TimeoutException (java.util.concurrent.TimeoutException): 168