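All of the examples below use JUnit 4's @Ignore annotation to disable a test while keeping it compiled and reported as skipped. As a minimal, self-contained refresher (assuming only JUnit 4 on the classpath; the class and method names here are invented for illustration):

import org.junit.Ignore;
import org.junit.Test;

public class IgnoreExampleTest {

    // Runs normally.
    @Test
    public void runs() {
    }

    // Skipped by the runner; the reason string is surfaced in test reports.
    @Ignore("Disabled pending a fix; see the tracking issue")
    @Test
    public void skipped() {
        throw new AssertionError("never executed while @Ignore is present");
    }
}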

Example 56 with Ignore

Use of org.junit.Ignore in project hbase by apache.

From the class TestDistributedLogSplitting, method testLogReplayForDisablingTable.

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testLogReplayForDisablingTable() throws Exception {
    LOG.info("testLogReplayForDisablingTable");
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    startCluster(NUM_RS);
    final int NUM_REGIONS_TO_CREATE = 40;
    final int NUM_LOG_LINES = 1000;
    List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table disablingHT = installTable(zkw, "disableTable", "family", NUM_REGIONS_TO_CREATE);
    Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE, NUM_REGIONS_TO_CREATE);
    try {
        // turn off load balancing to prevent regions from moving around; otherwise
        // they will consume recovered.edits
        master.balanceSwitch(false);
        List<HRegionInfo> regions = null;
        HRegionServer hrs = null;
        boolean hasRegionsForBothTables = false;
        String tableName = null;
        for (int i = 0; i < NUM_RS; i++) {
            tableName = null;
            hasRegionsForBothTables = false;
            boolean isCarryingSystem = false;
            hrs = rsts.get(i).getRegionServer();
            regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo region : regions) {
                if (region.getTable().isSystemTable()) {
                    isCarryingSystem = true;
                    break;
                }
                if (tableName != null && !tableName.equalsIgnoreCase(region.getTable().getNameAsString())) {
                    // make sure we find an RS that has online regions for both "table" and "disableTable"
                    hasRegionsForBothTables = true;
                    break;
                } else if (tableName == null) {
                    tableName = region.getTable().getNameAsString();
                }
            }
            if (isCarryingSystem) {
                continue;
            }
            if (hasRegionsForBothTables) {
                break;
            }
        }
        // make sure we found a good RS
        Assert.assertTrue(hasRegionsForBothTables);
        LOG.info("#regions = " + regions.size());
        Iterator<HRegionInfo> it = regions.iterator();
        while (it.hasNext()) {
            HRegionInfo region = it.next();
            if (region.isMetaTable()) {
                it.remove();
            }
        }
        makeWAL(hrs, regions, "disableTable", "family", NUM_LOG_LINES, 100, false);
        makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
        LOG.info("Disabling table\n");
        TEST_UTIL.getAdmin().disableTable(TableName.valueOf(name.getMethodName()));
        TEST_UTIL.waitTableDisabled(TableName.valueOf(name.getMethodName()).getName());
        // abort RS
        LOG.info("Aborting region server: " + hrs.getServerName());
        hrs.abort("testing");
        // wait for the abort to complete
        TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return (cluster.getLiveRegionServerThreads().size() <= (NUM_RS - 1));
            }
        });
        // wait for regions to come online
        TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return (HBaseTestingUtility.getAllOnlineRegions(cluster).size() >= (NUM_REGIONS_TO_CREATE + 1));
            }
        });
        // wait until all regions are fully recovered
        TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(zkw.znodePaths.recoveringRegionsZNode, false);
                ServerManager serverManager = master.getServerManager();
                return (!serverManager.areDeadServersInProgress() && recoveringRegions != null && recoveringRegions.isEmpty());
            }
        });
        int count = 0;
        FileSystem fs = master.getMasterFileSystem().getFileSystem();
        Path rootdir = FSUtils.getRootDir(conf);
        Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf(name.getMethodName()));
        for (HRegionInfo hri : regions) {
            Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
            LOG.debug("checking edits dir " + editsdir);
            if (!fs.exists(editsdir))
                continue;
            FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {

                @Override
                public boolean accept(Path p) {
                    // keep everything except sequence-id marker files
                    return !WALSplitter.isSequenceIdFile(p);
                }
            });
            if (files != null) {
                for (FileStatus file : files) {
                    int c = countWAL(file.getPath(), fs, conf);
                    count += c;
                    LOG.info(c + " edits in " + file.getPath());
                }
            }
        }
        LOG.info("Verify edits in recovered.edits files");
        assertEquals(NUM_LOG_LINES, count);
        LOG.info("Verify replayed edits");
        assertEquals(NUM_LOG_LINES, TEST_UTIL.countRows(ht));
        // clean up
        for (HRegionInfo hri : regions) {
            Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
            fs.delete(editsdir, true);
        }
    } finally {
        // close both tables in finally so they are released even if the test fails
        if (disablingHT != null)
            disablingHT.close();
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) PathFilter(org.apache.hadoop.fs.PathFilter) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) OperationConflictException(org.apache.hadoop.hbase.exceptions.OperationConflictException) RegionInRecoveryException(org.apache.hadoop.hbase.exceptions.RegionInRecoveryException) IOException(java.io.IOException) TimeoutException(java.util.concurrent.TimeoutException) RetriesExhaustedWithDetailsException(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) FileSystem(org.apache.hadoop.fs.FileSystem) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Waiter(org.apache.hadoop.hbase.Waiter) Ignore(org.junit.Ignore) Test(org.junit.Test)
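
The three anonymous Waiter.Predicate blocks in this example all follow the same polling pattern: re-evaluate a condition every few hundred milliseconds until it holds or a timeout expires. A generic sketch of that idiom in plain Java (an illustration of the pattern, not HBase's actual Waiter implementation; the helper name is invented):

import java.util.function.BooleanSupplier;

final class PollingWait {

    // Re-check the condition every intervalMs until it returns true or
    // timeoutMs elapses; fail on timeout, mirroring TEST_UTIL.waitFor above.
    static void waitFor(long timeoutMs, long intervalMs, BooleanSupplier condition)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (condition.getAsBoolean()) {
                return;
            }
            Thread.sleep(intervalMs);
        }
        throw new AssertionError("condition not met within " + timeoutMs + " ms");
    }
}

With such a helper, the first wait above would read as waitFor(120000, 200, () -> cluster.getLiveRegionServerThreads().size() <= NUM_RS - 1).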

Example 57 with Ignore

Use of org.junit.Ignore in project hbase by apache.

From the class TestDistributedLogSplitting, method testSameVersionUpdatesRecoveryWithCompaction.

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testSameVersionUpdatesRecoveryWithCompaction() throws Exception {
    LOG.info("testSameVersionUpdatesRecoveryWithWrites");
    conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 30 * 1024);
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    startCluster(NUM_RS);
    final AtomicLong sequenceId = new AtomicLong(100);
    final int NUM_REGIONS_TO_CREATE = 40;
    final int NUM_LOG_LINES = 2000;
    // turn off load balancing to prevent regions from moving around; otherwise
    // they will consume recovered.edits
    master.balanceSwitch(false);
    List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
    try {
        List<HRegionInfo> regions = null;
        HRegionServer hrs = null;
        for (int i = 0; i < NUM_RS; i++) {
            boolean isCarryingMeta = false;
            hrs = rsts.get(i).getRegionServer();
            regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo region : regions) {
                if (region.isMetaRegion()) {
                    isCarryingMeta = true;
                    break;
                }
            }
            if (isCarryingMeta) {
                continue;
            }
            break;
        }
        LOG.info("#regions = " + regions.size());
        Iterator<HRegionInfo> it = regions.iterator();
        while (it.hasNext()) {
            HRegionInfo region = it.next();
            if (region.isMetaTable() || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
                it.remove();
            }
        }
        if (regions.isEmpty())
            return;
        HRegionInfo curRegionInfo = regions.get(0);
        byte[] startRow = curRegionInfo.getStartKey();
        if (startRow == null || startRow.length == 0) {
            startRow = new byte[] { 0, 0, 0, 0, 1 };
        }
        byte[] row = Bytes.incrementBytes(startRow, 1);
        // use the last 5 bytes because HBaseTestingUtility.createMultiRegions uses 5-byte keys
        row = Arrays.copyOfRange(row, 3, 8);
        long value = 0;
        final TableName tableName = TableName.valueOf(name.getMethodName());
        byte[] family = Bytes.toBytes("family");
        byte[] qualifier = Bytes.toBytes("c1");
        long timeStamp = System.currentTimeMillis();
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(family));
        final WAL wal = hrs.getWAL(curRegionInfo);
        for (int i = 0; i < NUM_LOG_LINES; i += 1) {
            WALEdit e = new WALEdit();
            value++;
            e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
            wal.append(curRegionInfo, new WALKey(curRegionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), e, true);
        }
        wal.sync();
        wal.shutdown();
        // wait for the abort to complete
        this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
        // verify we got the last value
        LOG.info("Verification Starts...");
        Get g = new Get(row);
        Result r = ht.get(g);
        long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
        // after flush & compaction
        LOG.info("Verification after flush...");
        TEST_UTIL.getAdmin().flush(tableName);
        TEST_UTIL.getAdmin().compact(tableName);
        // wait for the compaction to complete
        TEST_UTIL.waitFor(30000, 200, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return (TEST_UTIL.getAdmin().getCompactionState(tableName) == CompactionState.NONE);
            }
        });
        r = ht.get(g);
        theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
    } finally {
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) OperationConflictException(org.apache.hadoop.hbase.exceptions.OperationConflictException) RegionInRecoveryException(org.apache.hadoop.hbase.exceptions.RegionInRecoveryException) IOException(java.io.IOException) TimeoutException(java.util.concurrent.TimeoutException) RetriesExhaustedWithDetailsException(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) AtomicLong(java.util.concurrent.atomic.AtomicLong) Get(org.apache.hadoop.hbase.client.Get) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Waiter(org.apache.hadoop.hbase.Waiter) Ignore(org.junit.Ignore) Test(org.junit.Test)
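
Waiter.Predicate declares a single evaluate() method, so on Java 8+ the compaction wait in this test could be written as a lambda; a sketch (the cast pins the target type among the waitFor overloads):

// Equivalent to the anonymous Waiter.Predicate above.
TEST_UTIL.waitFor(30000, 200, (Waiter.Predicate<Exception>) () ->
    TEST_UTIL.getAdmin().getCompactionState(tableName) == CompactionState.NONE);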

Example 58 with Ignore

Use of org.junit.Ignore in project hbase by apache.

From the class TestDistributedLogSplitting, method testRecoveredEdits.

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testRecoveredEdits() throws Exception {
    LOG.info("testRecoveredEdits");
    // create more than one WAL file
    conf.setLong("hbase.regionserver.hlog.blocksize", 30 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
    startCluster(NUM_RS);
    final int NUM_LOG_LINES = 1000;
    final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
    // turn off load balancing to prevent regions from moving around; otherwise
    // they will consume recovered.edits
    master.balanceSwitch(false);
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    Path rootdir = FSUtils.getRootDir(conf);
    Table t = installTable(new ZooKeeperWatcher(conf, "table-creation", null), "table", "family", 40);
    try {
        TableName table = t.getName();
        List<HRegionInfo> regions = null;
        HRegionServer hrs = null;
        for (int i = 0; i < NUM_RS; i++) {
            boolean foundRs = false;
            hrs = rsts.get(i).getRegionServer();
            regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo region : regions) {
                if (region.getTable().getNameAsString().equalsIgnoreCase("table")) {
                    foundRs = true;
                    break;
                }
            }
            if (foundRs)
                break;
        }
        final Path logDir = new Path(rootdir, AbstractFSWALProvider.getWALDirectoryName(hrs.getServerName().toString()));
        LOG.info("#regions = " + regions.size());
        Iterator<HRegionInfo> it = regions.iterator();
        while (it.hasNext()) {
            HRegionInfo region = it.next();
            if (region.getTable().getNamespaceAsString().equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
                it.remove();
            }
        }
        makeWAL(hrs, regions, "table", "family", NUM_LOG_LINES, 100);
        slm.splitLogDistributed(logDir);
        int count = 0;
        for (HRegionInfo hri : regions) {
            Path tdir = FSUtils.getTableDir(rootdir, table);
            Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName()));
            LOG.debug("checking edits dir " + editsdir);
            FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {

                @Override
                public boolean accept(Path p) {
                    // keep everything except sequence-id marker files
                    return !WALSplitter.isSequenceIdFile(p);
                }
            });
            assertTrue("edits dir should have more than a single file in it. instead has " + files.length, files.length > 1);
            for (int i = 0; i < files.length; i++) {
                int c = countWAL(files[i].getPath(), fs, conf);
                count += c;
            }
            LOG.info(count + " edits in " + files.length + " recovered edits files.");
        }
        // check that the server's WAL directory is gone after the split
        assertFalse(fs.exists(logDir));
        assertEquals(NUM_LOG_LINES, count);
    } finally {
        if (t != null)
            t.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) PathFilter(org.apache.hadoop.fs.PathFilter) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) FileSystem(org.apache.hadoop.fs.FileSystem) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Ignore(org.junit.Ignore) Test(org.junit.Test)
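
Hadoop's PathFilter is likewise a single-method interface, so the anonymous filter that skips sequence-id files could be a lambda on Java 8+; a sketch:

// Keep everything in the recovered-edits dir except sequence-id marker files.
FileStatus[] files = fs.listStatus(editsdir, p -> !WALSplitter.isSequenceIdFile(p));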

Example 59 with Ignore

Use of org.junit.Ignore in project hbase by apache.

From the class TestDistributedLogSplitting, method testWorkerAbort.

/**
   * The original intention of this test was to force an abort of a region
   * server and to make sure that the failure path in the region servers is
   * properly evaluated. But it is difficult to ensure that the region server
   * doesn't finish the log splitting before it aborts. Also, there is now a
   * code path where the master will preempt the region server when the master
   * detects that the region server has aborted.
   * @throws Exception
   */
@Ignore("Disabled because flakey")
@Test(timeout = 300000)
public void testWorkerAbort() throws Exception {
    LOG.info("testWorkerAbort");
    startCluster(3);
    final int NUM_LOG_LINES = 10000;
    final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager();
    FileSystem fs = master.getMasterFileSystem().getFileSystem();
    final List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    HRegionServer hrs = findRSToKill(false, "table");
    Path rootdir = FSUtils.getRootDir(conf);
    final Path logDir = new Path(rootdir, AbstractFSWALProvider.getWALDirectoryName(hrs.getServerName().toString()));
    Table t = installTable(new ZooKeeperWatcher(conf, "table-creation", null), "table", "family", 40);
    try {
        makeWAL(hrs, ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices()), "table", "family", NUM_LOG_LINES, 100);
        new Thread() {

            @Override
            public void run() {
                waitForCounter(tot_wkr_task_acquired, 0, 1, 1000);
                for (RegionServerThread rst : rsts) {
                    rst.getRegionServer().abort("testing");
                    break;
                }
            }
        }.start();
        // slm.splitLogDistributed(logDir);
        FileStatus[] logfiles = fs.listStatus(logDir);
        TaskBatch batch = new TaskBatch();
        slm.enqueueSplitTask(logfiles[0].getPath().toString(), batch);
        // like waitForCounter, but wait for any one of the worker counters to move
        long curt = System.currentTimeMillis();
        long waitTime = 80000;
        long endt = curt + waitTime;
        while (curt < endt) {
            if ((tot_wkr_task_resigned.get() + tot_wkr_task_err.get() + tot_wkr_final_transition_failed.get() + tot_wkr_task_done.get() + tot_wkr_preempt_task.get()) == 0) {
                Thread.yield();
                curt = System.currentTimeMillis();
            } else {
                assertTrue(1 <= (tot_wkr_task_resigned.get() + tot_wkr_task_err.get() + tot_wkr_final_transition_failed.get() + tot_wkr_task_done.get() + tot_wkr_preempt_task.get()));
                return;
            }
        }
        fail("none of the following counters went up in " + waitTime + " milliseconds - " + "tot_wkr_task_resigned, tot_wkr_task_err, " + "tot_wkr_final_transition_failed, tot_wkr_task_done, " + "tot_wkr_preempt_task");
    } finally {
        if (t != null)
            t.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) TaskBatch(org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) FileSystem(org.apache.hadoop.fs.FileSystem) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) Ignore(org.junit.Ignore) Test(org.junit.Test)
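
The hand-rolled loop above polls five worker counters until any of them moves. With the Waiter utility used in the other examples, the same wait could be expressed as a single predicate; a sketch (same counters, same test class, with waitFor failing the test on timeout):

TEST_UTIL.waitFor(80000, 100, (Waiter.Predicate<Exception>) () ->
    tot_wkr_task_resigned.get() + tot_wkr_task_err.get()
        + tot_wkr_final_transition_failed.get() + tot_wkr_task_done.get()
        + tot_wkr_preempt_task.get() > 0);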

Example 60 with Ignore

Use of org.junit.Ignore in project hbase by apache.

From the class TestDistributedLogSplitting, method testNonceRecovery.

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testNonceRecovery() throws Exception {
    LOG.info("testNonceRecovery");
    final String TABLE_NAME = "table";
    final String FAMILY_NAME = "family";
    final int NUM_REGIONS_TO_CREATE = 40;
    conf.setLong("hbase.regionserver.hlog.blocksize", 100 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    startCluster(NUM_RS);
    master.balanceSwitch(false);
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, TABLE_NAME, FAMILY_NAME, NUM_REGIONS_TO_CREATE);
    NonceGeneratorWithDups ng = new NonceGeneratorWithDups();
    NonceGenerator oldNg = ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection) TEST_UTIL.getConnection(), ng);
    try {
        List<Increment> reqs = new ArrayList<>();
        for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
            HRegionServer hrs = rst.getRegionServer();
            List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo hri : hris) {
                if (TABLE_NAME.equalsIgnoreCase(hri.getTable().getNameAsString())) {
                    byte[] key = hri.getStartKey();
                    if (key == null || key.length == 0) {
                        key = Bytes.copy(hri.getEndKey());
                        --(key[key.length - 1]);
                    }
                    Increment incr = new Increment(key);
                    incr.addColumn(Bytes.toBytes(FAMILY_NAME), Bytes.toBytes("q"), 1);
                    ht.increment(incr);
                    reqs.add(incr);
                }
            }
        }
        HRegionServer hrs = findRSToKill(false, "table");
        abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
        ng.startDups();
        for (Increment incr : reqs) {
            try {
                ht.increment(incr);
                fail("should have thrown");
            } catch (OperationConflictException ope) {
                LOG.debug("Caught as expected: " + ope.getMessage());
            }
        }
    } finally {
        ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection) TEST_UTIL.getConnection(), oldNg);
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ArrayList(java.util.ArrayList) NonceGenerator(org.apache.hadoop.hbase.client.NonceGenerator) PerClientRandomNonceGenerator(org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Increment(org.apache.hadoop.hbase.client.Increment) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) OperationConflictException(org.apache.hadoop.hbase.exceptions.OperationConflictException) Ignore(org.junit.Ignore) Test(org.junit.Test)
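
The duplicate increments in this test are rejected because each mutation carries a client-generated nonce that the region servers remember, including across recovery. A conceptual sketch of the server-side check (illustrative only, not HBase's actual ServerNonceManager):

import java.util.HashSet;
import java.util.Set;

final class NonceCheck {

    private final Set<Long> seen = new HashSet<>();

    // Returns true if the operation may proceed; false means this nonce was
    // already used, so the replayed operation is rejected (surfaced to the
    // client in the test above as an OperationConflictException).
    synchronized boolean startOperation(long nonce) {
        return seen.add(nonce);
    }
}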
