Example 41 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestHRegion, method testgetHDFSBlocksDistribution:

@Test
public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
    // break up the file into more pieces that can be distributed across the three nodes and we
    // won't be able to have the condition this test asserts: that at least one node has
    // a copy of all replicas -- with a small block size, blocks are spread evenly across
    // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
    // final int DEFAULT_BLOCK_SIZE = 1024;
    // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);
    // set up a cluster with 3 nodes
    MiniHBaseCluster cluster = null;
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;
    try {
        cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
        byte[][] families = { fam1, fam2 };
        Table ht = htu.createTable(tableName, families);
        // Setting up region
        byte[] row = Bytes.toBytes("row1");
        byte[] col = Bytes.toBytes("col1");
        Put put = new Put(row);
        put.addColumn(fam1, col, 1L, Bytes.toBytes("test1"));
        put.addColumn(fam2, col, 1L, Bytes.toBytes("test2"));
        ht.put(put);
        HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
        firstRegion.flush(true);
        HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
        // The replication factor was set to 2 above and we flushed 2 HFiles,
        // so there are 4 block replicas across the 3 datanodes; thus there
        // must be at least one host that has a replica of both HFiles. That host's
        // weight will be equal to the unique blocks' total weight.
        long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
        StringBuilder sb = new StringBuilder();
        for (String host : blocksDistribution1.getTopHosts()) {
            if (sb.length() > 0) sb.append(", ");
            sb.append(host);
            sb.append("=");
            sb.append(blocksDistribution1.getWeight(host));
        }
        String topHost = blocksDistribution1.getTopHosts().get(0);
        long topHostWeight = blocksDistribution1.getWeight(topHost);
        String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
        LOG.info(msg);
        assertEquals(msg, uniqueBlocksWeight1, topHostWeight);
        // use the static method to compute the value, it should be the same.
        // static method is used by load balancer or other components
        HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
        long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();
        assertEquals(uniqueBlocksWeight1, uniqueBlocksWeight2);
        ht.close();
    } finally {
        if (cluster != null) {
            htu.shutdownMiniCluster();
        }
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster), ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString), HDFSBlocksDistribution (org.apache.hadoop.hbase.HDFSBlocksDistribution), Put (org.apache.hadoop.hbase.client.Put), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), Test (org.junit.Test)
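
The assertion above rests on a pigeonhole argument: 2 HFiles with replication factor 2 give 4 block replicas spread over 3 datanodes, and since two replicas of the same block never land on the same node, at least one host must hold a replica of both files; that host's weight then equals the unique-blocks total weight. Below is a minimal standalone sketch of the same invariant using the HDFSBlocksDistribution methods exercised in the test; the hostnames and weights are made up for illustration.

HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
// A block from the first HFile: weight 100, replicated on host1 and host2.
dist.addHostsAndBlockWeight(new String[] { "host1", "host2" }, 100);
// A block from the second HFile: weight 50, replicated on host1 and host3.
dist.addHostsAndBlockWeight(new String[] { "host1", "host3" }, 50);
// Each block is counted once in the unique total: 100 + 50 = 150.
assertEquals(150, dist.getUniqueBlocksTotalWeight());
// host1 carries a replica of every block, so it is the top host and its
// weight equals the unique-blocks total -- the condition the test asserts.
assertEquals("host1", dist.getTopHosts().get(0));
assertEquals(150, dist.getWeight("host1"));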

Example 42 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestHRegionFileSystem, method testBlockStoragePolicy:

@Test
public void testBlockStoragePolicy() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    Configuration conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniCluster();
    HTable table = (HTable) TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
    HRegionFileSystem regionFs = getHRegionFS(table, conf);
    // The default block storage policy should be HOT.
    String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);
    // Recreate the table and make sure the storage policy can be set through configuration.
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
    TEST_UTIL.startMiniCluster();
    table = (HTable) TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    regionFs = getHRegionFS(table, conf);
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertEquals("WARM", spA);
        assertEquals("WARM", spB);
        // Alter the table's cf schema to change storage policies
        // and make sure it can override settings in conf.
        HColumnDescriptor hcdA = new HColumnDescriptor(Bytes.toString(FAMILIES[0]));
        // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor
        hcdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
        admin.modifyColumnFamily(TABLE_NAME, hcdA);
        while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().isRegionsInTransition()) {
            Thread.sleep(200);
            LOG.debug("Waiting on table to finish schema altering");
        }
        // alter through HColumnDescriptor#setStoragePolicy
        HColumnDescriptor hcdB = new HColumnDescriptor(Bytes.toString(FAMILIES[1]));
        hcdB.setStoragePolicy("ALL_SSD");
        admin.modifyColumnFamily(TABLE_NAME, hcdB);
        while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().isRegionsInTransition()) {
            Thread.sleep(200);
            LOG.debug("Waiting on table to finish schema altering");
        }
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertNotNull(spA);
        assertEquals("ONE_SSD", spA);
        assertNotNull(spB);
        assertEquals("ALL_SSD", spB);
        // Put and flush three times to create 3 store files.
        for (long i = 0; i < 3; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
            table.put(put);
            admin.flush(TABLE_NAME);
        }
        // there should be 3 files in store dir
        FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
        Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
        FileStatus[] storeFiles = FSUtils.listStatus(fs, storePath);
        assertNotNull(storeFiles);
        assertEquals(3, storeFiles.length);
        // The store temp dir still exists but is empty.
        Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
        assertTrue(fs.exists(storeTempDir));
        FileStatus[] tempFiles = FSUtils.listStatus(fs, storeTempDir);
        assertNull(tempFiles);
        // storage policy of cf temp dir and 3 store files should be ONE_SSD
        assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
        for (FileStatus status : storeFiles) {
            assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
        }
        // change storage policies by calling raw api directly
        regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
        regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertNotNull(spA);
        assertEquals("ALL_SSD", spA);
        assertNotNull(spB);
        assertEquals("ONE_SSD", spB);
    } finally {
        table.close();
        TEST_UTIL.deleteTable(TABLE_NAME);
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTable (org.apache.hadoop.hbase.client.HTable), Admin (org.apache.hadoop.hbase.client.Admin), Put (org.apache.hadoop.hbase.client.Put), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FileSystem (org.apache.hadoop.fs.FileSystem), HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem), Test (org.junit.Test)
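
The test above exercises three ways to choose a block storage policy, in increasing order of precedence: a cluster-wide default in the configuration, a per-family raw metadata value, and the per-family typed setter. Here is a condensed sketch of the three knobs, using the same keys and setters as the test; treat it as an illustration (with placeholder family names) rather than a complete program.

Configuration conf = HBaseConfiguration.create();
// 1. Cluster-wide default applied to every store.
conf.set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
// 2. Per-family override via the raw metadata key.
HColumnDescriptor hcdA = new HColumnDescriptor("cf_a");
hcdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
// 3. Per-family override via the typed setter.
HColumnDescriptor hcdB = new HColumnDescriptor("cf_b");
hcdB.setStoragePolicy("ALL_SSD");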

Example 43 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestHRegionInfo, method testReadAndWriteHRegionInfoFile:

@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
    Path basedir = htu.getDataTestDir();
    // Create a region.  That'll write the .regioninfo file.
    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(), fsTableDescriptors.get(TableName.META_TABLE_NAME));
    // Get modtime on the file.
    long modtime = getModTime(r);
    HBaseTestingUtility.closeRegionAndWAL(r);
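    // Sleep past the filesystem's one-second modtime granularity so a second
    // write of the .regioninfo file would surface as a newer timestamp.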
    Thread.sleep(1001);
    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, htu.getConfiguration());
    // Ensure the file is not written for a second time.
    long modtime2 = getModTime(r);
    assertEquals(modtime, modtime2);
    // Now load the file.
    HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
    assertEquals(hri, deserializedHri);
    HBaseTestingUtility.closeRegionAndWAL(r);
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Path (org.apache.hadoop.fs.Path), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), FSTableDescriptors (org.apache.hadoop.hbase.util.FSTableDescriptors), Test (org.junit.Test)

Example 44 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestRemoveRegionMetrics, method startCluster:

@BeforeClass
public static void startCluster() throws Exception {
    metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
    TEST_UTIL = new HBaseTestingUtility();
    conf = TEST_UTIL.getConfiguration();
    conf.getLong("hbase.splitlog.max.resubmit", 0);
    // Make the failure test faster
    conf.setInt("zookeeper.recovery.retry", 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
    TEST_UTIL.startMiniCluster(1, 2);
    cluster = TEST_UTIL.getHBaseCluster();
    cluster.waitForActiveAndReadyMaster();
    while (cluster.getLiveRegionServerThreads().size() < 2) {
        Threads.sleep(100);
    }
}
Also used: MetricsAssertHelper (org.apache.hadoop.hbase.test.MetricsAssertHelper), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), BeforeClass (org.junit.BeforeClass)

Example 45 with HBaseTestingUtility

Use of org.apache.hadoop.hbase.HBaseTestingUtility in project hbase by apache.

From the class TestSCVFWithMiniCluster, method setUp:

@BeforeClass
public static void setUp() throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1);
    Admin admin = util.getAdmin();
    destroy(admin, HBASE_TABLE_NAME);
    create(admin, HBASE_TABLE_NAME, FAMILY_A, FAMILY_B);
    admin.close();
    htable = util.getConnection().getTable(HBASE_TABLE_NAME);
    /* Add some values */
    List<Put> puts = new ArrayList<>();
    /* Add a row with 'a:foo' = false */
    Put put = new Put(Bytes.toBytes("1"));
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("false"));
    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    puts.add(put);
    /* Add a row with 'a:foo' = true */
    put = new Put(Bytes.toBytes("2"));
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("true"));
    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    puts.add(put);
    /* Add a row with 'a:foo' qualifier not set */
    put = new Put(Bytes.toBytes("3"));
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
    put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
    puts.add(put);
    htable.put(puts);
    /*
     * We want to filter out from the scan all rows that do not have the column 'a:foo' with value
     * 'false'. Only row with key '1' should be returned in the scan.
     */
    scanFilter = new SingleColumnValueFilter(FAMILY_A, QUALIFIER_FOO, CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("false")));
    ((SingleColumnValueFilter) scanFilter).setFilterIfMissing(true);
}
Also used: SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility), ArrayList (java.util.ArrayList), Admin (org.apache.hadoop.hbase.client.Admin), Put (org.apache.hadoop.hbase.client.Put), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), BeforeClass (org.junit.BeforeClass)
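
Only the setUp method is quoted above; a hypothetical scan that consumes scanFilter (the actual test methods are not shown here) would look like the sketch below. With setFilterIfMissing(true), only the row with key '1' should come back.

Scan scan = new Scan();
scan.setFilter(scanFilter);
try (ResultScanner scanner = htable.getScanner(scan)) {
    for (Result result : scanner) {
        // Rows '2' (a:foo = true) and '3' (a:foo missing) are filtered out.
        System.out.println(Bytes.toString(result.getRow())); // prints: 1
    }
}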

Aggregations

HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 136
Configuration (org.apache.hadoop.conf.Configuration): 50
BeforeClass (org.junit.BeforeClass): 49
Test (org.junit.Test): 42
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 35
Path (org.apache.hadoop.fs.Path): 29
Admin (org.apache.hadoop.hbase.client.Admin): 24
FileSystem (org.apache.hadoop.fs.FileSystem): 22
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 20
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 18
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 16
Before (org.junit.Before): 14
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 11
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 11
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 10
Table (org.apache.hadoop.hbase.client.Table): 8
HFileSystem (org.apache.hadoop.hbase.fs.HFileSystem): 8
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 7
Result (org.apache.hadoop.hbase.client.Result): 7