
Example 41 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class TestConstraint method testConstraintPasses.

/**
   * Test that we run a passing constraint
   * @throws Exception
   */
@SuppressWarnings("unchecked")
@Test
public void testConstraintPasses() throws Exception {
    // create the table
    // it would be nice if this was also a method on the util
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for (byte[] family : new byte[][] { dummy, test }) {
        desc.addFamily(new HColumnDescriptor(family));
    }
    // add a constraint
    Constraints.add(desc, CheckWasRunConstraint.class);
    util.getAdmin().createTable(desc);
    Table table = util.getConnection().getTable(tableName);
    try {
        // test that we don't fail on a valid put
        Put put = new Put(row1);
        byte[] value = Integer.toString(10).getBytes();
        byte[] qualifier = new byte[0];
        put.addColumn(dummy, qualifier, value);
        table.put(put);
    } finally {
        table.close();
    }
    assertTrue(CheckWasRunConstraint.wasRun);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
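The CheckWasRunConstraint referenced above is a test-only constraint defined elsewhere in TestConstraint. As a rough sketch (not the project's actual implementation), a constraint of this kind could extend org.apache.hadoop.hbase.constraint.BaseConstraint and simply record that it was invoked:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;

// Hypothetical sketch of a "was run" constraint: it accepts every Put and
// only records that the constraint framework invoked it.
public class CheckWasRunConstraint extends BaseConstraint {

    public static volatile boolean wasRun = false;

    @Override
    public void check(Put put) {
        // No validation here; a real constraint would throw
        // ConstraintException to reject an invalid Put.
        wasRun = true;
    }
}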

Example 42 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class TestConstraint method testDisableConstraint.

/**
   * Check that if we just disable one constraint, the other constraints still run
   * @throws Throwable
   */
@SuppressWarnings("unchecked")
@Test
public void testDisableConstraint() throws Throwable {
    // create the table
    HTableDescriptor desc = new HTableDescriptor(tableName);
    // add a family to the table
    for (byte[] family : new byte[][] { dummy, test }) {
        desc.addFamily(new HColumnDescriptor(family));
    }
    // add a constraint to make sure the others still get run
    Constraints.add(desc, CheckWasRunConstraint.class);
    // add a constraint that always fails
    Constraints.add(desc, AllFailConstraint.class);
    // and then disable the failing constraint
    Constraints.disableConstraint(desc, AllFailConstraint.class);
    util.getAdmin().createTable(desc);
    Table table = util.getConnection().getTable(tableName);
    try {
        // test that we don't fail because it's disabled
        Put put = new Put(row1);
        byte[] qualifier = new byte[0];
        put.addColumn(dummy, qualifier, "pass".getBytes());
        table.put(put);
    } finally {
        table.close();
    }
    assertTrue(CheckWasRunConstraint.wasRun);
}
Also used : Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
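The same Constraints helper class that registers constraints can also toggle or remove them on the descriptor before createTable() is called. A minimal sketch, assuming the enableConstraint/remove counterparts to the disableConstraint call used above:

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.constraint.Constraints;

// Sketch: register two constraints, silence the failing one, and show how
// it could later be re-enabled or dropped from the descriptor entirely.
void configureConstraints(HTableDescriptor desc) throws IOException {
    Constraints.add(desc, CheckWasRunConstraint.class, AllFailConstraint.class);
    Constraints.disableConstraint(desc, AllFailConstraint.class);
    // Re-enable it ...
    Constraints.enableConstraint(desc, AllFailConstraint.class);
    // ... or remove it from the descriptor altogether.
    Constraints.remove(desc, AllFailConstraint.class);
}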

Example 43 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class TestPrefetch method testPrefetchSetInHCDWorks.

@Test
public void testPrefetchSetInHCDWorks() {
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("f"));
    hcd.setPrefetchBlocksOnOpen(true);
    Configuration c = HBaseConfiguration.create();
    assertFalse(c.getBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false));
    CacheConfig cc = new CacheConfig(c, hcd);
    assertTrue(cc.shouldPrefetchOnOpen());
}
Also used : HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Test(org.junit.Test)
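Prefetch can also be driven by the cluster Configuration rather than the column family descriptor. A hedged counterpart to the test above (same classes as in this example's import summary), assuming the configuration flag and the per-family flag are OR-ed together inside CacheConfig:

@Test
public void testPrefetchSetInConfigurationWorks() {
    // Enable prefetch globally via the Configuration key instead of the HCD.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
    // The family descriptor itself does not request prefetch.
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("f"));
    CacheConfig cc = new CacheConfig(conf, hcd);
    assertTrue(cc.shouldPrefetchOnOpen());
}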

Example 44 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class TestAssignmentManagerOnCluster method testOpenCloseRacing.

/**
   * Test that a region close racing with a region open is handled correctly
   */
@Test(timeout = 60000)
public void testOpenCloseRacing() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(FAMILY));
        admin.createTable(desc);
        Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
        HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
        MetaTableAccessor.addRegionToMeta(meta, hri);
        meta.close();
        MyRegionObserver.postOpenEnabled.set(true);
        MyRegionObserver.postOpenCalled = false;
        HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
        AssignmentManager am = master.getAssignmentManager();
        // Region will be opened, but it won't complete
        am.assign(hri);
        long end = EnvironmentEdgeManager.currentTime() + 20000;
        // Wait till postOpen is called
        while (!MyRegionObserver.postOpenCalled) {
            assertFalse("Timed out waiting for postOpen to be called", EnvironmentEdgeManager.currentTime() > end);
            Thread.sleep(300);
        }
        // Now let's unassign it; it should do nothing
        am.unassign(hri);
        RegionState state = am.getRegionStates().getRegionState(hri);
        ServerName oldServerName = state.getServerName();
        assertTrue(state.isOpening() && oldServerName != null);
        // Now the region is stuck in opening
        // Let's forcefully re-assign it to trigger closing/opening
        // racing. This test is to make sure this scenario
        // is handled properly.
        MyRegionObserver.postOpenEnabled.set(false);
        ServerName destServerName = null;
        int numRS = TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size();
        for (int i = 0; i < numRS; i++) {
            HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(i);
            if (!destServer.getServerName().equals(oldServerName)) {
                destServerName = destServer.getServerName();
                break;
            }
        }
        assertNotNull(destServerName);
        assertFalse("Region should be assigned on a new region server", oldServerName.equals(destServerName));
        List<HRegionInfo> regions = new ArrayList<>();
        regions.add(hri);
        am.assign(destServerName, regions);
        // let's check if it's assigned after it's out of transition
        am.waitOnRegionToClearRegionsInTransition(hri);
        assertTrue(am.waitForAssignment(hri));
        ServerName serverName = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(hri);
        TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 6000);
    } finally {
        MyRegionObserver.postOpenEnabled.set(false);
        TEST_UTIL.deleteTable(tableName);
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)
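The assignment-manager tests above use a bare new HColumnDescriptor(FAMILY) because only the region lifecycle matters there. For reference, a minimal sketch of a more fully configured family descriptor (illustrative values, not taken from the test; same classes as in this example's import summary):

// Sketch: per-family tuning commonly set on an HColumnDescriptor before
// the table is created.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("f"));
family.setMaxVersions(3);             // keep at most three versions per cell
family.setTimeToLive(7 * 24 * 3600);  // expire cells after roughly one week
family.setBlockCacheEnabled(true);    // cache data blocks for reads
desc.addFamily(family);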

Example 45 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class TestAssignmentManagerOnCluster method testAssignDisabledRegionBySSH.

/**
   * Test that a disabled region is ignored by SSH (ServerShutdownHandler)
   */
@Test(timeout = 60000)
public void testAssignDisabledRegionBySSH() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    MyMaster master;
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(FAMILY));
        admin.createTable(desc);
        Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
        HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
        MetaTableAccessor.addRegionToMeta(meta, hri);
        // Assign the region
        master = (MyMaster) cluster.getMaster();
        AssignmentManager am = master.getAssignmentManager();
        am.assign(hri);
        RegionStates regionStates = am.getRegionStates();
        ServerName metaServer = regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO);
        ServerName oldServerName = null;
        while (true) {
            assertTrue(am.waitForAssignment(hri));
            RegionState state = regionStates.getRegionState(hri);
            oldServerName = state.getServerName();
            if (!ServerName.isSameHostnameAndPort(oldServerName, metaServer)) {
                // Mark the hosting server aborted, but don't actually kill it.
                // It doesn't have meta on it.
                MyRegionServer.abortedServer = oldServerName;
                break;
            }
            int i = cluster.getServerWithMeta();
            HRegionServer rs = cluster.getRegionServer(i == 0 ? 1 : 0);
            oldServerName = rs.getServerName();
            master.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(oldServerName.getServerName()));
        }
        // Make sure the region is assigned on the dead server
        assertTrue(regionStates.isRegionOnline(hri));
        assertEquals(oldServerName, regionStates.getRegionServerOfRegion(hri));
        // Disable the table now.
        master.disableTable(hri.getTable(), HConstants.NO_NONCE, HConstants.NO_NONCE);
        // Kill the hosting server, which doesn't have meta on it.
        cluster.killRegionServer(oldServerName);
        cluster.waitForRegionServerToStop(oldServerName, -1);
        ServerManager serverManager = master.getServerManager();
        while (!serverManager.isServerDead(oldServerName) || serverManager.getDeadServers().areDeadServersInProgress()) {
            Thread.sleep(100);
        }
        // Wait till there are no more regions in transition; the region should then be offline.
        TEST_UTIL.waitUntilNoRegionsInTransition(60000);
        assertTrue(regionStates.isRegionOffline(hri));
    } finally {
        MyRegionServer.abortedServer = null;
        TEST_UTIL.deleteTable(tableName);
        cluster.startRegionServer();
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)
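The test drives the disable through HMaster directly (master.disableTable(...)); from client code the equivalent step would normally go through Admin. A small hedged sketch, assuming the synchronous Admin#disableTable behavior:

// Sketch: client-side counterpart of the master.disableTable(...) call above.
void disableThroughAdmin(Admin admin, TableName tableName) throws IOException {
    if (!admin.isTableDisabled(tableName)) {
        // disableTable blocks until the table has been disabled.
        admin.disableTable(tableName);
    }
}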

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 679
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 561
Test (org.junit.Test) 358
TableName (org.apache.hadoop.hbase.TableName) 200
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 137
Put (org.apache.hadoop.hbase.client.Put) 132
Table (org.apache.hadoop.hbase.client.Table) 118
IOException (java.io.IOException) 112
Admin (org.apache.hadoop.hbase.client.Admin) 112
Path (org.apache.hadoop.fs.Path) 81
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin) 74
ArrayList (java.util.ArrayList) 66
Configuration (org.apache.hadoop.conf.Configuration) 65
Connection (org.apache.hadoop.hbase.client.Connection) 52
Scan (org.apache.hadoop.hbase.client.Scan) 50
Result (org.apache.hadoop.hbase.client.Result) 45
FileSystem (org.apache.hadoop.fs.FileSystem) 44
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 42
Connection (java.sql.Connection) 41
Properties (java.util.Properties) 38