
Example 76 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestMultiVersions, method testTimestamps.

/**
 * Tests user-specifiable timestamps for putting, getting and scanning. Also
 * tests the same in the presence of deletes. Test cores are written so they
 * can be run against an HRegion and against an HTable: i.e. both local and
 * remote.
 *
 * <p>Port of the old TestTimestamp test to here so it can better utilize the
 * spun-up cluster by running more than a single test per spin-up. Keeps the
 * old tests' craziness.
 */
@Test
public void testTimestamps() throws Exception {
    TableDescriptor tableDescriptor = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName()))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(TimestampTestBase.FAMILY_NAME)
            .setMaxVersions(3)
            .build())
        .build();
    this.admin.createTable(tableDescriptor);
    Table table = UTIL.getConnection().getTable(tableDescriptor.getTableName());
    // TODO: Remove these deprecated classes or pull them in here if this is
    // the only test using them.
    TimestampTestBase.doTestDelete(table, new FlushCache() {

        @Override
        public void flushcache() throws IOException {
            UTIL.getHBaseCluster().flushcache();
        }
    });
    // Perhaps drop and re-add the table between tests so the former does
    // not pollute the latter?  Or put these into separate tests.
    TimestampTestBase.doTestTimestampScanning(table, new FlushCache() {

        @Override
        public void flushcache() throws IOException {
            UTIL.getMiniHBaseCluster().flushcache();
        }
    });
    table.close();
}
Also used : FlushCache(org.apache.hadoop.hbase.TimestampTestBase.FlushCache) Table(org.apache.hadoop.hbase.client.Table) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
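
Outside the test-utility context, the same pattern (a column family capped at three versions, explicit-timestamp puts, multi-version gets) can be sketched against a plain client Connection. This is a minimal illustration, not code from the HBase test suite; the table, family, qualifier and row names ("tsDemo", "f", "q", "r1") are assumptions made for the example.

// A minimal sketch, not from the HBase test suite; the names below are hypothetical.
static void timestampSketch(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("tsDemo");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
            .setMaxVersions(3)
            .build())
        .build();
    try (Admin admin = conn.getAdmin()) {
        admin.createTable(td);
    }
    try (Table table = conn.getTable(tn)) {
        byte[] row = Bytes.toBytes("r1");
        byte[] fam = Bytes.toBytes("f");
        byte[] qual = Bytes.toBytes("q");
        // Write three versions with explicit timestamps 1..3.
        for (long ts = 1; ts <= 3; ts++) {
            Put p = new Put(row);
            p.addColumn(fam, qual, ts, Bytes.toBytes("v" + ts));
            table.put(p);
        }
        // Read back the retained versions and print each cell's timestamp.
        Get g = new Get(row);
        g.readVersions(3);
        Result r = table.get(g);
        for (Cell cell : r.getColumnCells(fam, qual)) {
            System.out.println(cell.getTimestamp() + " -> " + Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }
}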

Example 77 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestNamespace, method createTableInDefaultNamespace.

@Test
public void createTableInDefaultNamespace() throws Exception {
    TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()));
    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build();
    tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);
    TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
    admin.createTable(tableDescriptor);
    assertTrue(admin.listTableDescriptors().size() == 1);
    admin.disableTable(tableDescriptor.getTableName());
    admin.deleteTable(tableDescriptor.getTableName());
}
Also used : TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
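
For contrast with the default-namespace case above, a minimal sketch of the same create/disable/delete cycle in an explicitly created namespace might look like the following. It is not part of TestNamespace; the namespace and table names ("demo_ns", "t1") are made up for the example.

// A minimal sketch, not from TestNamespace; "demo_ns" and "t1" are hypothetical names.
static void createTableInCustomNamespace(Admin admin) throws IOException {
    admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
    TableName tn = TableName.valueOf("demo_ns", "t1");  // fully qualified as demo_ns:t1
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .build();
    admin.createTable(tableDescriptor);
    // ... exercise the table ...
    admin.disableTable(tn);
    admin.deleteTable(tn);
    admin.deleteNamespace("demo_ns");
}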

Example 78 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestNamespace, method beforeMethod.

@Before
public void beforeMethod() throws IOException {
    for (TableDescriptor desc : admin.listTableDescriptors(Pattern.compile(prefix + ".*"))) {
        admin.disableTable(desc.getTableName());
        admin.deleteTable(desc.getTableName());
    }
    for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        if (ns.getName().startsWith(prefix)) {
            admin.deleteNamespace(ns.getName());
        }
    }
}
Also used : TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Before(org.junit.Before)
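
As a rough illustration of what this cleanup targets, the sketch below creates the kind of prefixed namespace and table that beforeMethod would later remove. It is not part of TestNamespace; the Admin handle and prefix are assumed to come from the test class, and the "_ns"/"_table" suffixes are invented for the example.

// A minimal sketch, not from TestNamespace; assumes the test class's Admin and prefix.
static void createPrefixedFixtures(Admin admin, String prefix) throws IOException {
    // An empty namespace whose name starts with the prefix: removed by the namespace loop above.
    admin.createNamespace(NamespaceDescriptor.create(prefix + "_ns").build());
    // A table whose name starts with the prefix: matched by the Pattern lookup,
    // then disabled and deleted by the table loop above.
    TableDescriptor tableDescriptor = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(prefix + "_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    admin.createTable(tableDescriptor);
}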

Example 79 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestNamespace, method verifyReservedNS.

@Test
public void verifyReservedNS() throws IOException {
    // verify existence of reserved namespaces
    NamespaceDescriptor ns = admin.getNamespaceDescriptor(NamespaceDescriptor.DEFAULT_NAMESPACE.getName());
    assertNotNull(ns);
    assertEquals(ns.getName(), NamespaceDescriptor.DEFAULT_NAMESPACE.getName());
    ns = admin.getNamespaceDescriptor(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
    assertNotNull(ns);
    assertEquals(ns.getName(), NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
    assertEquals(2, admin.listNamespaces().length);
    assertEquals(2, admin.listNamespaceDescriptors().length);
    // verify existence of system tables
    Set<TableName> systemTables = Sets.newHashSet(TableName.META_TABLE_NAME);
    List<TableDescriptor> descs = admin.listTableDescriptorsByNamespace(Bytes.toBytes(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()));
    assertEquals(systemTables.size(), descs.size());
    for (TableDescriptor desc : descs) {
        assertTrue(systemTables.contains(desc.getTableName()));
    }
    // verify system tables aren't listed
    assertEquals(0, admin.listTableDescriptors().size());
    // Try creating default and system namespaces.
    boolean exceptionCaught = false;
    try {
        admin.createNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE);
    } catch (IOException exp) {
        LOG.warn(exp.toString(), exp);
        exceptionCaught = true;
    } finally {
        assertTrue(exceptionCaught);
    }
    exceptionCaught = false;
    try {
        admin.createNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE);
    } catch (IOException exp) {
        LOG.warn(exp.toString(), exp);
        exceptionCaught = true;
    } finally {
        assertTrue(exceptionCaught);
    }
}
Also used : IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
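
The boolean-flag-plus-finally pattern above can also be written with assertThrows. This is a hedged alternative sketch, assuming a JUnit version (4.13 or later) where org.junit.Assert.assertThrows is available; it is not the form used in the HBase source.

// Alternative sketch, assuming JUnit 4.13+ assertThrows; not the form used in TestNamespace.
assertThrows(IOException.class,
    () -> admin.createNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE));
assertThrows(IOException.class,
    () -> admin.createNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE));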

Example 80 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestRegionPlacement, method verifyRegionServerUpdated.

/**
 * Verify that all the online region servers have been updated to the
 * latest assignment plan.
 * @param plan the favored nodes assignment plan to verify against
 * @throws IOException
 */
private void verifyRegionServerUpdated(FavoredNodesPlan plan) throws IOException {
    // Verify all region servers contain the correct favored nodes information
    SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    for (int i = 0; i < SLAVES; i++) {
        HRegionServer rs = cluster.getRegionServer(i);
        for (Region region : rs.getRegions(TableName.valueOf("testRegionAssignment"))) {
            InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion(region.getRegionInfo().getEncodedName());
            String regionName = region.getRegionInfo().getRegionNameAsString();
            List<ServerName> favoredServerList = plan.getAssignmentMap().get(regionName);
            // except for hbase:meta and ROOT
            if (favoredServerList == null) {
                TableDescriptor desc = region.getTableDescriptor();
                // Verify they are ROOT and hbase:meta regions since no favored nodes
                assertNull(favoredSocketAddress);
                assertTrue("User region " + region.getTableDescriptor().getTableName() + " should have favored nodes", desc.isMetaRegion());
            } else {
                // For user region, the favored nodes in the region server should be
                // identical to favored nodes in the assignmentPlan
                assertTrue(favoredSocketAddress.length == favoredServerList.size());
                assertTrue(favoredServerList.size() > 0);
                for (int j = 0; j < favoredServerList.size(); j++) {
                    InetSocketAddress addrFromRS = favoredSocketAddress[j];
                    InetSocketAddress addrFromPlan = InetSocketAddress.createUnresolved(favoredServerList.get(j).getHostname(), favoredServerList.get(j).getPort());
                    assertNotNull(addrFromRS);
                    assertNotNull(addrFromPlan);
                    assertTrue("Region server " + rs.getServerName().getAddress() + " has the " + positions[j] + " for region " + region.getRegionInfo().getRegionNameAsString() + " is " + addrFromRS + " which is inconsistent with the plan " + addrFromPlan, addrFromRS.equals(addrFromPlan));
                }
            }
        }
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) InetSocketAddress(java.net.InetSocketAddress) ServerName(org.apache.hadoop.hbase.ServerName) Region(org.apache.hadoop.hbase.regionserver.Region) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer)
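
The favored-nodes lookups above go through region-server internals, but the meta-versus-user distinction the test hinges on can be sketched with public Admin calls alone. This is a minimal illustration, not code from TestRegionPlacement; the Connection and ServerName parameters are assumed inputs.

// A minimal client-side sketch, not from TestRegionPlacement; parameters are assumed inputs.
static void printRegionKinds(Connection conn, ServerName server) throws IOException {
    try (Admin admin = conn.getAdmin()) {
        for (RegionInfo regionInfo : admin.getRegions(server)) {
            TableDescriptor desc = admin.getDescriptor(regionInfo.getTable());
            String kind = desc.isMetaRegion() ? "meta" : "user";
            System.out.println(regionInfo.getRegionNameAsString() + " -> " + kind);
        }
    }
}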

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 639
Test (org.junit.Test): 356
TableName (org.apache.hadoop.hbase.TableName): 237
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 180
IOException (java.io.IOException): 151
Put (org.apache.hadoop.hbase.client.Put): 142
Admin (org.apache.hadoop.hbase.client.Admin): 136
Path (org.apache.hadoop.fs.Path): 124
Table (org.apache.hadoop.hbase.client.Table): 121
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 96
Configuration (org.apache.hadoop.conf.Configuration): 91
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 66
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 64
Connection (org.apache.hadoop.hbase.client.Connection): 59
Scan (org.apache.hadoop.hbase.client.Scan): 50
Get (org.apache.hadoop.hbase.client.Get): 49
List (java.util.List): 39