
Example 66 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestZooKeeper, method testSanity.

/**
 * Make sure we can use the cluster.
 */
private void testSanity(final String testName) throws Exception {
    String tableName = testName + "_" + EnvironmentEdgeManager.currentTime();
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam")).build();
    LOG.info("Creating table " + tableName);
    Admin admin = TEST_UTIL.getAdmin();
    try {
        admin.createTable(desc);
    } finally {
        admin.close();
    }
    Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
    Put put = new Put(Bytes.toBytes("testrow"));
    put.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("testdata"));
    LOG.info("Putting table " + tableName);
    table.put(put);
    table.close();
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Admin (org.apache.hadoop.hbase.client.Admin), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Put (org.apache.hadoop.hbase.client.Put)
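
A minimal follow-up sketch, not part of the original test: reading the row back with a Get to confirm the put landed, reusing TEST_UTIL and the names from testSanity above. Try-with-resources closes the Table automatically, avoiding the explicit close() calls.

// Sketch only: verify the write from testSanity. Assumes the same running
// TEST_UTIL mini-cluster and that "fam", "col", and "testrow" match the put above.
try (Table verify = TEST_UTIL.getConnection().getTable(desc.getTableName())) {
    Get get = new Get(Bytes.toBytes("testrow"));
    get.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("col"));
    Result result = verify.get(get);
    // The stored value should be exactly what the Put wrote.
    assertEquals("testdata", Bytes.toString(result.getValue(Bytes.toBytes("fam"), Bytes.toBytes("col"))));
}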

Example 67 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestMultiVersions, method testScanMultipleVersions.

/**
 * Port of the old TestScanMultipleVersions test, moved here so it can better
 * utilize the spun-up cluster by running more than a single test. Keeps the old
 * test's craziness.
 *
 * <p>Tests five cases of scans and timestamps.
 */
@Test
public void testScanMultipleVersions() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();
    final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") };
    final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") };
    final long[] timestamp = new long[] { 100L, 1000L };
    this.admin.createTable(tableDescriptor, splitRows);
    Table table = UTIL.getConnection().getTable(tableName);
    // Assert we got the region layout we wanted.
    Pair<byte[][], byte[][]> keys = UTIL.getConnection().getRegionLocator(tableName).getStartEndKeys();
    assertEquals(2, keys.getFirst().length);
    byte[][] startKeys = keys.getFirst();
    byte[][] endKeys = keys.getSecond();
    for (int i = 0; i < startKeys.length; i++) {
        if (i == 0) {
            assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, startKeys[i]));
            assertTrue(Bytes.equals(endKeys[i], splitRows[0]));
        } else if (i == 1) {
            assertTrue(Bytes.equals(splitRows[0], startKeys[i]));
            assertTrue(Bytes.equals(endKeys[i], HConstants.EMPTY_END_ROW));
        }
    }
    // Insert data
    List<Put> puts = new ArrayList<>();
    for (int i = 0; i < startKeys.length; i++) {
        for (int j = 0; j < timestamp.length; j++) {
            Put put = new Put(rows[i], timestamp[j]);
            put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
            puts.add(put);
        }
    }
    table.put(puts);
    // First verify each row is readable at each timestamp via Get; the five
    // scan cases are tested after this loop and described inline below.
    for (int i = 0; i < rows.length; i++) {
        for (int j = 0; j < timestamp.length; j++) {
            Get get = new Get(rows[i]);
            get.addFamily(HConstants.CATALOG_FAMILY);
            get.setTimestamp(timestamp[j]);
            Result result = table.get(get);
            int cellCount = 0;
            for (@SuppressWarnings("unused") Cell kv : result.listCells()) {
                cellCount++;
            }
            assertEquals(1, cellCount);
        }
    }
    // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
    int count = 0;
    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    ResultScanner s = table.getScanner(scan);
    try {
        for (Result rr = null; (rr = s.next()) != null; ) {
            System.out.println(rr.toString());
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }
    // Case 2: Scan with a time range starting at the most recent timestamp
    // (in this case >= 1000 and < LATEST_TIMESTAMP; setTimeRange's lower bound
    // is inclusive). Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimeRange(1000L, Long.MAX_VALUE);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }
    // Case 3: scan with timestamp equal to most recent timestamp
    // (in this case == 1000). Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimestamp(1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }
    // Case 4: scan with a time range from the first timestamp up to but not
    // including the second (100 <= timestamp < 1000). Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimeRange(100L, 1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }
    // Case 5: scan with timestamp equal to first timestamp (100)
    // Should get 2 rows.
    count = 0;
    scan = new Scan();
    scan.setTimestamp(100L);
    scan.addFamily(HConstants.CATALOG_FAMILY);
    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), ArrayList (java.util.ArrayList), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), Get (org.apache.hadoop.hbase.client.Get), Scan (org.apache.hadoop.hbase.client.Scan), Test (org.junit.Test)
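
The five scan cases above all turn on HBase's TimeRange semantics: setTimeRange(min, max) is inclusive of min and exclusive of max, and setTimestamp(ts) is shorthand for the one-point range [ts, ts+1). A minimal sketch of the distinction, assuming a table populated at timestamps 100 and 1000 as in the test, and an enclosing method that throws IOException (setTimeRange declares it):

// Sketch only: the two ways the test constrains scan timestamps.
Scan rangeScan = new Scan();
// [100, 1000): matches the cells at ts=100 (Case 4) but not those at ts=1000.
rangeScan.setTimeRange(100L, 1000L);
rangeScan.addFamily(HConstants.CATALOG_FAMILY);

Scan pointScan = new Scan();
// Equivalent to setTimeRange(1000L, 1001L): only cells at exactly ts=1000 (Case 3).
pointScan.setTimestamp(1000L);
pointScan.addFamily(HConstants.CATALOG_FAMILY);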

Example 68 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestHBaseMetaEdit, method testAlterMetaWithReadOnly.

/**
 * Validate that the meta table cannot be altered to be read only; allowing that
 * would break assignment functionality. See HBASE-24977.
 */
@Test
public void testAlterMetaWithReadOnly() throws IOException {
    Admin admin = UTIL.getAdmin();
    TableDescriptor origMetaTableDesc = admin.getDescriptor(TableName.META_TABLE_NAME);
    assertFalse(origMetaTableDesc.isReadOnly());
    TableDescriptor newTD = TableDescriptorBuilder.newBuilder(origMetaTableDesc).setReadOnly(true).build();
    try {
        admin.modifyTable(newTD);
        fail("Meta table can't be set as read only");
    } catch (Exception e) {
        // Expected: the meta table must remain writable.
        assertFalse(admin.getDescriptor(TableName.META_TABLE_NAME).isReadOnly());
    }
    // Create and delete a table to confirm region assignment and meta operations still work.
    TableName tableName = TableName.valueOf("tempTable");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName).setReadOnly(true).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1")).build()).build();
    UTIL.getAdmin().createTable(td);
    UTIL.deleteTable(tableName);
}
Also used: Admin (org.apache.hadoop.hbase.client.Admin), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), IOException (java.io.IOException), Test (org.junit.Test)
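
For contrast with hbase:meta, the READONLY flag is accepted on user tables; the test itself creates tempTable with setReadOnly(true). A hedged sketch of what the flag means for a regular table, reusing the admin and UTIL handles from the test. The table name, family, and row key are illustrative, and it is my reading of the region write path that mutations against a read-only table are rejected with an IOException (a DoNotRetryIOException from the server):

// Sketch only: a read-only user table can be created, but writes against it fail.
// Names below are illustrative, not from the original test.
TableName demoName = TableName.valueOf("readOnlyDemo");
TableDescriptor demoTd = TableDescriptorBuilder.newBuilder(demoName)
    .setReadOnly(true)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1"))
    .build();
admin.createTable(demoTd);
try (Table t = UTIL.getConnection().getTable(demoName)) {
    t.put(new Put(Bytes.toBytes("r1")).addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    fail("Write to a read-only table should not succeed");
} catch (IOException expected) {
    // Expected: the region is read only; reads would still work.
}
UTIL.deleteTable(demoName);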

Example 69 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestHBaseMetaEdit, method testEditMeta.

/**
 * Set versions, set HBASE-16213 indexed block encoding, and add a column family.
 * Delete the added column family, then try to delete a core hbase:meta family
 * (which should fail). Verify the edits are all in place by looking at the
 * TableDescriptor AND by checking what the RegionServer sees after opening the
 * Region.
 */
@Test
public void testEditMeta() throws IOException {
    Admin admin = UTIL.getAdmin();
    assertTrue(admin.tableExists(TableName.META_TABLE_NAME));
    TableDescriptor originalDescriptor = getMetaDescriptor();
    ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
    int oldVersions = cfd.getMaxVersions();
    // Add '1' to current versions count. Set encoding too.
    cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1).setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING, DataBlockEncoding.ROW_INDEX_V1.toString()).build();
    admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
    byte[] extraColumnFamilyName = Bytes.toBytes("xtra");
    ColumnFamilyDescriptor newCfd = ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
    admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
    TableDescriptor descriptor = getMetaDescriptor();
    // Assert new max versions is == old versions plus 1.
    assertEquals(oldVersions + 1, descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
    assertNotNull(descriptor.getColumnFamily(newCfd.getName()));
    String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
    assertEquals(DataBlockEncoding.ROW_INDEX_V1.toString(), encoding);
    Region r = UTIL.getHBaseCluster().getRegionServer(0).getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
    assertEquals(oldVersions + 1, r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
    encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
    assertEquals(DataBlockEncoding.ROW_INDEX_V1.toString(), encoding);
    assertNotNull(r.getStore(extraColumnFamilyName));
    // Assert we can't drop critical hbase:meta column family but we can drop any other.
    admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName());
    descriptor = getMetaDescriptor();
    assertNull(descriptor.getColumnFamily(newCfd.getName()));
    try {
        admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY);
        fail("Should not reach here");
    } catch (HBaseIOException hioe) {
        assertTrue(hioe.getMessage().contains("Delete of hbase:meta"));
    }
}
Also used: Region (org.apache.hadoop.hbase.regionserver.Region), Admin (org.apache.hadoop.hbase.client.Admin), ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Test (org.junit.Test)
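
The test sets the ROW_INDEX_V1 encoding through the column family's configuration map (a per-family override keyed by DATA_BLOCK_ENCODING); on a user table the more common route is the builder's direct setter. A hedged sketch with an illustrative table and family name, reusing the admin handle from the test:

// Sketch only: the same two edits (bump versions, switch block encoding) applied
// to an illustrative user table via the direct builder setters.
TableName demoTable = TableName.valueOf("editDemo");
ColumnFamilyDescriptor demoCf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
    .setMaxVersions(3)
    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
    .build();
admin.createTable(TableDescriptorBuilder.newBuilder(demoTable).setColumnFamily(demoCf).build());
// Later edits go through modifyColumnFamily, exactly as the test does for hbase:meta.
admin.modifyColumnFamily(demoTable, ColumnFamilyDescriptorBuilder.newBuilder(demoCf).setMaxVersions(4).build());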

Example 70 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestFSTableDescriptorForceCreation, method testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor.

@Test
public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor() throws Exception {
    final String name = this.name.getMethodName();
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    Path rootdir = new Path(UTIL.getDataTestDir(), name);
    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
    // First write lays down the descriptor file.
    fstd.createTableDescriptor(htd, false);
    // With forceCreation=true an already existing descriptor is overwritten.
    assertTrue("Should overwrite existing table descriptor", fstd.createTableDescriptor(htd, true));
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), FSTableDescriptors (org.apache.hadoop.hbase.util.FSTableDescriptors), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Test (org.junit.Test)
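
The force flag drives the boolean result of createTableDescriptor: in my reading of the FSTableDescriptors contract, writing a descriptor that already exists is a no-op returning false unless forceCreation is true, in which case the existing file is overwritten and true is returned. A sketch under the same fs/rootdir setup as the test; the table name is illustrative:

// Sketch only: return values of createTableDescriptor with and without force.
TableDescriptor demo = TableDescriptorBuilder.newBuilder(TableName.valueOf("forceDemo")).build();
assertTrue("first write should land", fstd.createTableDescriptor(demo, false));
// A second write without forceCreation is skipped because the file exists.
assertFalse("second write without force is skipped", fstd.createTableDescriptor(demo, false));
// With forceCreation=true the existing descriptor is overwritten.
assertTrue("force overwrites the existing descriptor", fstd.createTableDescriptor(demo, true));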

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 639 usages
Test (org.junit.Test): 356 usages
TableName (org.apache.hadoop.hbase.TableName): 237 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 180 usages
IOException (java.io.IOException): 151 usages
Put (org.apache.hadoop.hbase.client.Put): 142 usages
Admin (org.apache.hadoop.hbase.client.Admin): 136 usages
Path (org.apache.hadoop.fs.Path): 124 usages
Table (org.apache.hadoop.hbase.client.Table): 121 usages
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 96 usages
Configuration (org.apache.hadoop.conf.Configuration): 91 usages
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 77 usages
ArrayList (java.util.ArrayList): 75 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 66 usages
Result (org.apache.hadoop.hbase.client.Result): 66 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 64 usages
Connection (org.apache.hadoop.hbase.client.Connection): 59 usages
Scan (org.apache.hadoop.hbase.client.Scan): 50 usages
Get (org.apache.hadoop.hbase.client.Get): 49 usages
List (java.util.List): 39 usages