
Example 61 with TableDescriptorBuilder

use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.

From class TestSplitTransactionOnCluster, method testContinuousSplitUsingLinkFile.

@Test
public void testContinuousSplitUsingLinkFile() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create table then get the single region for our new table.
    byte[] cf = Bytes.toBytes("cf");
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(cf));
    String splitPolicy = ConstantSizeRegionSplitPolicy.class.getName();
    builder.setValue(SPLIT_POLICY, splitPolicy);
    admin.createTable(builder.build());
    admin.compactionSwitch(false, new ArrayList<>());
    assertNotEquals("Unable to retrieve regions of the table", -1, TESTING_UTIL.waitFor(10000, () -> cluster.getRegions(tableName).size() == 1));
    Table table = TESTING_UTIL.getConnection().getTable(tableName);
    // insert data; each insertData call writes 4 rows and ends with a flush,
    // so the parent region accumulates 3 store files
    insertData(tableName, admin, table, 10);
    insertData(tableName, admin, table, 20);
    insertData(tableName, admin, table, 40);
    int rowCount = 3 * 4;
    Scan scan = new Scan();
    scanValidate(scan, rowCount, table);
    // Split
    admin.splitRegionAsync(cluster.getRegions(tableName).get(0).getRegionInfo().getRegionName(), Bytes.toBytes("row14"));
    // wait for the split to complete or get interrupted. If the split completes successfully,
    // the procedure will return true; if the split fails, it will throw an exception.
    Thread.sleep(3000);
    assertNotEquals("Table is not split properly?", -1, TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 2));
    // we have 2 daughter regions
    HRegion hRegion1 = cluster.getRegions(tableName).get(0);
    HRegion hRegion2 = cluster.getRegions(tableName).get(1);
    HStore hStore1 = hRegion1.getStore(cf);
    HStore hStore2 = hRegion2.getStore(cf);
    // the sum of the store files of the two children should equal the parent's count
    assertEquals(3, hStore1.getStorefilesCount() + hStore2.getStorefilesCount());
    // both children should have link files
    for (StoreFile sf : hStore1.getStorefiles()) {
        assertTrue(HFileLink.isHFileLink(sf.getPath()));
    }
    for (StoreFile sf : hStore2.getStorefiles()) {
        assertTrue(HFileLink.isHFileLink(sf.getPath()));
    }
    // validate children data
    scan = new Scan();
    scanValidate(scan, rowCount, table);
    // Continuous Split
    findRegionToSplit(tableName, "row24");
    Thread.sleep(3000);
    assertNotEquals("Table is not split properly?", -1, TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 3));
    // now the table has 3 regions; each region should have one link file
    for (HRegion newRegion : cluster.getRegions(tableName)) {
        assertEquals(1, newRegion.getStore(cf).getStorefilesCount());
        assertTrue(HFileLink.isHFileLink(newRegion.getStore(cf).getStorefiles().iterator().next().getPath()));
    }
    scan = new Scan();
    scanValidate(scan, rowCount, table);
    // Continuous split again, this time splitting an HFileLink, which generates Reference files.
    // After this, no further continuous split is possible, because reference files are present.
    findRegionToSplit(tableName, "row11");
    Thread.sleep(3000);
    assertNotEquals("Table is not split properly?", -1, TESTING_UTIL.waitFor(3000, () -> cluster.getRegions(tableName).size() == 4));
    scan = new Scan();
    scanValidate(scan, rowCount, table);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)
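A note on the link-file assertions above: immediately after a split, each daughter region is populated with HFileLink files rather than rewritten data, and a link's file name encodes the table, encoded region, and hfile it points at. That is why HFileLink.isHFileLink can classify a store file from its Path alone. A minimal sketch, with a purely illustrative path (the name follows the <table>=<encoded-region>-<hfile> pattern; nothing below is taken from the test):

// hypothetical daughter-region store path
Path linkPath = new Path("/hbase/data/default/child/cf/"
    + "parenttable=0123456789abcdef0123456789abcdef-a1b2c3d4e5f6");
// true: the file name matches the HFileLink naming pattern
boolean isLink = HFileLink.isHFileLink(linkPath);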

Example 62 with TableDescriptorBuilder

use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.

From class TestResettingCounters, method testResettingCounters.

@Test
public void testResettingCounters() throws Exception {
    HBaseTestingUtil htu = new HBaseTestingUtil();
    Configuration conf = htu.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    byte[] table = Bytes.toBytes(name.getMethodName());
    byte[][] families = new byte[][] { Bytes.toBytes("family1"), Bytes.toBytes("family2"), Bytes.toBytes("family3") };
    int numQualifiers = 10;
    byte[][] qualifiers = new byte[numQualifiers][];
    for (int i = 0; i < numQualifiers; i++) qualifiers[i] = Bytes.toBytes("qf" + i);
    int numRows = 10;
    byte[][] rows = new byte[numRows][];
    for (int i = 0; i < numRows; i++) rows[i] = Bytes.toBytes("r" + i);
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(table));
    for (byte[] family : families) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
    }
    TableDescriptor tableDescriptor = builder.build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
    String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
    Path path = new Path(testDir);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    HRegion region = HBaseTestingUtil.createRegionAndWAL(hri, path, conf, tableDescriptor);
    try {
        Increment odd = new Increment(rows[0]);
        odd.setDurability(Durability.SKIP_WAL);
        Increment even = new Increment(rows[0]);
        even.setDurability(Durability.SKIP_WAL);
        Increment all = new Increment(rows[0]);
        all.setDurability(Durability.SKIP_WAL);
        for (int i = 0; i < numQualifiers; i++) {
            if (i % 2 == 0)
                even.addColumn(families[0], qualifiers[i], 1);
            else
                odd.addColumn(families[0], qualifiers[i], 1);
            all.addColumn(families[0], qualifiers[i], 1);
        }
        // increment odd qualifiers 5 times and flush
        for (int i = 0; i < 5; i++) region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE);
        region.flush(true);
        // increment even qualifiers 5 times
        for (int i = 0; i < 5; i++) region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE);
        // increment all qualifiers, should have value=6 for all
        Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE);
        assertEquals(numQualifiers, result.size());
        Cell[] kvs = result.rawCells();
        for (int i = 0; i < kvs.length; i++) {
            System.out.println(kvs[i].toString());
            assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
            assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
        }
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(region);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Result(org.apache.hadoop.hbase.client.Result) FileSystem(org.apache.hadoop.fs.FileSystem) Increment(org.apache.hadoop.hbase.client.Increment) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
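The test drives HRegion.increment directly so it can control nonces and flushes; through the client API the same counter semantics are available via Table.increment, or the Table.incrementColumnValue shortcut for a single cell. A minimal sketch, assuming an open Connection named connection and an existing table (both hypothetical):

// counters keep accumulating across flushes, because an increment always reads the
// current cell value whether it sits in the memstore or in a flushed store file
try (Table t = connection.getTable(TableName.valueOf("counters"))) {
    long after = t.incrementColumnValue(Bytes.toBytes("r0"), Bytes.toBytes("family1"),
        Bytes.toBytes("qf0"), 1L); // returns the post-increment value
}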

Example 63 with TableDescriptorBuilder

use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.

From class TestRegionServerReadRequestMetrics, method createTable.

private static Table createTable() throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF1));
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF2).setTimeToLive(TTL).build());
    admin.createTable(builder.build());
    return TEST_UTIL.getConnection().getTable(TABLE_NAME);
}
Also used : TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder)
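setTimeToLive takes the TTL in seconds; cells in that family older than the TTL become eligible for removal at the next compaction, while CF1 keeps the default of HConstants.FOREVER. A small sketch of setting and reading the value back (the family name is illustrative):

// one day, expressed in seconds; the value is stored on the ColumnFamilyDescriptor
ColumnFamilyDescriptor cf2 = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf2"))
    .setTimeToLive(60 * 60 * 24)
    .build();
assert cf2.getTimeToLive() == 86400;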

Example 64 with TableDescriptorBuilder

use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.

From class SnapshotScannerHDFSAclController, method postStartMaster.

@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException {
    if (!initialized) {
        return;
    }
    try (Admin admin = c.getEnvironment().getConnection().getAdmin()) {
        if (admin.tableExists(PermissionStorage.ACL_TABLE_NAME)) {
            // Check if acl table has 'm' CF, if not, add 'm' CF
            TableDescriptor tableDescriptor = admin.getDescriptor(PermissionStorage.ACL_TABLE_NAME);
            boolean containHdfsAclFamily = Arrays.stream(tableDescriptor.getColumnFamilies()).anyMatch(family -> Bytes.equals(family.getName(), SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY));
            if (!containHdfsAclFamily) {
                TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build());
                admin.modifyTable(builder.build());
            }
            aclTableInitialized = true;
        } else {
            throw new TableNotFoundException("Table " + PermissionStorage.ACL_TABLE_NAME + " is not created yet. Please check if " + getClass().getName() + " is configured after " + AccessController.class.getName());
        }
    }
}
Also used : TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
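TableDescriptorBuilder.newBuilder(TableDescriptor) starts from a copy of the existing descriptor, so the coprocessor only needs to attach the missing family before calling modifyTable. The existence check can also be written with TableDescriptor.getColumnFamily, which returns null when the family is absent; a sketch of that variant (admin comes from the same try-with-resources as above):

TableDescriptor current = admin.getDescriptor(PermissionStorage.ACL_TABLE_NAME);
if (current.getColumnFamily(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY) == null) {
    // clone the descriptor, add the 'm' family, and apply the change
    admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY))
        .build());
}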

Example 65 with TableDescriptorBuilder

use of org.apache.hadoop.hbase.client.TableDescriptorBuilder in project hbase by apache.

From class BulkLoadHFilesTool, method createTable.

/**
 * If the table is created for the first time, then "completebulkload" reads the files twice. More
 * modifications would be necessary if we wanted to avoid doing it.
 */
private void createTable(TableName tableName, Path hfofDir, AsyncAdmin admin) throws IOException {
    final FileSystem fs = hfofDir.getFileSystem(getConf());
    // Add column families
    // Build a set of keys
    List<ColumnFamilyDescriptorBuilder> familyBuilders = new ArrayList<>();
    SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor<ColumnFamilyDescriptorBuilder>() {

        @Override
        public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) {
            ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(familyName);
            familyBuilders.add(builder);
            return builder;
        }

        @Override
        public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus) throws IOException {
            Path hfile = hfileStatus.getPath();
            try (HFile.Reader reader = HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) {
                if (builder.getCompressionType() != reader.getFileContext().getCompression()) {
                    builder.setCompressionType(reader.getFileContext().getCompression());
                    LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + " for family " + builder.getNameAsString());
                }
                byte[] first = reader.getFirstRowKey().get();
                byte[] last = reader.getLastRowKey().get();
                LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
                // To eventually infer start key-end key boundaries
                Integer value = map.getOrDefault(first, 0);
                map.put(first, value + 1);
                value = map.getOrDefault(last, 0);
                map.put(last, value - 1);
            }
        }
    }, true);
    byte[][] keys = inferBoundaries(map);
    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
    familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build).forEachOrdered(tdBuilder::setColumnFamily);
    FutureUtils.get(admin.createTable(tdBuilder.build(), keys));
    LOG.info("Table " + tableName + " is available!!");
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) ArrayList(java.util.ArrayList) HalfStoreFileReader(org.apache.hadoop.hbase.io.HalfStoreFileReader) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) TreeMap(java.util.TreeMap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) FileSystem(org.apache.hadoop.fs.FileSystem)
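The +1 at each first row key and -1 at each last row key turn the sorted map into interval endpoints: a running sum over the keys is positive exactly while at least one HFile covers the current key, and inferBoundaries emits a split point wherever coverage rises from zero again. A simplified sketch of that inference, not the exact HBase implementation (assumes java.util collections):

static byte[][] inferBoundaries(SortedMap<byte[], Integer> map) {
    List<byte[]> starts = new ArrayList<>();
    // number of HFiles whose [first, last] range covers the current key
    int coverage = 0;
    for (Map.Entry<byte[], Integer> e : map.entrySet()) {
        if (coverage == 0 && e.getValue() > 0) {
            // coverage resumes here: a new region starts at this key
            starts.add(e.getKey());
        }
        coverage += e.getValue();
    }
    // the first region's start key is implicit, so it is not a split point
    return starts.isEmpty() ? new byte[0][]
        : starts.subList(1, starts.size()).toArray(new byte[0][]);
}

Passing the resulting keys to createTable pre-splits the new table so that no incoming HFile straddles a region boundary during the bulk load.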

Aggregations

TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 190
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 76
Test (org.junit.Test): 68
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 61
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder): 47
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 39
TableName (org.apache.hadoop.hbase.TableName): 34
Path (org.apache.hadoop.fs.Path): 31
Admin (org.apache.hadoop.hbase.client.Admin): 29
Put (org.apache.hadoop.hbase.client.Put): 25
IOException (java.io.IOException): 24
Configuration (org.apache.hadoop.conf.Configuration): 20
Table (org.apache.hadoop.hbase.client.Table): 18
ArrayList (java.util.ArrayList): 17
FileSystem (org.apache.hadoop.fs.FileSystem): 15
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 15
Before (org.junit.Before): 12
Cell (org.apache.hadoop.hbase.Cell): 11
NamespaceDescriptor (org.apache.hadoop.hbase.NamespaceDescriptor): 10
HashMap (java.util.HashMap): 9