Example 26 with ColumnFamilyDescriptorBuilder

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in the hbase project by Apache.

From the class TestVisibilityLabelsWithDeletes, method createTable:

private void createTable(TableName tableName, int maxVersions) throws IOException {
    ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(fam);
    if (maxVersions > 0) {
        builder.setMaxVersions(maxVersions);
    }
    TEST_UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(builder.build()).build());
}
Also used : ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder)

Example 27 with ColumnFamilyDescriptorBuilder

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in the hbase project by Apache.

From the class BulkLoadHFilesTool, method createTable:

/**
 * If the table is created for the first time, then "completebulkload" reads the files twice. More
 * modifications necessary if we want to avoid doing it.
 */
private void createTable(TableName tableName, Path hfofDir, AsyncAdmin admin) throws IOException {
    final FileSystem fs = hfofDir.getFileSystem(getConf());
    // Add column families
    // Build a set of keys
    List<ColumnFamilyDescriptorBuilder> familyBuilders = new ArrayList<>();
    SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor<ColumnFamilyDescriptorBuilder>() {

        @Override
        public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) {
            ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(familyName);
            familyBuilders.add(builder);
            return builder;
        }

        @Override
        public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus) throws IOException {
            Path hfile = hfileStatus.getPath();
            try (HFile.Reader reader = HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) {
                if (builder.getCompressionType() != reader.getFileContext().getCompression()) {
                    builder.setCompressionType(reader.getFileContext().getCompression());
                    LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + " for family " + builder.getNameAsString());
                }
                byte[] first = reader.getFirstRowKey().get();
                byte[] last = reader.getLastRowKey().get();
                LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
                // To eventually infer start key-end key boundaries
                Integer value = map.getOrDefault(first, 0);
                map.put(first, value + 1);
                value = map.getOrDefault(last, 0);
                map.put(last, value - 1);
            }
        }
    }, true);
    byte[][] keys = inferBoundaries(map);
    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
    familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build).forEachOrdered(tdBuilder::setColumnFamily);
    FutureUtils.get(admin.createTable(tdBuilder.build(), keys));
    LOG.info("Table " + tableName + " is available!!");
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) ArrayList(java.util.ArrayList) HalfStoreFileReader(org.apache.hadoop.hbase.io.HalfStoreFileReader) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) TreeMap(java.util.TreeMap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) FileSystem(org.apache.hadoop.fs.FileSystem)
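
The +1/-1 markers recorded at each HFile's first and last row key are what inferBoundaries later turns into region split points. The following is an illustrative sketch only, not the actual BulkLoadHFilesTool.inferBoundaries implementation: it expresses the idea as a running sum over the sorted markers, assuming +1 at every first key and -1 at every last key as populated by the visitor above.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;

// Illustrative sketch: whenever the running sum of "open" HFile key ranges
// returns to zero, the next key starts a new group of files and therefore a
// new region. The very first key is skipped because it is the implicit start
// of the table's key space.
static byte[][] inferBoundariesSketch(SortedMap<byte[], Integer> markers) {
    List<byte[]> splitKeys = new ArrayList<>();
    int runningSum = 0;
    boolean first = true;
    for (Map.Entry<byte[], Integer> entry : markers.entrySet()) {
        if (runningSum == 0 && !first) {
            splitKeys.add(entry.getKey());
        }
        first = false;
        runningSum += entry.getValue();
    }
    return splitKeys.toArray(new byte[0][]);
}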

Example 28 with ColumnFamilyDescriptorBuilder

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in the hbase project by Apache.

From the class TestHRegion, method testDataInMemoryWithoutWAL:

private static void testDataInMemoryWithoutWAL(HRegion region, Put originalPut, final Put addPut, long delta) throws IOException {
    final long initSize = region.getDataInMemoryWithoutWAL();
    // save normalCPHost and replaced by mockedCPHost
    RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
    RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
    // Because the preBatchMutate returns void, we can't do usual Mockito when...then form. Must
    // do below format (from Mockito doc).
    Mockito.doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            MiniBatchOperationInProgress<Mutation> mb = invocation.getArgument(0);
            mb.addOperationsFromCP(0, new Mutation[] { addPut });
            return null;
        }
    }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class));
    ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_FAMILY_BYTES);
    ScanInfo info = new ScanInfo(CONF, builder.build(), Long.MAX_VALUE, Long.MAX_VALUE, region.getCellComparator());
    Mockito.when(mockedCPHost.preFlushScannerOpen(Mockito.any(HStore.class), Mockito.any())).thenReturn(info);
    Mockito.when(mockedCPHost.preFlush(Mockito.any(), Mockito.any(StoreScanner.class), Mockito.any())).thenAnswer(i -> i.getArgument(1));
    region.setCoprocessorHost(mockedCPHost);
    region.put(originalPut);
    region.setCoprocessorHost(normalCPHost);
    final long finalSize = region.getDataInMemoryWithoutWAL();
    assertEquals("finalSize:" + finalSize + ", initSize:" + initSize + ", delta:" + delta, finalSize, initSize + delta);
}
Also used : ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Mutation(org.apache.hadoop.hbase.client.Mutation)

Example 29 with ColumnFamilyDescriptorBuilder

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in the hbase project by Apache.

From the class TestHRegionFileSystem, method testBlockStoragePolicy:

@Test
public void testBlockStoragePolicy() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    Configuration conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniCluster();
    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
    HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
    // the original block storage policy would be HOT
    String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("HOT", spA);
    assertEquals("HOT", spB);
    // Recreate table and make sure storage policy could be set through configuration
    TEST_UTIL.shutdownMiniCluster();
    TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
    TEST_UTIL.startMiniCluster();
    table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
    regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertEquals("WARM", spA);
        assertEquals("WARM", spB);
        // alter table cf schema to change storage policies
        // and make sure it could override settings in conf
        ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]);
        // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor
        cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
        admin.modifyColumnFamily(TABLE_NAME, cfdA.build());
        while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().hasRegionsInTransition()) {
            Thread.sleep(200);
            LOG.debug("Waiting on table to finish schema altering");
        }
        // alter through HColumnDescriptor#setStoragePolicy
        ColumnFamilyDescriptorBuilder cfdB = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]);
        cfdB.setStoragePolicy("ALL_SSD");
        admin.modifyColumnFamily(TABLE_NAME, cfdB.build());
        while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates().hasRegionsInTransition()) {
            Thread.sleep(200);
            LOG.debug("Waiting on table to finish schema altering");
        }
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertNotNull(spA);
        assertEquals("ONE_SSD", spA);
        assertNotNull(spB);
        assertEquals("ALL_SSD", spB);
        // flush memstore snapshot into 3 files
        for (long i = 0; i < 3; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
            table.put(put);
            admin.flush(TABLE_NAME);
        }
        // there should be 3 files in store dir
        FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
        Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
        FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath);
        assertNotNull(storeFiles);
        assertEquals(3, storeFiles.length);
        // store temp dir still exists but empty
        Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
        assertTrue(fs.exists(storeTempDir));
        FileStatus[] tempFiles = CommonFSUtils.listStatus(fs, storeTempDir);
        assertNull(tempFiles);
        // storage policy of cf temp dir and 3 store files should be ONE_SSD
        assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
        for (FileStatus status : storeFiles) {
            assertEquals("ONE_SSD", ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
        }
        // change storage policies by calling raw api directly
        regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
        regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
        spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
        spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
        LOG.debug("Storage policy of cf 0: [" + spA + "].");
        LOG.debug("Storage policy of cf 1: [" + spB + "].");
        assertNotNull(spA);
        assertEquals("ALL_SSD", spA);
        assertNotNull(spB);
        assertEquals("ONE_SSD", spB);
    } finally {
        table.close();
        TEST_UTIL.deleteTable(TABLE_NAME);
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hbase.client.Table) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) FileSystem(org.apache.hadoop.fs.FileSystem) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem) Test(org.junit.Test)
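
The test above exercises three increasingly specific ways to control the block storage policy: a cluster-wide default through HStore.BLOCK_STORAGE_POLICY_KEY in the configuration, a per-family override via ColumnFamilyDescriptorBuilder#setValue with the same key, and the dedicated ColumnFamilyDescriptorBuilder#setStoragePolicy setter. Below is a minimal sketch of the two descriptor-level variants; the table and family names are placeholders, not part of the test.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.util.Bytes;

// Placeholder table and family names, for illustration only.
void applyStoragePolicies(Admin admin) throws IOException {
    TableName tableName = TableName.valueOf("demo_table");
    // Variant 1: per-family override through the generic key/value map,
    // using the same key HStore otherwise reads from the configuration.
    admin.modifyColumnFamily(tableName,
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf_a"))
            .setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD")
            .build());
    // Variant 2: the dedicated setter on the builder.
    admin.modifyColumnFamily(tableName,
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf_b"))
            .setStoragePolicy("ALL_SSD")
            .build());
}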

Example 30 with ColumnFamilyDescriptorBuilder

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in the hbase project by Apache.

From the class TestMajorCompactorTTL, method modifyTTL:

protected void modifyTTL(TableName tableName) throws IOException, InterruptedException {
    // Set the TTL to 5 secs, so all the files just written above will get cleaned up on compact.
    admin.disableTable(tableName);
    utility.waitTableDisabled(tableName.getName());
    TableDescriptor descriptor = admin.getDescriptor(tableName);
    ColumnFamilyDescriptor colDesc = descriptor.getColumnFamily(FAMILY);
    ColumnFamilyDescriptorBuilder cFDB = ColumnFamilyDescriptorBuilder.newBuilder(colDesc);
    cFDB.setTimeToLive(5);
    admin.modifyColumnFamily(tableName, cFDB.build());
    admin.enableTable(tableName);
    utility.waitTableEnabled(tableName);
}
Also used : ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
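
Because newBuilder(colDesc) copies every setting from the existing descriptor, only the attribute that is explicitly changed differs after modifyColumnFamily, so the same read-modify-write pattern works for any family attribute. A minimal sketch under that assumption follows; the family bytes and the compression change are made-up examples, not part of the test above.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.compress.Compression;

// Illustrative only: the family and the SNAPPY choice are assumptions.
void switchCompression(Admin admin, TableName tableName, byte[] family) throws IOException {
    TableDescriptor descriptor = admin.getDescriptor(tableName);
    ColumnFamilyDescriptor existing = descriptor.getColumnFamily(family);
    // newBuilder(existing) preserves all other family settings; only the
    // compression type is replaced.
    admin.modifyColumnFamily(tableName,
        ColumnFamilyDescriptorBuilder.newBuilder(existing)
            .setCompressionType(Compression.Algorithm.SNAPPY)
            .build());
}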

Aggregations

ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder): 61
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 43
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 19
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 16
Test (org.junit.Test): 9
Table (org.apache.hadoop.hbase.client.Table): 7
Admin (org.apache.hadoop.hbase.client.Admin): 6
Put (org.apache.hadoop.hbase.client.Put): 6
IOException (java.io.IOException): 5
Configuration (org.apache.hadoop.conf.Configuration): 5
Path (org.apache.hadoop.fs.Path): 5
TableName (org.apache.hadoop.hbase.TableName): 4
ArrayList (java.util.ArrayList): 3
Map (java.util.Map): 3
QName (javax.xml.namespace.QName): 3
TableNotEnabledException (org.apache.hadoop.hbase.TableNotEnabledException): 3
TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException): 3
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 3
FileStatus (org.apache.hadoop.fs.FileStatus): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2