Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in project hbase by apache.
The class TestVisibilityLabelsWithDeletes, method createTable.
private void createTable(TableName tableName, int maxVersions) throws IOException {
  ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(fam);
  if (maxVersions > 0) {
    builder.setMaxVersions(maxVersions);
  }
  TEST_UTIL.getAdmin().createTable(
    TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(builder.build()).build());
}
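For context, the builder calls can also be chained into a single expression before build(). A minimal sketch, assuming an existing Admin handle named admin and an illustrative table and family name (not taken from the snippet above):

TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
    .setMaxVersions(3)   // keep up to three versions per cell
    .build())
  .build();
admin.createTable(td);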
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in project hbase by apache.
The class BulkLoadHFilesTool, method createTable.
/**
 * If the table is created for the first time, then "completebulkload" reads the files twice.
 * More modifications are necessary if we want to avoid doing it.
 */
private void createTable(TableName tableName, Path hfofDir, AsyncAdmin admin) throws IOException {
  final FileSystem fs = hfofDir.getFileSystem(getConf());
  // Add column families
  // Build a set of keys
  List<ColumnFamilyDescriptorBuilder> familyBuilders = new ArrayList<>();
  SortedMap<byte[], Integer> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor<ColumnFamilyDescriptorBuilder>() {

    @Override
    public ColumnFamilyDescriptorBuilder bulkFamily(byte[] familyName) {
      ColumnFamilyDescriptorBuilder builder = ColumnFamilyDescriptorBuilder.newBuilder(familyName);
      familyBuilders.add(builder);
      return builder;
    }

    @Override
    public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus)
        throws IOException {
      Path hfile = hfileStatus.getPath();
      try (HFile.Reader reader =
          HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) {
        if (builder.getCompressionType() != reader.getFileContext().getCompression()) {
          builder.setCompressionType(reader.getFileContext().getCompression());
          LOG.info("Setting compression " + reader.getFileContext().getCompression().name()
            + " for family " + builder.getNameAsString());
        }
        byte[] first = reader.getFirstRowKey().get();
        byte[] last = reader.getLastRowKey().get();
        LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first="
          + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
        // To eventually infer start key-end key boundaries
        Integer value = map.getOrDefault(first, 0);
        map.put(first, value + 1);
        value = map.getOrDefault(last, 0);
        map.put(last, value - 1);
      }
    }
  }, true);
  byte[][] keys = inferBoundaries(map);
  TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
  familyBuilders.stream().map(ColumnFamilyDescriptorBuilder::build)
    .forEachOrdered(tdBuilder::setColumnFamily);
  FutureUtils.get(admin.createTable(tdBuilder.build(), keys));
  LOG.info("Table " + tableName + " is available!!");
}
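The sorted map above records +1 at each HFile's first row key and -1 at each last row key, so region split points fall where a new run of overlapping files begins. A minimal sketch of that idea (not the actual inferBoundaries implementation in HBase):

static byte[][] boundarySketch(SortedMap<byte[], Integer> map) {
  List<byte[]> splits = new ArrayList<>();
  int running = 0;            // how many HFile key ranges currently cover this point
  boolean first = true;
  for (Map.Entry<byte[], Integer> e : map.entrySet()) {
    boolean wasUncovered = (running == 0);
    running += e.getValue();
    if (wasUncovered && running > 0 && !first) {
      splits.add(e.getKey());  // a new group of overlapping HFiles starts here
    }
    first = false;
  }
  return splits.toArray(new byte[0][]);
}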
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in project hbase by apache.
The class TestHRegion, method testDataInMemoryWithoutWAL.
private static void testDataInMemoryWithoutWAL(HRegion region, Put originalPut, final Put addPut,
    long delta) throws IOException {
  final long initSize = region.getDataInMemoryWithoutWAL();
  // save normalCPHost and replace it with mockedCPHost
  RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
  RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
  // Because preBatchMutate returns void, we can't use the usual Mockito when...then form; we must
  // use the form below (from the Mockito docs).
  Mockito.doAnswer(new Answer<Void>() {

    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      MiniBatchOperationInProgress<Mutation> mb = invocation.getArgument(0);
      mb.addOperationsFromCP(0, new Mutation[] { addPut });
      return null;
    }
  }).when(mockedCPHost).preBatchMutate(Mockito.isA(MiniBatchOperationInProgress.class));
  ColumnFamilyDescriptorBuilder builder =
    ColumnFamilyDescriptorBuilder.newBuilder(COLUMN_FAMILY_BYTES);
  ScanInfo info =
    new ScanInfo(CONF, builder.build(), Long.MAX_VALUE, Long.MAX_VALUE, region.getCellComparator());
  Mockito.when(mockedCPHost.preFlushScannerOpen(Mockito.any(HStore.class), Mockito.any()))
    .thenReturn(info);
  Mockito.when(mockedCPHost.preFlush(Mockito.any(), Mockito.any(StoreScanner.class), Mockito.any()))
    .thenAnswer(i -> i.getArgument(1));
  region.setCoprocessorHost(mockedCPHost);
  region.put(originalPut);
  region.setCoprocessorHost(normalCPHost);
  final long finalSize = region.getDataInMemoryWithoutWAL();
  assertEquals("finalSize:" + finalSize + ", initSize:" + initSize + ", delta:" + delta, finalSize,
    initSize + delta);
}
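The doAnswer(..).when(..) pattern above is the general way to stub a void method with Mockito, since when(mock.someVoidCall()).thenAnswer(..) does not compile for void returns. A minimal, self-contained sketch using a purely hypothetical listener interface:

interface FlushListener {            // hypothetical type, for illustration only
  void onFlush(String regionName);
}

FlushListener listener = Mockito.mock(FlushListener.class);
Mockito.doAnswer(invocation -> {
  String regionName = invocation.getArgument(0);     // inspect the call's argument
  System.out.println("flush observed for " + regionName);
  return null;                                       // an Answer for a void method returns null
}).when(listener).onFlush(Mockito.anyString());

listener.onFlush("test-region");                     // triggers the Answer above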
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in project hbase by apache.
The class TestHRegionFileSystem, method testBlockStoragePolicy.
@Test
public void testBlockStoragePolicy() throws Exception {
  TEST_UTIL = new HBaseTestingUtil();
  Configuration conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniCluster();
  Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
  assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
  HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
  // the original block storage policy would be HOT
  String spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
  String spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
  LOG.debug("Storage policy of cf 0: [" + spA + "].");
  LOG.debug("Storage policy of cf 1: [" + spB + "].");
  assertEquals("HOT", spA);
  assertEquals("HOT", spB);
  // Recreate table and make sure storage policy could be set through configuration
  TEST_UTIL.shutdownMiniCluster();
  TEST_UTIL.getConfiguration().set(HStore.BLOCK_STORAGE_POLICY_KEY, "WARM");
  TEST_UTIL.startMiniCluster();
  table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
  regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
  try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
    spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertEquals("WARM", spA);
    assertEquals("WARM", spB);
    // alter table cf schema to change storage policies
    // and make sure it could override settings in conf
    ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]);
    // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor
    cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
    admin.modifyColumnFamily(TABLE_NAME, cfdA.build());
    while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
      .hasRegionsInTransition()) {
      Thread.sleep(200);
      LOG.debug("Waiting on table to finish schema altering");
    }
    // alter through HColumnDescriptor#setStoragePolicy
    ColumnFamilyDescriptorBuilder cfdB = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[1]);
    cfdB.setStoragePolicy("ALL_SSD");
    admin.modifyColumnFamily(TABLE_NAME, cfdB.build());
    while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
      .hasRegionsInTransition()) {
      Thread.sleep(200);
      LOG.debug("Waiting on table to finish schema altering");
    }
    spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertNotNull(spA);
    assertEquals("ONE_SSD", spA);
    assertNotNull(spB);
    assertEquals("ALL_SSD", spB);
    // flush memstore snapshot into 3 files
    for (long i = 0; i < 3; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
      table.put(put);
      admin.flush(TABLE_NAME);
    }
    // there should be 3 files in store dir
    FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
    Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
    FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath);
    assertNotNull(storeFiles);
    assertEquals(3, storeFiles.length);
    // store temp dir still exists but empty
    Path storeTempDir = new Path(regionFs.getTempDir(), Bytes.toString(FAMILIES[0]));
    assertTrue(fs.exists(storeTempDir));
    FileStatus[] tempFiles = CommonFSUtils.listStatus(fs, storeTempDir);
    assertNull(tempFiles);
    // storage policy of cf temp dir and 3 store files should be ONE_SSD
    assertEquals("ONE_SSD",
      ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(storeTempDir));
    for (FileStatus status : storeFiles) {
      assertEquals("ONE_SSD",
        ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
    }
    // change storage policies by calling raw api directly
    regionFs.setStoragePolicy(Bytes.toString(FAMILIES[0]), "ALL_SSD");
    regionFs.setStoragePolicy(Bytes.toString(FAMILIES[1]), "ONE_SSD");
    spA = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[0]));
    spB = regionFs.getStoragePolicyName(Bytes.toString(FAMILIES[1]));
    LOG.debug("Storage policy of cf 0: [" + spA + "].");
    LOG.debug("Storage policy of cf 1: [" + spB + "].");
    assertNotNull(spA);
    assertEquals("ALL_SSD", spA);
    assertNotNull(spB);
    assertEquals("ONE_SSD", spB);
  } finally {
    table.close();
    TEST_UTIL.deleteTable(TABLE_NAME);
    TEST_UTIL.shutdownMiniCluster();
  }
}
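As the test exercises, a column family's storage policy can be requested either through the raw HStore.BLOCK_STORAGE_POLICY_KEY value or through the dedicated builder method. A minimal sketch with illustrative family names (not taken from the test above):

ColumnFamilyDescriptor viaValue = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf_value"))
  .setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD")  // same key the test's configuration override uses
  .build();
ColumnFamilyDescriptor viaSetter = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf_setter"))
  .setStoragePolicy("ALL_SSD")                           // dedicated builder method
  .build();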
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder in project hbase by apache.
The class TestMajorCompactorTTL, method modifyTTL.
protected void modifyTTL(TableName tableName) throws IOException, InterruptedException {
  // Set the TTL to 5 secs, so all the files just written above will get cleaned up on compact.
  admin.disableTable(tableName);
  utility.waitTableDisabled(tableName.getName());
  TableDescriptor descriptor = admin.getDescriptor(tableName);
  ColumnFamilyDescriptor colDesc = descriptor.getColumnFamily(FAMILY);
  ColumnFamilyDescriptorBuilder cFDB = ColumnFamilyDescriptorBuilder.newBuilder(colDesc);
  cFDB.setTimeToLive(5);
  admin.modifyColumnFamily(tableName, cFDB.build());
  admin.enableTable(tableName);
  utility.waitTableEnabled(tableName);
}
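Note that ColumnFamilyDescriptorBuilder#setTimeToLive takes the TTL in seconds. A minimal sketch of the same copy-modify-apply pattern with a longer, illustrative TTL, assuming the same admin, tableName, and FAMILY as above:

ColumnFamilyDescriptor ttlCf = ColumnFamilyDescriptorBuilder
  .newBuilder(admin.getDescriptor(tableName).getColumnFamily(FAMILY)) // copy existing family settings
  .setTimeToLive(600)                                                 // 10 minutes, expressed in seconds
  .build();
admin.modifyColumnFamily(tableName, ttlCf);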