Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class HRegionFileSystem, method insertRegionFilesIntoStoreTracker:
private void insertRegionFilesIntoStoreTracker(List<Path> allFiles, MasterProcedureEnv env,
    HRegionFileSystem regionFs) throws IOException {
  TableDescriptor tblDesc =
    env.getMasterServices().getTableDescriptors().get(regionInfo.getTable());
  // we need to map trackers per store
  Map<String, StoreFileTracker> trackerMap = new HashMap<>();
  // we need to map store files per store
  Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
  for (Path file : allFiles) {
    String familyName = file.getParent().getName();
    trackerMap.computeIfAbsent(familyName, t -> StoreFileTrackerFactory.create(conf, tblDesc,
      tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs));
    fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>());
    List<StoreFileInfo> infos = fileInfoMap.get(familyName);
    infos.add(new StoreFileInfo(conf, fs, file, true));
  }
  for (Map.Entry<String, StoreFileTracker> entry : trackerMap.entrySet()) {
    entry.getValue().add(fileInfoMap.get(entry.getKey()));
  }
}
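The lookup that feeds StoreFileTrackerFactory.create above is TableDescriptor.getColumnFamily, keyed by the store (column family) directory name. A minimal sketch of that lookup in isolation; the table name "demo" and family "cf1" are placeholders, and in the method above the descriptor comes from the master's table descriptor cache rather than from a builder:

// Hypothetical descriptor built inline for illustration only.
TableDescriptor tblDesc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
  .build();
// Same call the tracker creation uses: family name (as bytes) -> ColumnFamilyDescriptor.
ColumnFamilyDescriptor cfd = tblDesc.getColumnFamily(Bytes.toBytes("cf1"));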
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class IncreasingToUpperBoundRegionSplitPolicy, method configureForRegion:
@Override
protected void configureForRegion(HRegion region) {
  super.configureForRegion(region);
  Configuration conf = getConf();
  initialSize = conf.getLong("hbase.increasing.policy.initial.size", -1);
  if (initialSize > 0) {
    return;
  }
  TableDescriptor desc = region.getTableDescriptor();
  if (desc != null) {
    initialSize = 2 * desc.getMemStoreFlushSize();
  }
  if (initialSize <= 0) {
    initialSize = 2 * conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
      TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
  }
}
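For context, initialSize seeds the policy's per-region split bound. A simplified sketch of how such a bound can be computed from initialSize and the number of regions the table has on the server; the cubic growth and the cap at the desired maximum file size follow the upstream IncreasingToUpperBoundRegionSplitPolicy, but treat this as an illustration rather than the exact implementation:

protected long getSizeToCheck(final int tableRegionsCount) {
  // With no region count available, fall back to the configured desired max file size.
  return tableRegionsCount == 0
    ? getDesiredMaxFileSize()
    : Math.min(getDesiredMaxFileSize(),
        initialSize * tableRegionsCount * tableRegionsCount * tableRegionsCount);
}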
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class IntegrationTestIngestStripeCompactions, method initTable:
@Override
protected void initTable() throws IOException {
  // Do the same as the LoadTestTool does, but with different table configuration.
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(getTablename())
    .setValue(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName())
    .setValue(HStore.BLOCKING_STOREFILES_KEY, "100")
    .build();
  ColumnFamilyDescriptor familyDescriptor =
    ColumnFamilyDescriptorBuilder.of(HFileTestUtil.DEFAULT_COLUMN_FAMILY);
  HBaseTestingUtil.createPreSplitLoadTestTable(util.getConfiguration(), tableDescriptor,
    familyDescriptor);
}
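The same StoreEngine settings can also be applied to a table that already exists by rebuilding its descriptor through the Admin API. A hedged sketch; the connection handling and the table name "my_table" are placeholders and are not part of the integration test:

void switchToStripeEngine(Connection connection) throws IOException {
  try (Admin admin = connection.getAdmin()) {
    TableName name = TableName.valueOf("my_table");
    TableDescriptor current = admin.getDescriptor(name);
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
      .setValue(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName())
      .setValue(HStore.BLOCKING_STOREFILES_KEY, "100")
      .build();
    // Push the updated descriptor to the cluster.
    admin.modifyTable(updated);
  }
}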
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class IntegrationTestIngestWithEncryption, method setUp:
@Before
@Override
public void setUp() throws Exception {
  // Initialize the cluster. This invokes LoadTestTool -init_only, which
  // will create the test table, appropriately pre-split
  super.setUp();
  if (!initialized) {
    return;
  }
  // Update the test table schema so HFiles from this point will be written with
  // encryption features enabled.
  final Admin admin = util.getAdmin();
  TableDescriptor tableDescriptor = admin.getDescriptor(getTablename());
  for (ColumnFamilyDescriptor columnDescriptor : tableDescriptor.getColumnFamilies()) {
    ColumnFamilyDescriptor updatedColumn = ColumnFamilyDescriptorBuilder
      .newBuilder(columnDescriptor).setEncryptionType("AES").build();
    LOG.info("Updating CF schema for " + getTablename() + "."
      + columnDescriptor.getNameAsString());
    admin.disableTable(getTablename());
    admin.modifyColumnFamily(getTablename(), updatedColumn);
    admin.enableTable(getTablename());
    util.waitFor(30000, 1000, true, new Predicate<IOException>() {
      @Override
      public boolean evaluate() throws IOException {
        return admin.isTableAvailable(getTablename());
      }
    });
  }
}
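Once the loop finishes, the cipher can be read back from a refreshed descriptor. A small hypothetical check, not part of the original test, showing how ColumnFamilyDescriptor exposes the encryption type that was just set:

TableDescriptor refreshed = admin.getDescriptor(getTablename());
for (ColumnFamilyDescriptor cf : refreshed.getColumnFamilies()) {
  // Expected to report "AES" for every family after the schema update above.
  LOG.info(cf.getNameAsString() + " encryption type: " + cf.getEncryptionType());
}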
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class TestScannersWithFilters, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class,
    ScannerModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  Admin admin = TEST_UTIL.getAdmin();
  if (!admin.tableExists(TABLE)) {
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[0]))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILIES[1]))
      .build();
    admin.createTable(tableDescriptor);
    Table table = TEST_UTIL.getConnection().getTable(TABLE);
    // Insert first half
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Insert second half (reverse families)
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Delete the second qualifier from all rows and families
    for (byte[] ROW : ROWS_ONE) {
      Delete d = new Delete(ROW);
      d.addColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
      d.addColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
      table.delete(d);
    }
    for (byte[] ROW : ROWS_TWO) {
      Delete d = new Delete(ROW);
      d.addColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
      d.addColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
      table.delete(d);
    }
    colsPerRow -= 2;
    // Delete the second rows from both groups, one column at a time
    for (byte[] QUALIFIER : QUALIFIERS_ONE) {
      Delete d = new Delete(ROWS_ONE[1]);
      d.addColumns(FAMILIES[0], QUALIFIER);
      d.addColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    for (byte[] QUALIFIER : QUALIFIERS_TWO) {
      Delete d = new Delete(ROWS_TWO[1]);
      d.addColumns(FAMILIES[0], QUALIFIER);
      d.addColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    numRows -= 2;
    table.close();
  }
}
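After this setup, the table holds numRows rows with colsPerRow columns each, which the filter tests then read through the REST gateway. A hypothetical helper, not part of the original test, showing how the loaded data could be verified with a plain client-side scan:

static int countRows(Table table) throws IOException {
  int rows = 0;
  // Each Result returned by the scanner corresponds to one surviving row.
  try (ResultScanner scanner = table.getScanner(new Scan())) {
    for (Result result : scanner) {
      rows++;
    }
  }
  return rows;
}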