Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class MobUtils, method getMobColumnFamilies.
/**
 * Get the list of MOB column families (if any exist).
 * @param htd table descriptor
 * @return list of MOB column families
 */
public static List<ColumnFamilyDescriptor> getMobColumnFamilies(TableDescriptor htd) {
  List<ColumnFamilyDescriptor> fams = new ArrayList<>();
  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
    if (hcd.isMobEnabled()) {
      fams.add(hcd);
    }
  }
  return fams;
}
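For context, a minimal usage sketch, assuming an open Connection and a family that gets MOB-enabled first; the table name, family name, and threshold below are illustrative, not part of the original snippet:

// Hypothetical sketch: enable MOB on one family, then collect the MOB families.
TableName tn = TableName.valueOf("demo"); // illustrative name
Admin admin = connection.getAdmin(); // assumes an existing Connection
admin.modifyColumnFamily(tn, ColumnFamilyDescriptorBuilder
  .newBuilder(Bytes.toBytes("cf"))
  .setMobEnabled(true) // makes isMobEnabled() return true
  .setMobThreshold(102400L) // cells above ~100 KB go to MOB files
  .build());
List<ColumnFamilyDescriptor> mobFams = MobUtils.getMobColumnFamilies(admin.getDescriptor(tn));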
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class RemoveColumnAction, method perform.
@Override
public void perform() throws Exception {
  TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
  ColumnFamilyDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
  // Never remove the last family (or the last unprotected one).
  if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) {
    return;
  }
  // Re-roll until the chosen family is not a protected one.
  int index = random.nextInt(columnDescriptors.length);
  while (protectedColumns != null
    && protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
    index = random.nextInt(columnDescriptors.length);
  }
  byte[] colDescName = columnDescriptors[index].getName();
  getLogger().debug("Performing action: Removing " + Bytes.toString(colDescName) + " from "
    + tableName.getNameAsString());
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor);
  builder.removeColumnFamily(colDescName);
  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  admin.modifyTable(builder.build());
}
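The same removal pattern can be written as a small standalone sketch, assuming an existing Admin and TableName and an illustrative family name; hasColumnFamily and getColumnFamilyCount guard against dropping a missing or last family:

// Hypothetical sketch: drop a family only if it exists and is not the last one.
TableDescriptor td = admin.getDescriptor(tableName);
byte[] family = Bytes.toBytes("old_cf"); // illustrative family name
if (td.hasColumnFamily(family) && td.getColumnFamilyCount() > 1) {
  admin.modifyTable(TableDescriptorBuilder.newBuilder(td).removeColumnFamily(family).build());
}

For a single family, Admin.deleteColumnFamily(tableName, family) is the more direct equivalent.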
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TestTableScan, method setUpBeforeClass.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  conf.set(Constants.CUSTOM_FILTERS, "CustomFilter:" + CustomFilter.class.getName());
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(conf);
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  Admin admin = TEST_UTIL.getAdmin();
  if (!admin.tableExists(TABLE)) {
    TableDescriptorBuilder tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE);
    tableDescriptorBuilder
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build());
    tableDescriptorBuilder
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build());
    admin.createTable(tableDescriptorBuilder.build());
    expectedRows1 = TestScannerResource.insertData(conf, TABLE, COLUMN_1, 1.0);
    expectedRows2 = TestScannerResource.insertData(conf, TABLE, COLUMN_2, 0.5);
    expectedRows3 = TestScannerResource.insertData(conf, TABLE, COLUMN_EMPTY, 1.0);
  }
}
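The two setColumnFamily calls can also be collapsed via setColumnFamilies, which accepts a whole collection; a brief equivalent sketch using the test's own CFA/CFB names (java.util.Arrays assumed imported):

// Equivalent creation with setColumnFamilies and the of() shorthand.
admin.createTable(TableDescriptorBuilder.newBuilder(TABLE)
  .setColumnFamilies(Arrays.asList(
    ColumnFamilyDescriptorBuilder.of(CFA),
    ColumnFamilyDescriptorBuilder.of(CFB)))
  .build());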
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class MasterRegion, method tryMigrate.
private static void tryMigrate(Configuration conf, FileSystem fs, Path tableDir,
  RegionInfo regionInfo, TableDescriptor oldTd, TableDescriptor newTd) throws IOException {
  Class<? extends StoreFileTracker> oldSft =
    StoreFileTrackerFactory.getTrackerClass(oldTd.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
  Class<? extends StoreFileTracker> newSft =
    StoreFileTrackerFactory.getTrackerClass(newTd.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
  if (oldSft.equals(newSft)) {
    LOG.debug("old store file tracker {} is the same as the new store file tracker, skip migration",
      StoreFileTrackerFactory.getStoreFileTrackerName(oldSft));
    if (!oldTd.equals(newTd)) {
      // we may change other things such as adding a new family, so here we still need to persist
      // the new table descriptor
      LOG.info("Update table descriptor from {} to {}", oldTd, newTd);
      FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, newTd, true);
    }
    return;
  }
  LOG.info("Migrate store file tracker from {} to {}", oldSft.getSimpleName(),
    newSft.getSimpleName());
  HRegionFileSystem hfs =
    HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, regionInfo, false);
  for (ColumnFamilyDescriptor oldCfd : oldTd.getColumnFamilies()) {
    StoreFileTracker oldTracker = StoreFileTrackerFactory.create(conf, oldTd, oldCfd, hfs);
    // The new tracker must be built from the NEW table descriptor; creating it from
    // oldTd (as the snippet originally did) would yield the same tracker class and
    // migrate nothing.
    StoreFileTracker newTracker = StoreFileTrackerFactory.create(conf, newTd, oldCfd, hfs);
    List<StoreFileInfo> files = oldTracker.load();
    LOG.debug("Store file list for {}: {}", oldCfd.getNameAsString(), files);
    // Reuse the list already loaded instead of hitting the filesystem a second time.
    newTracker.set(files);
  }
  // persist the new table descriptor after migration
  LOG.info("Update table descriptor from {} to {}", oldTd, newTd);
  FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, newTd, true);
}
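The TRACKER_IMPL value that tryMigrate compares is a plain table-descriptor property; a hedged sketch of setting it so a later run sees differing tracker classes (the table and family names are illustrative):

// Hypothetical sketch: request the FILE-based store file tracker on a descriptor.
TableDescriptor withFileTracker = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  .setValue(StoreFileTrackerFactory.TRACKER_IMPL, StoreFileTrackerFactory.Trackers.FILE.name())
  .build();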
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TestHBaseMetaEdit, method testEditMeta.
/**
 * Set versions, set HBASE-16213 indexed block encoding, and add a column family.
 * Delete the column family. Then try to delete a core hbase:meta family (should fail).
 * Verify the changes are all in place by looking at the TableDescriptor AND by checking
 * what the RegionServer sees after opening the Region.
 */
@Test
public void testEditMeta() throws IOException {
  Admin admin = UTIL.getAdmin();
  assertTrue(admin.tableExists(TableName.META_TABLE_NAME));
  TableDescriptor originalDescriptor = getMetaDescriptor();
  ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
  int oldVersions = cfd.getMaxVersions();
  // Add '1' to current versions count. Set encoding too.
  cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1)
    .setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING,
      DataBlockEncoding.ROW_INDEX_V1.toString())
    .build();
  admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
  byte[] extraColumnFamilyName = Bytes.toBytes("xtra");
  ColumnFamilyDescriptor newCfd =
    ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
  admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
  TableDescriptor descriptor = getMetaDescriptor();
  // Assert new max versions is == old versions plus 1.
  assertEquals(oldVersions + 1,
    descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
  assertNotNull(descriptor.getColumnFamily(newCfd.getName()));
  String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration()
    .get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
  assertEquals(DataBlockEncoding.ROW_INDEX_V1.toString(), encoding);
  Region r = UTIL.getHBaseCluster().getRegionServer(0)
    .getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
  assertEquals(oldVersions + 1,
    r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
  encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor()
    .getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
  assertEquals(DataBlockEncoding.ROW_INDEX_V1.toString(), encoding);
  assertNotNull(r.getStore(extraColumnFamilyName));
  // Assert we can't drop critical hbase:meta column family but we can drop any other.
  admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName());
  descriptor = getMetaDescriptor();
  assertNull(descriptor.getColumnFamily(newCfd.getName()));
  try {
    admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY);
    fail("Should not reach here");
  } catch (HBaseIOException hioe) {
    assertTrue(hioe.getMessage().contains("Delete of hbase:meta"));
  }
}
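The copy-builder pattern the test relies on (newBuilder on an existing descriptor, override a setting, build) generalizes to any per-family tweak; a minimal sketch, assuming an existing Admin, TableName, and an illustrative family name:

// Hypothetical sketch: bump max versions on a family while keeping its other settings.
ColumnFamilyDescriptor current =
  admin.getDescriptor(tableName).getColumnFamily(Bytes.toBytes("cf")); // illustrative family
admin.modifyColumnFamily(tableName, ColumnFamilyDescriptorBuilder.newBuilder(current)
  .setMaxVersions(current.getMaxVersions() + 1)
  .build());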