Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class AccessController, method createACLTable.
/**
 * Create the ACL table.
 * @throws IOException if the table cannot be created
 */
private static void createACLTable(Admin admin) throws IOException {
  // Column family descriptor for the ACL table
  ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
    .newBuilder(PermissionStorage.ACL_LIST_FAMILY)
    .setMaxVersions(1)
    .setInMemory(true)
    .setBlockCacheEnabled(true)
    .setBlocksize(8 * 1024)
    .setBloomFilterType(BloomType.NONE)
    .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
    .build();
  TableDescriptor td = TableDescriptorBuilder
    .newBuilder(PermissionStorage.ACL_TABLE_NAME)
    .setColumnFamily(cfd)
    .build();
  admin.createTable(td);
}
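For context, a minimal usage sketch (not part of the original class) is shown below: it obtains an Admin from a Connection and creates the ACL table only if it is absent. HBaseConfiguration, ConnectionFactory, and the tableExists guard are standard hbase-client pieces, but the ensureAclTable wrapper is an assumption added for illustration.

private static void ensureAclTable() throws IOException {
  // Sketch only: assumes a reachable cluster via the default client
  // configuration, and that createACLTable (above) is accessible here.
  Configuration conf = HBaseConfiguration.create();
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Admin admin = connection.getAdmin()) {
    if (!admin.tableExists(PermissionStorage.ACL_TABLE_NAME)) {
      createACLTable(admin);
    }
  }
}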
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class RegionSplitter, method splitScan.
static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList,
    final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo)
    throws IOException, InterruptedException {
  LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
  LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
  LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
  // Get table info
  Pair<Path, Path> tableDirAndSplitFile =
      getTableDirAndSplitFile(connection.getConfiguration(), tableName);
  Path tableDir = tableDirAndSplitFile.getFirst();
  FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
  // Clear the cache to forcibly refresh region information
  connection.clearRegionLocationCache();
  TableDescriptor htd = null;
  try (Table table = connection.getTable(tableName)) {
    htd = table.getDescriptor();
  }
  try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
    // For every region that hasn't been verified as a finished split
    for (Pair<byte[], byte[]> region : regionList) {
      byte[] start = region.getFirst();
      byte[] split = region.getSecond();
      // See if the new split daughter region has come online
      try {
        RegionInfo dri = regionLocator.getRegionLocation(split, true).getRegion();
        if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
          logicalSplitting.add(region);
          continue;
        }
      } catch (NoServerForRegionException nsfre) {
        // NSFRE will occur if the old hbase:meta entry has no server assigned
        LOG.info(nsfre.toString(), nsfre);
        logicalSplitting.add(region);
        continue;
      }
      try {
        // When a daughter region is opened, a compaction is triggered.
        // Wait until compaction completes for both daughter regions.
        LinkedList<RegionInfo> check = Lists.newLinkedList();
        check.add(regionLocator.getRegionLocation(start).getRegion());
        check.add(regionLocator.getRegionLocation(split).getRegion());
        for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) {
          byte[] sk = hri.getStartKey();
          if (sk.length == 0) {
            sk = splitAlgo.firstRow();
          }
          HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
              connection.getConfiguration(), fs, tableDir, hri, true);
          // Check every column family of the region for leftover reference files.
          boolean refFound = false;
          for (ColumnFamilyDescriptor c : htd.getColumnFamilies()) {
            if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
              break;
            }
          }
          // Compaction is completed when all reference files are gone
          if (!refFound) {
            check.remove(hri);
          }
        }
        if (check.isEmpty()) {
          finished.add(region);
        } else {
          physicalSplitting.add(region);
        }
      } catch (NoServerForRegionException nsfre) {
        LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
        physicalSplitting.add(region);
        connection.clearRegionLocationCache();
      }
    }
    LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size()
        + " split wait / " + physicalSplitting.size() + " reference wait");
    return finished;
  }
}
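Since splitScan only returns the pairs whose daughter regions are online and fully compacted, callers typically poll it in a loop. The driver below is a hedged sketch of that pattern; the waitForSplits name and the 30-second poll interval are illustrative assumptions, not part of RegionSplitter.

// Sketch of a polling driver around splitScan; names and the sleep
// interval are assumptions for illustration.
static void waitForSplits(LinkedList<Pair<byte[], byte[]>> outstanding,
    Connection connection, TableName tableName, SplitAlgorithm splitAlgo)
    throws IOException, InterruptedException {
  while (!outstanding.isEmpty()) {
    // Drop the regions whose splits have fully completed; retry the rest.
    LinkedList<Pair<byte[], byte[]>> finished =
        splitScan(outstanding, connection, tableName, splitAlgo);
    outstanding.removeAll(finished);
    if (!outstanding.isEmpty()) {
      Thread.sleep(30 * 1000);
    }
  }
}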
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TableDescriptorChecker, method checkCompactionPolicy.
private static void checkCompactionPolicy(Configuration conf, TableDescriptor td)
    throws IOException {
  // FIFO compaction has some requirements; note that FIFOCompactionPolicy
  // ignores periodic major compactions.
  String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
  if (className == null) {
    className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        ExploringCompactionPolicy.class.getName());
  }
  int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
  String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
  if (sv != null) {
    blockingFileCount = Integer.parseInt(sv);
  } else {
    blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
  }
  for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
    String compactionPolicy =
        hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (compactionPolicy == null) {
      compactionPolicy = className;
    }
    if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
      continue;
    }
    // FIFOCompaction
    String message = null;
    // 1. Check TTL
    if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
      message = "Default TTL is not supported for FIFO compaction";
      throw new IOException(message);
    }
    // 2. Check min versions
    if (hcd.getMinVersions() > 0) {
      message = "MIN_VERSION > 0 is not supported for FIFO compaction";
      throw new IOException(message);
    }
    // 3. Check blocking file count
    sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
      blockingFileCount = Integer.parseInt(sv);
    }
    if (blockingFileCount < 1000) {
      message = "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' "
          + blockingFileCount + " is below recommended minimum of 1000 for column family "
          + hcd.getNameAsString();
      throw new IOException(message);
    }
  }
}
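To make the three checks concrete, the sketch below builds a column family that checkCompactionPolicy would accept for FIFO compaction: an explicit TTL, the default MIN_VERSIONS of zero, and a blocking store file count at or above 1000. The family name "f" and the one-day TTL are illustrative assumptions.

ColumnFamilyDescriptor fifoCf = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("f"))
    // An explicit TTL (one day, in seconds); the default TTL is rejected.
    .setTimeToLive(24 * 60 * 60)
    .setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName())
    // At or above the recommended minimum of 1000 blocking store files.
    .setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000")
    .build();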
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TestCoreRegionCoprocessor, method before.
@Before
public void before() throws IOException {
  String methodName = this.name.getMethodName();
  TableName tn = TableName.valueOf(methodName);
  ColumnFamilyDescriptor cfd =
      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(methodName)).build();
  TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfd).build();
  RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build();
  this.rss = new MockRegionServerServices(HTU.getConfiguration());
  this.region = HRegion.openHRegion(ri, td, null, HTU.getConfiguration(), this.rss, null);
}
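A region opened this way should be released after each test. The teardown below is a hedged sketch of the usual counterpart; closeRegionAndWAL is the standard HBaseTestingUtility helper for this, though the original test's cleanup may differ.

@After
public void after() throws IOException {
  // Sketch: close the region (and its WAL) opened in before().
  if (this.region != null) {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
}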
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class MasterProcedureTestingUtility, method validateColumnFamilyModification.
public static void validateColumnFamilyModification(final HMaster master,
    final TableName tableName, final String family, ColumnFamilyDescriptor columnDescriptor)
    throws IOException {
  TableDescriptor htd = master.getTableDescriptors().get(tableName);
  assertTrue(htd != null);
  ColumnFamilyDescriptor hcfd = htd.getColumnFamily(Bytes.toBytes(family));
  assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor));
}
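A hedged example of calling this validator after a schema change is shown below; the table name, family name, and expected descriptor are illustrative assumptions, not taken from a real test.

// Sketch: after modifying family "cf1", assert the master now holds the
// expected descriptor. All values here are illustrative only.
ColumnFamilyDescriptor expected = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("cf1"))
    .setMaxVersions(3)
    .build();
MasterProcedureTestingUtility.validateColumnFamilyModification(
    master, TableName.valueOf("testTable"), "cf1", expected);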