Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
In class HMaster, method sanityCheckTableDescriptor.
/**
 * Checks whether the table conforms to some sane limits, and configured
 * values (compression, etc) work. Throws an exception if something is wrong.
 * @throws IOException if a check fails and sanity checks are not configured to merely warn
 */
private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
  final String CONF_KEY = "hbase.table.sanity.checks";
  boolean logWarn = false;
  if (!conf.getBoolean(CONF_KEY, true)) {
    logWarn = true;
  }
  String tableVal = htd.getConfigurationValue(CONF_KEY);
  if (tableVal != null && !Boolean.valueOf(tableVal)) {
    logWarn = true;
  }
  // check max file size
  long maxFileSizeLowerLimit = 2 * 1024 * 1024L; // 2M is the default lower limit
  long maxFileSize = htd.getMaxFileSize();
  if (maxFileSize < 0) {
    maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
  }
  if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
    String message = "MAX_FILESIZE for table descriptor or "
        + "\"hbase.hregion.max.filesize\" (" + maxFileSize
        + ") is too small, which might cause over splitting into unmanageable "
        + "number of regions.";
    warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
  }
  // check flush size
  long flushSizeLowerLimit = 1024 * 1024L; // 1M is the default lower limit
  long flushSize = htd.getMemStoreFlushSize();
  if (flushSize < 0) {
    flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
  }
  if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
    String message = "MEMSTORE_FLUSHSIZE for table descriptor or "
        + "\"hbase.hregion.memstore.flush.size\" (" + flushSize
        + ") is too small, which might cause very frequent flushing.";
    warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
  }
  // check that coprocessors and other specified plugin classes can be loaded
  try {
    checkClassLoading(conf, htd);
  } catch (Exception ex) {
    warnOrThrowExceptionForFailure(logWarn, CONF_KEY, ex.getMessage(), null);
  }
  // check compression can be loaded
  try {
    checkCompression(htd);
  } catch (IOException e) {
    warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
  }
  // check encryption can be loaded
  try {
    checkEncryption(conf, htd);
  } catch (IOException e) {
    warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
  }
  // Verify compaction policy
  try {
    checkCompactionPolicy(conf, htd);
  } catch (IOException e) {
    warnOrThrowExceptionForFailure(false, CONF_KEY, e.getMessage(), e);
  }
  // check that we have at least 1 CF
  if (htd.getColumnFamilyCount() == 0) {
    String message = "Table should have at least one column family.";
    warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
  }
  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
    if (hcd.getTimeToLive() <= 0) {
      String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    // check blockSize
    if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
      String message = "Block size for column family " + hcd.getNameAsString()
          + " must be between 1K and 16MB.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    // check versions
    if (hcd.getMinVersions() < 0) {
      String message = "Min versions for column family " + hcd.getNameAsString()
          + " must be positive.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    // check minVersions <= maxVersions
    if (hcd.getMinVersions() > hcd.getMaxVersions()) {
      String message = "Min versions for column family " + hcd.getNameAsString()
          + " must be less than the Max versions.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    // check replication scope
    checkReplicationScope(hcd);
    // check data replication factor; it can be 0 (the default) when the user has not explicitly
    // set the value, in this case we use default replication factor set in the file system.
    if (hcd.getDFSReplication() < 0) {
      String message = "HFile Replication for column family " + hcd.getNameAsString()
          + " must be greater than zero.";
      warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    // TODO: should we check coprocessors and encryption ?
  }
}
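For reference, a client-side descriptor built roughly as follows would stay within the limits checked above. This is a minimal sketch: the class name, table name, family name, and the specific sizes are illustrative, not taken from the HBase sources.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class SanityCheckFriendlyDescriptor {
  // Builds a descriptor that satisfies sanityCheckTableDescriptor (names/sizes are made up).
  public static HTableDescriptor build() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    htd.setMaxFileSize(10L * 1024 * 1024 * 1024);   // well above the 2M lower limit
    htd.setMemStoreFlushSize(128L * 1024 * 1024);   // well above the 1M lower limit

    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    hcd.setTimeToLive(7 * 24 * 60 * 60);            // TTL must be positive (seconds)
    hcd.setBlocksize(64 * 1024);                    // block size must be between 1K and 16MB
    hcd.setMinVersions(0);                          // min versions may not be negative ...
    hcd.setMaxVersions(3);                          // ... and may not exceed max versions
    htd.addFamily(hcd);                             // at least one column family is required

    // Setting "hbase.table.sanity.checks" to "false" on the descriptor would turn
    // failures of these checks into warnings for this table only.
    return htd;
  }
}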
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
In class HMaster, method checkCompactionPolicy.
private void checkCompactionPolicy(Configuration conf, HTableDescriptor htd) throws IOException {
  // FIFO compaction has some requirements
  // Actually FCP ignores periodic major compactions
  String className =
      htd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
  if (className == null) {
    className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        ExploringCompactionPolicy.class.getName());
  }
  int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
  String sv = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
  if (sv != null) {
    blockingFileCount = Integer.parseInt(sv);
  } else {
    blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
  }
  for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
    String compactionPolicy =
        hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (compactionPolicy == null) {
      compactionPolicy = className;
    }
    if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
      continue;
    }
    // FIFOCompaction
    String message = null;
    // 1. Check TTL
    if (hcd.getTimeToLive() == HColumnDescriptor.DEFAULT_TTL) {
      message = "Default TTL is not supported for FIFO compaction";
      throw new IOException(message);
    }
    // 2. Check min versions
    if (hcd.getMinVersions() > 0) {
      message = "MIN_VERSION > 0 is not supported for FIFO compaction";
      throw new IOException(message);
    }
    // 3. blocking file count
    String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sbfc != null) {
      blockingFileCount = Integer.parseInt(sbfc);
    }
    if (blockingFileCount < 1000) {
      message = "blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' "
          + blockingFileCount + " is below recommended minimum of 1000";
      throw new IOException(message);
    }
  }
}
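To illustrate what these FIFO-specific checks expect, a column family might be configured along the following lines. The class and table names are illustrative; the configuration keys and policy class are the ones referenced in the method above.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;

public class FifoCompactionExample {
  // Sketch of a descriptor that checkCompactionPolicy would accept for FIFO compaction.
  public static HTableDescriptor build() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("fifo_table"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    // Select FIFO compaction for this family.
    hcd.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
        FIFOCompactionPolicy.class.getName());
    // A non-default, positive TTL is mandatory; MIN_VERSIONS stays at its default of 0.
    hcd.setTimeToLive(24 * 60 * 60);
    htd.addFamily(hcd);
    // The blocking store file count must be at least 1000.
    htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000");
    return htd;
  }
}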
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
In class MasterRpcServices, method compactMob.
/**
 * Compacts the mob files in the current table.
 * @param request the request.
 * @param tableName the current table name.
 * @return The response of the mob file compaction.
 * @throws IOException
 */
private CompactRegionResponse compactMob(final CompactRegionRequest request, TableName tableName)
    throws IOException {
  if (!master.getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
    throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
  }
  boolean allFiles = false;
  List<HColumnDescriptor> compactedColumns = new ArrayList<>();
  HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
  byte[] family = null;
  if (request.hasFamily()) {
    family = request.getFamily().toByteArray();
    for (HColumnDescriptor hcd : hcds) {
      if (Bytes.equals(family, hcd.getName())) {
        if (!hcd.isMobEnabled()) {
          LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
          throw new DoNotRetryIOException("Column family " + hcd.getNameAsString()
              + " is not a mob column family");
        }
        compactedColumns.add(hcd);
      }
    }
  } else {
    for (HColumnDescriptor hcd : hcds) {
      if (hcd.isMobEnabled()) {
        compactedColumns.add(hcd);
      }
    }
  }
  if (compactedColumns.isEmpty()) {
    LOG.error("No mob column families are assigned in the mob compaction");
    throw new DoNotRetryIOException("No mob column families are assigned in the mob compaction");
  }
  if (request.hasMajor() && request.getMajor()) {
    allFiles = true;
  }
  String familyLogMsg = (family != null) ? Bytes.toString(family) : "";
  if (LOG.isTraceEnabled()) {
    LOG.trace("User-triggered mob compaction requested for table: "
        + tableName.getNameAsString() + " for column family: " + familyLogMsg);
  }
  master.requestMobCompaction(tableName, compactedColumns, allFiles);
  return CompactRegionResponse.newBuilder().build();
}
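Whether a family participates in this compaction at all is decided by its MOB settings on the HColumnDescriptor. A minimal sketch of declaring such a family follows; the class name, table name, family name, and threshold are illustrative only.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class MobFamilyExample {
  // Declares a MOB-enabled family; compactMob only accepts families for which
  // isMobEnabled() returns true, either the one named in the request or all of them.
  public static HTableDescriptor build() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("mob_table"));
    HColumnDescriptor hcd = new HColumnDescriptor("pictures");
    hcd.setMobEnabled(true);          // marks the family as a mob column family
    hcd.setMobThreshold(100 * 1024);  // cells larger than ~100 KB are stored as MOB files
    htd.addFamily(hcd);
    return htd;
  }
}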
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
In class RegionSplitter, method splitScan.
static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList,
    final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo)
    throws IOException, InterruptedException {
  LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
  LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
  LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();

  // Get table info
  Pair<Path, Path> tableDirAndSplitFile =
      getTableDirAndSplitFile(connection.getConfiguration(), tableName);
  Path tableDir = tableDirAndSplitFile.getFirst();
  FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
  // Clear the cache to forcibly refresh region information
  ((ClusterConnection) connection).clearRegionCache();
  HTableDescriptor htd = null;
  try (Table table = connection.getTable(tableName)) {
    htd = table.getTableDescriptor();
  }
  try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
    // for every region that hasn't been verified as a finished split
    for (Pair<byte[], byte[]> region : regionList) {
      byte[] start = region.getFirst();
      byte[] split = region.getSecond();

      // see if the new split daughter region has come online
      try {
        HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
        if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
          logicalSplitting.add(region);
          continue;
        }
      } catch (NoServerForRegionException nsfre) {
        // NSFRE will occur if the old hbase:meta entry has no server assigned
        LOG.info(nsfre);
        logicalSplitting.add(region);
        continue;
      }

      try {
        // when a daughter region is opened, a compaction is triggered
        // wait until compaction completes for both daughter regions
        LinkedList<HRegionInfo> check = Lists.newLinkedList();
        check.add(regionLocator.getRegionLocation(start).getRegionInfo());
        check.add(regionLocator.getRegionLocation(split).getRegionInfo());
        for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
          byte[] sk = hri.getStartKey();
          if (sk.length == 0) {
            sk = splitAlgo.firstRow();
          }
          HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
              connection.getConfiguration(), fs, tableDir, hri, true);

          // Check every Column Family for that region -- check does not have references.
          boolean refFound = false;
          for (HColumnDescriptor c : htd.getFamilies()) {
            if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
              break;
            }
          }

          // compaction is completed when all reference files are gone
          if (!refFound) {
            check.remove(hri);
          }
        }
        if (check.isEmpty()) {
          finished.add(region);
        } else {
          physicalSplitting.add(region);
        }
      } catch (NoServerForRegionException nsfre) {
        LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
        physicalSplitting.add(region);
        ((ClusterConnection) connection).clearRegionCache();
      }
    }

    LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size()
        + " split wait / " + physicalSplitting.size() + " reference wait");

    return finished;
  }
}
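splitScan reports progress for a single pass only, so a caller has to poll it until every pending split has compacted away its reference files. The helper below is a hypothetical sketch of such a loop (its name and the 30-second sleep are made up); since splitScan is package-private, it would have to live alongside RegionSplitter, and it mirrors roughly how rollingSplit drives the method internally.

import java.util.LinkedList;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;

// Hypothetical helper: keeps calling splitScan until no region is left waiting
// on logical splitting or on reference-file cleanup.
static void waitForSplits(LinkedList<Pair<byte[], byte[]>> outstanding, Connection connection,
    TableName tableName, SplitAlgorithm splitAlgo) throws Exception {
  while (!outstanding.isEmpty()) {
    // Regions returned as "finished" have no reference files left in any column family.
    outstanding.removeAll(splitScan(outstanding, connection, tableName, splitAlgo));
    if (!outstanding.isEmpty()) {
      Thread.sleep(30 * 1000); // give the daughter-region compactions time to run
    }
  }
}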
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
In class ProtobufUtil, method convertToHTableDesc.
/**
 * Converts a TableSchema to HTableDescriptor
 * @param ts A pb TableSchema instance.
 * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
 */
public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
  List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
  HColumnDescriptor[] hcds = new HColumnDescriptor[list.size()];
  int index = 0;
  for (ColumnFamilySchema cfs : list) {
    hcds[index++] = ProtobufUtil.convertToHColumnDesc(cfs);
  }
  HTableDescriptor htd = new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()));
  for (HColumnDescriptor hcd : hcds) {
    htd.addFamily(hcd);
  }
  for (BytesBytesPair a : ts.getAttributesList()) {
    htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
  }
  for (NameStringPair a : ts.getConfigurationList()) {
    htd.setConfiguration(a.getName(), a.getValue());
  }
  return htd;
}
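A small usage sketch, assuming a pb TableSchema already obtained from a master RPC response; the wrapper class, method name, and printout are illustrative, and the import paths may differ between the shaded and non-shaded protobuf modules depending on the branch.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;

public class TableSchemaToDescriptor {
  // Converts an RPC TableSchema back into a client-side HTableDescriptor
  // and walks its column families.
  public static void dump(TableSchema ts) {
    HTableDescriptor htd = ProtobufUtil.convertToHTableDesc(ts);
    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      System.out.println(hcd.getNameAsString() + " maxVersions=" + hcd.getMaxVersions()
          + " ttl=" + hcd.getTimeToLive());
    }
  }
}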