Example 6 with CompoundConfiguration

Use of org.apache.hadoop.hbase.CompoundConfiguration in project hbase by apache.

From the class TableDescriptorChecker, the method sanityCheck.
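Before the method itself, here is a minimal, self-contained sketch of the rule its first line relies on: sources added later to a CompoundConfiguration shadow earlier ones, which is what lets table-descriptor values override the cluster configuration. The key and values below are illustrative, and CompoundConfiguration is an HBase-internal class, so treat this as a sketch rather than a supported API.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;

public class CompoundConfigurationDemo {
    public static void main(String[] args) {
        // Cluster-level configuration, as would come from hbase-site.xml.
        Configuration site = new Configuration(false);
        site.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Table-level overrides, analogous to the TableDescriptor values
        // layered on via addBytesMap() in sanityCheck() below.
        Map<String, String> tableValues = new HashMap<>();
        tableValues.put("hbase.hregion.memstore.flush.size",
            String.valueOf(64L * 1024 * 1024));

        // Later sources shadow earlier ones: the table-level map wins.
        CompoundConfiguration conf = new CompoundConfiguration()
            .add(site)
            .addStringMap(tableValues);

        // Prints 67108864 (the table-level override), not 134217728.
        System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1));
    }
}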

/**
 * Checks whether the table conforms to some sane limits and whether the configured
 * values (compression, etc.) will work. Throws an exception if something is wrong.
 */
public static void sanityCheck(final Configuration c, final TableDescriptor td) throws IOException {
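    // Layer the table descriptor's values over the cluster config: sources added
    // later to the CompoundConfiguration take precedence, so per-table settings
    // override hbase-site.xml.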
    CompoundConfiguration conf = new CompoundConfiguration().add(c).addBytesMap(td.getValues());
    // Setting this to true logs the warning instead of throwing exception
    boolean logWarn = false;
    if (!conf.getBoolean(TABLE_SANITY_CHECKS, DEFAULT_TABLE_SANITY_CHECKS)) {
        logWarn = true;
    }
    String tableVal = td.getValue(TABLE_SANITY_CHECKS);
    if (tableVal != null && !Boolean.parseBoolean(tableVal)) {
        logWarn = true;
    }
    // check max file size
    // 2M is the default lower limit
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L;
    // If MAX_FILESIZE is not set in the TableDescriptor and HREGION_MAX_FILESIZE is not
    // set in hbase-site.xml, fall back to maxFileSizeLowerLimit so this check is skipped.
    long maxFileSize = td.getValue(TableDescriptorBuilder.MAX_FILESIZE) == null ? conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit) : Long.parseLong(td.getValue(TableDescriptorBuilder.MAX_FILESIZE));
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
        String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + maxFileSize + ") is too small, which might cause over splitting into unmanageable " + "number of regions.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
    }
    // check flush size
    // 1M is the default lower limit
    long flushSizeLowerLimit = 1024 * 1024L;
    // If MEMSTORE_FLUSHSIZE is not set in the TableDescriptor and HREGION_MEMSTORE_FLUSH_SIZE
    // is not set in hbase-site.xml, fall back to flushSizeLowerLimit so this check is skipped.
    long flushSize = td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE) == null ? conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit) : Long.parseLong(td.getValue(TableDescriptorBuilder.MEMSTORE_FLUSHSIZE));
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
        String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + ") is too small, which might cause" + " very frequent flushing.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
    }
    // check that coprocessors and other specified plugin classes can be loaded
    try {
        checkClassLoading(conf, td);
    } catch (Exception ex) {
        warnOrThrowExceptionForFailure(logWarn, ex.getMessage(), null);
    }
    if (conf.getBoolean(MASTER_CHECK_COMPRESSION, DEFAULT_MASTER_CHECK_COMPRESSION)) {
        // check compression can be loaded
        try {
            checkCompression(td);
        } catch (IOException e) {
            warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
        }
    }
    if (conf.getBoolean(MASTER_CHECK_ENCRYPTION, DEFAULT_MASTER_CHECK_ENCRYPTION)) {
        // check encryption can be loaded
        try {
            checkEncryption(conf, td);
        } catch (IOException e) {
            warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
        }
    }
    // Verify compaction policy
    try {
        checkCompactionPolicy(conf, td);
    } catch (IOException e) {
        warnOrThrowExceptionForFailure(false, e.getMessage(), e);
    }
    // check that we have at least 1 CF
    if (td.getColumnFamilyCount() == 0) {
        String message = "Table should have at least one column family.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
    }
    // check that we have minimum 1 region replicas
    int regionReplicas = td.getRegionReplication();
    if (regionReplicas < 1) {
        String message = "Table region replication should be at least one.";
        warnOrThrowExceptionForFailure(logWarn, message, null);
    }
    // Meta table shouldn't be set as read only, otherwise it will impact region assignments
    if (td.isReadOnly() && TableName.isMetaTableName(td.getTableName())) {
        warnOrThrowExceptionForFailure(false, "Meta table can't be set as read only.", null);
    }
    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
        if (hcd.getTimeToLive() <= 0) {
            String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
            warnOrThrowExceptionForFailure(logWarn, message, null);
        }
        // check blockSize
        if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
            String message = "Block size for column family " + hcd.getNameAsString() + "  must be between 1K and 16MB.";
            warnOrThrowExceptionForFailure(logWarn, message, null);
        }
        // check versions
        if (hcd.getMinVersions() < 0) {
            String message = "Min versions for column family " + hcd.getNameAsString() + "  must be positive.";
            warnOrThrowExceptionForFailure(logWarn, message, null);
        }
        // check that minVersions <= maxVersions
        if (hcd.getMinVersions() > hcd.getMaxVersions()) {
            String message = "Min versions for column family " + hcd.getNameAsString() + " must be less than or equal to the max versions.";
            warnOrThrowExceptionForFailure(logWarn, message, null);
        }
        // check replication scope
        checkReplicationScope(hcd);
        // check bloom filter type
        checkBloomFilterType(hcd);
        // check the data replication factor; it can be 0 (the default) when the user has not
        // explicitly set it, in which case the file system's default replication factor is used.
        if (hcd.getDFSReplication() < 0) {
            String message = "HFile Replication for column family " + hcd.getNameAsString() + "  must be greater than zero.";
            warnOrThrowExceptionForFailure(logWarn, message, null);
        }
        // check in-memory compaction
        try {
            hcd.getInMemoryCompaction();
        } catch (IllegalArgumentException e) {
            warnOrThrowExceptionForFailure(logWarn, e.getMessage(), e);
        }
    }
}
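The warnOrThrowExceptionForFailure helper is not shown on this page. Judging from its call sites above (a (logWarn, message, cause) signature, with logWarn hard-coded to false for the non-bypassable checks), a plausible sketch follows; the class name here is hypothetical and the actual HBase implementation may differ, for example in its message text.

import java.io.IOException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical reconstruction inferred from the call sites above,
// not the verbatim HBase source.
final class SanityCheckFailureHandler {
    private static final Logger LOG =
        LoggerFactory.getLogger(SanityCheckFailureHandler.class);

    // When logWarn is true the failure is only logged as a warning;
    // otherwise a DoNotRetryIOException aborts the DDL operation.
    static void warnOrThrowExceptionForFailure(boolean logWarn, String message,
            Exception cause) throws IOException {
        if (!logWarn) {
            throw new DoNotRetryIOException(message, cause);
        }
        LOG.warn(message);
    }
}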
Also used: CompoundConfiguration (org.apache.hadoop.hbase.CompoundConfiguration), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), IOException (java.io.IOException), ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)

Aggregations

CompoundConfiguration (org.apache.hadoop.hbase.CompoundConfiguration): 6 usages
IOException (java.io.IOException): 5 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 4 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2 usages
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 2 usages
InterruptedIOException (java.io.InterruptedIOException): 1 usage
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 1 usage
HasMasterServices (org.apache.hadoop.hbase.coprocessor.HasMasterServices): 1 usage
HasRegionServerServices (org.apache.hadoop.hbase.coprocessor.HasRegionServerServices): 1 usage
MasterCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment): 1 usage
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 1 usage
RegionServerCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment): 1 usage
TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException): 1 usage
MasterServices (org.apache.hadoop.hbase.master.MasterServices): 1 usage
RegionServerServices (org.apache.hadoop.hbase.regionserver.RegionServerServices): 1 usage
Pair (org.apache.hadoop.hbase.util.Pair): 1 usage
MapMaker (org.apache.hbase.thirdparty.com.google.common.collect.MapMaker): 1 usage