Example 6 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class HMaster method sanityCheckTableDescriptor.

/**
   * Checks whether the table conforms to some sane limits, and configured
   * values (compression, etc) work. Throws an exception if something is wrong.
   * @throws IOException if a check fails and sanity checks are enforced rather than logged
   */
private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
    final String CONF_KEY = "hbase.table.sanity.checks";
    boolean logWarn = false;
    if (!conf.getBoolean(CONF_KEY, true)) {
        logWarn = true;
    }
    String tableVal = htd.getConfigurationValue(CONF_KEY);
    if (tableVal != null && !Boolean.valueOf(tableVal)) {
        logWarn = true;
    }
    // check max file size
    // 2M is the default lower limit
    long maxFileSizeLowerLimit = 2 * 1024 * 1024L;
    long maxFileSize = htd.getMaxFileSize();
    if (maxFileSize < 0) {
        maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit);
    }
    if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) {
        String message = "MAX_FILESIZE for table descriptor or " + "\"hbase.hregion.max.filesize\" (" + maxFileSize + ") is too small, which might cause over splitting into unmanageable " + "number of regions.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    // check flush size
    // 1M is the default lower limit
    long flushSizeLowerLimit = 1024 * 1024L;
    long flushSize = htd.getMemStoreFlushSize();
    if (flushSize < 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit);
    }
    if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) {
        String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" (" + flushSize + ") is too small, which might cause" + " very frequent flushing.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    // check that coprocessors and other specified plugin classes can be loaded
    try {
        checkClassLoading(conf, htd);
    } catch (Exception ex) {
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, ex.getMessage(), null);
    }
    // check compression can be loaded
    try {
        checkCompression(htd);
    } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
    }
    // check encryption can be loaded
    try {
        checkEncryption(conf, htd);
    } catch (IOException e) {
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e);
    }
    // Verify compaction policy
    try {
        checkCompactionPolicy(conf, htd);
    } catch (IOException e) {
        warnOrThrowExceptionForFailure(false, CONF_KEY, e.getMessage(), e);
    }
    // check that we have at least 1 CF
    if (htd.getColumnFamilyCount() == 0) {
        String message = "Table should have at least one column family.";
        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
    }
    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
        if (hcd.getTimeToLive() <= 0) {
            String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
            warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
        }
        // check blockSize
        if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
            String message = "Block size for column family " + hcd.getNameAsString() + "  must be between 1K and 16MB.";
            warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
        }
        // check versions
        if (hcd.getMinVersions() < 0) {
            String message = "Min versions for column family " + hcd.getNameAsString() + "  must be positive.";
            warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
        }
        // check minVersions <= maxVersions
        if (hcd.getMinVersions() > hcd.getMaxVersions()) {
            String message = "Min versions for column family " + hcd.getNameAsString() + " must be less than the Max versions.";
            warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
        }
        // check replication scope
        checkReplicationScope(hcd);
        // check data replication factor; it can be 0 (the default) when the user has not
        // explicitly set it, in which case the file system's default replication factor is used.
        if (hcd.getDFSReplication() < 0) {
            String message = "HFile Replication for column family " + hcd.getNameAsString() + " must be non-negative.";
            warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
        }
    // TODO: should we check coprocessors and encryption?
    }
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) BypassCoprocessorException(org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException) CoordinatedStateException(org.apache.hadoop.hbase.CoordinatedStateException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) MergeRegionException(org.apache.hadoop.hbase.exceptions.MergeRegionException) InvocationTargetException(java.lang.reflect.InvocationTargetException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) TableNotDisabledException(org.apache.hadoop.hbase.TableNotDisabledException) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) ServletException(javax.servlet.ServletException) PleaseHoldException(org.apache.hadoop.hbase.PleaseHoldException) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) UnknownRegionException(org.apache.hadoop.hbase.UnknownRegionException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) MasterNotRunningException(org.apache.hadoop.hbase.MasterNotRunningException) KeeperException(org.apache.zookeeper.KeeperException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException)
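
For reference, here is a minimal hedged sketch of a descriptor that satisfies every limit the method above enforces; the table name, family name, and sizes are illustrative placeholders, not values from the HBase source.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class SanityCheckSketch {
    public static HTableDescriptor buildConformingDescriptor() {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
        // Stay above the 2M max-file-size and 1M flush-size lower limits.
        htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
        htd.setMemStoreFlushSize(128L * 1024 * 1024);
        HColumnDescriptor hcd = new HColumnDescriptor("cf");
        // Block size must fall between 1K and 16MB.
        hcd.setBlocksize(64 * 1024);
        // TTL must be positive, and min versions must not exceed max versions.
        hcd.setTimeToLive(7 * 24 * 60 * 60);
        hcd.setMinVersions(0);
        hcd.setMaxVersions(3);
        htd.addFamily(hcd);
        // To demote failed checks to warnings for this table only:
        // htd.setConfiguration("hbase.table.sanity.checks", "false");
        return htd;
    }
}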

Example 7 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class HMaster method checkCompactionPolicy.

private void checkCompactionPolicy(Configuration conf, HTableDescriptor htd) throws IOException {
    // FIFO compaction has some requirements.
    // Note that FIFO compaction ignores periodic major compactions.
    String className = htd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (className == null) {
        className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, ExploringCompactionPolicy.class.getName());
    }
    int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
    String sv = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
        blockingFileCount = Integer.parseInt(sv);
    } else {
        blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
    }
    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
        String compactionPolicy = hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
        if (compactionPolicy == null) {
            compactionPolicy = className;
        }
        if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
            continue;
        }
        // FIFOCompaction
        String message = null;
        // 1. Check TTL
        if (hcd.getTimeToLive() == HColumnDescriptor.DEFAULT_TTL) {
            message = "Default TTL is not supported for FIFO compaction";
            throw new IOException(message);
        }
        // 2. Check min versions
        if (hcd.getMinVersions() > 0) {
            message = "MIN_VERSION > 0 is not supported for FIFO compaction";
            throw new IOException(message);
        }
        // 3. blocking file count
        String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
        if (sbfc != null) {
            blockingFileCount = Integer.parseInt(sbfc);
        }
        if (blockingFileCount < 1000) {
            message = "blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount + " is below recommended minimum of 1000";
            throw new IOException(message);
        }
    }
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException)
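
For illustration, a hedged sketch of a column family configured to clear all three FIFO checks above; the table and family names are invented, while the keys and policy class come from the classes this method references.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;

public class FifoPolicySketch {
    public static HTableDescriptor buildFifoDescriptor() {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("fifo_table"));
        HColumnDescriptor hcd = new HColumnDescriptor("cf");
        // 1. TTL must be set explicitly, not left at HColumnDescriptor.DEFAULT_TTL.
        hcd.setTimeToLive(24 * 60 * 60);
        // 2. MIN_VERSIONS must remain 0.
        hcd.setMinVersions(0);
        hcd.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
            FIFOCompactionPolicy.class.getName());
        htd.addFamily(hcd);
        // 3. The blocking store-file count must be at least 1000.
        htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000");
        return htd;
    }
}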

Example 8 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class MasterRpcServices method compactMob.

/**
   * Compacts the mob files in the current table.
   * @param request the request.
   * @param tableName the current table name.
   * @return The response of the mob file compaction.
   * @throws IOException if the table is not enabled or no mob column family qualifies
   */
private CompactRegionResponse compactMob(final CompactRegionRequest request, TableName tableName) throws IOException {
    if (!master.getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
        throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
    }
    boolean allFiles = false;
    List<HColumnDescriptor> compactedColumns = new ArrayList<>();
    HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
    byte[] family = null;
    if (request.hasFamily()) {
        family = request.getFamily().toByteArray();
        for (HColumnDescriptor hcd : hcds) {
            if (Bytes.equals(family, hcd.getName())) {
                if (!hcd.isMobEnabled()) {
                    LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
                    throw new DoNotRetryIOException("Column family " + hcd.getNameAsString() + " is not a mob column family");
                }
                compactedColumns.add(hcd);
            }
        }
    } else {
        for (HColumnDescriptor hcd : hcds) {
            if (hcd.isMobEnabled()) {
                compactedColumns.add(hcd);
            }
        }
    }
    if (compactedColumns.isEmpty()) {
        LOG.error("No mob column families are assigned in the mob compaction");
        throw new DoNotRetryIOException("No mob column families are assigned in the mob compaction");
    }
    if (request.hasMajor() && request.getMajor()) {
        allFiles = true;
    }
    String familyLogMsg = (family != null) ? Bytes.toString(family) : "";
    if (LOG.isTraceEnabled()) {
        LOG.trace("User-triggered mob compaction requested for table: " + tableName.getNameAsString() + " for column family: " + familyLogMsg);
    }
    master.requestMobCompaction(tableName, compactedColumns, allFiles);
    return CompactRegionResponse.newBuilder().build();
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList)
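
The DoNotRetryIOException above fires when the requested family is not MOB-enabled. A small hedged sketch (table name, family name, and threshold are placeholders) of marking a family as a mob column family so it qualifies for this compaction path:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class MobFamilySketch {
    public static HTableDescriptor buildMobDescriptor() {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("mob_table"));
        HColumnDescriptor hcd = new HColumnDescriptor("cf");
        // Cell values larger than the threshold are stored as mob files.
        hcd.setMobEnabled(true);
        hcd.setMobThreshold(100L * 1024);  // 100KB, illustrative only
        htd.addFamily(hcd);
        return htd;
    }
}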

Example 9 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class RegionSplitter method splitScan.

static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList, final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo) throws IOException, InterruptedException {
    LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
    // Get table info
    Pair<Path, Path> tableDirAndSplitFile = getTableDirAndSplitFile(connection.getConfiguration(), tableName);
    Path tableDir = tableDirAndSplitFile.getFirst();
    FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
    // Clear the cache to forcibly refresh region information
    ((ClusterConnection) connection).clearRegionCache();
    HTableDescriptor htd = null;
    try (Table table = connection.getTable(tableName)) {
        htd = table.getTableDescriptor();
    }
    try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
        // for every region that hasn't been verified as a finished split
        for (Pair<byte[], byte[]> region : regionList) {
            byte[] start = region.getFirst();
            byte[] split = region.getSecond();
            // see if the new split daughter region has come online
            try {
                HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
                if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
                    logicalSplitting.add(region);
                    continue;
                }
            } catch (NoServerForRegionException nsfre) {
                // NSFRE will occur if the old hbase:meta entry has no server assigned
                LOG.info(nsfre);
                logicalSplitting.add(region);
                continue;
            }
            try {
                // when a daughter region is opened, a compaction is triggered
                // wait until compaction completes for both daughter regions
                LinkedList<HRegionInfo> check = Lists.newLinkedList();
                check.add(regionLocator.getRegionLocation(start).getRegionInfo());
                check.add(regionLocator.getRegionLocation(split).getRegionInfo());
                for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
                    byte[] sk = hri.getStartKey();
                    if (sk.length == 0)
                        sk = splitAlgo.firstRow();
                    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true);
                    // Check every column family for that region -- the split is complete once
                    // no reference files remain.
                    boolean refFound = false;
                    for (HColumnDescriptor c : htd.getFamilies()) {
                        if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
                            break;
                        }
                    }
                    // compaction is completed when all reference files are gone
                    if (!refFound) {
                        check.remove(hri);
                    }
                }
                if (check.isEmpty()) {
                    finished.add(region);
                } else {
                    physicalSplitting.add(region);
                }
            } catch (NoServerForRegionException nsfre) {
                LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
                physicalSplitting.add(region);
                ((ClusterConnection) connection).clearRegionCache();
            }
        }
        LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + " split wait / " + physicalSplitting.size() + " reference wait");
        return finished;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) NoServerForRegionException(org.apache.hadoop.hbase.client.NoServerForRegionException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem)
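
splitScan has default (package-private) access and is driven from RegionSplitter's rolling-split loop. A condensed sketch of that polling pattern, assuming it lives in the same package so the call compiles; the pause length is an illustrative choice, not a value from the source.

package org.apache.hadoop.hbase.util;

import java.io.IOException;
import java.util.LinkedList;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;

public class SplitScanPollingSketch {
    // Lives in org.apache.hadoop.hbase.util so the package-private splitScan is visible.
    static void waitForSplits(LinkedList<Pair<byte[], byte[]>> outstanding, Connection connection,
            TableName tableName, RegionSplitter.SplitAlgorithm splitAlgo)
            throws IOException, InterruptedException {
        while (!outstanding.isEmpty()) {
            // splitScan returns only the regions whose daughters have finished compacting;
            // drop those from the queue and poll the rest again after a pause.
            LinkedList<Pair<byte[], byte[]>> finished =
                RegionSplitter.splitScan(outstanding, connection, tableName, splitAlgo);
            outstanding.removeAll(finished);
            Thread.sleep(30 * 1000);
        }
    }
}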

Example 10 with HColumnDescriptor

use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

the class ProtobufUtil method convertToHTableDesc.

/**
   * Converts a TableSchema to HTableDescriptor
   * @param ts A pb TableSchema instance.
   * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
   */
public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
    List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
    HColumnDescriptor[] hcds = new HColumnDescriptor[list.size()];
    int index = 0;
    for (ColumnFamilySchema cfs : list) {
        hcds[index++] = ProtobufUtil.convertToHColumnDesc(cfs);
    }
    HTableDescriptor htd = new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()));
    for (HColumnDescriptor hcd : hcds) {
        htd.addFamily(hcd);
    }
    for (BytesBytesPair a : ts.getAttributesList()) {
        htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
    }
    for (NameStringPair a : ts.getConfigurationList()) {
        htd.setConfiguration(a.getName(), a.getValue());
    }
    return htd;
}
Also used : ColumnFamilySchema(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) NameStringPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair) BytesBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
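
A hedged round-trip sketch of this conversion; it assumes the companion convertToTableSchema method in the same ProtobufUtil class for the opposite direction, and the table name and attributes are placeholders.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;

public class PbRoundTripSketch {
    public static void main(String[] args) {
        HTableDescriptor original = new HTableDescriptor(TableName.valueOf("pb_table"));
        original.addFamily(new HColumnDescriptor("cf"));
        original.setConfiguration("example.key", "example.value");
        // Descriptor -> pb TableSchema -> descriptor; should print true if all
        // families, attributes, and configuration entries round-trip.
        TableSchema ts = ProtobufUtil.convertToTableSchema(original);
        HTableDescriptor restored = ProtobufUtil.convertToHTableDesc(ts);
        System.out.println(original.equals(restored));
    }
}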

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 671 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 554 uses
Test (org.junit.Test): 358 uses
TableName (org.apache.hadoop.hbase.TableName): 200 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 137 uses
Put (org.apache.hadoop.hbase.client.Put): 132 uses
Table (org.apache.hadoop.hbase.client.Table): 117 uses
Admin (org.apache.hadoop.hbase.client.Admin): 110 uses
IOException (java.io.IOException): 109 uses
Path (org.apache.hadoop.fs.Path): 81 uses
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 71 uses
ArrayList (java.util.ArrayList): 66 uses
Configuration (org.apache.hadoop.conf.Configuration): 65 uses
Connection (org.apache.hadoop.hbase.client.Connection): 51 uses
Scan (org.apache.hadoop.hbase.client.Scan): 50 uses
Result (org.apache.hadoop.hbase.client.Result): 45 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 44 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 42 uses
Connection (java.sql.Connection): 41 uses
Properties (java.util.Properties): 38 uses