
Example 61 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class AccessController, method createACLTable.

/**
 * Create the ACL table.
 * @throws IOException if the ACL table cannot be created
 */
private static void createACLTable(Admin admin) throws IOException {
    /**
     * Column family descriptor for the ACL table: single version, in-memory,
     * block-cached, small blocks, no bloom filter, not replicated.
     */
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
            .newBuilder(PermissionStorage.ACL_LIST_FAMILY)
            .setMaxVersions(1)
            .setInMemory(true)
            .setBlockCacheEnabled(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.NONE)
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            .build();
    TableDescriptor td = TableDescriptorBuilder
            .newBuilder(PermissionStorage.ACL_TABLE_NAME)
            .setColumnFamily(cfd)
            .build();
    admin.createTable(td);
}
Also used: ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
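
For context, the same builder pattern applied to an ordinary user table looks like the sketch below. This is a minimal illustration, not HBase source; the table name "demo" and the family name "f" are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Single-version, in-memory family, mirroring the ACL example above.
            ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("f"))  // hypothetical family name
                    .setMaxVersions(1)
                    .setInMemory(true)
                    .build();
            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("demo"))  // hypothetical table name
                    .setColumnFamily(cfd)
                    .build();
            admin.createTable(td);
        }
    }
}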

Example 62 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class RegionSplitter, method splitScan.

static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList,
        final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo)
        throws IOException, InterruptedException {
    LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
    // Get table info
    Pair<Path, Path> tableDirAndSplitFile = getTableDirAndSplitFile(connection.getConfiguration(), tableName);
    Path tableDir = tableDirAndSplitFile.getFirst();
    FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
    // Clear the cache to forcibly refresh region information
    connection.clearRegionLocationCache();
    TableDescriptor htd = null;
    try (Table table = connection.getTable(tableName)) {
        htd = table.getDescriptor();
    }
    try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
        // for every region that hasn't been verified as a finished split
        for (Pair<byte[], byte[]> region : regionList) {
            byte[] start = region.getFirst();
            byte[] split = region.getSecond();
            // see if the new split daughter region has come online
            try {
                RegionInfo dri = regionLocator.getRegionLocation(split, true).getRegion();
                if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
                    logicalSplitting.add(region);
                    continue;
                }
            } catch (NoServerForRegionException nsfre) {
                // NSFRE will occur if the old hbase:meta entry has no server assigned
                LOG.info(nsfre.toString(), nsfre);
                logicalSplitting.add(region);
                continue;
            }
            try {
                // when a daughter region is opened, a compaction is triggered
                // wait until compaction completes for both daughter regions
                LinkedList<RegionInfo> check = Lists.newLinkedList();
                check.add(regionLocator.getRegionLocation(start).getRegion());
                check.add(regionLocator.getRegionLocation(split).getRegion());
                for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) {
                    byte[] sk = hri.getStartKey();
                    if (sk.length == 0) {
                        sk = splitAlgo.firstRow();
                    }
                    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true);
                    // Check every column family of the region for remaining reference files.
                    boolean refFound = false;
                    for (ColumnFamilyDescriptor c : htd.getColumnFamilies()) {
                        if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
                            break;
                        }
                    }
                    // compaction is completed when all reference files are gone
                    if (!refFound) {
                        check.remove(hri);
                    }
                }
                if (check.isEmpty()) {
                    finished.add(region);
                } else {
                    physicalSplitting.add(region);
                }
            } catch (NoServerForRegionException nsfre) {
                LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
                physicalSplitting.add(region);
                connection.clearRegionLocationCache();
            }
        }
        LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + " split wait / " + physicalSplitting.size() + " reference wait");
        return finished;
    }
}
Also used: Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) NoServerForRegionException(org.apache.hadoop.hbase.client.NoServerForRegionException) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem)
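
The decision at the heart of splitScan is that a daughter region's post-split compaction is finished once no column family still holds reference files. Isolated as a helper, that check looks like the sketch below; the method name hasAnyReferences is hypothetical, and the imports are the ones shown in the "Also used" list above plus java.io.IOException.

// Hypothetical helper extracting the reference-file check from splitScan.
// Returns true while any column family of the region still has reference
// files, i.e. the compaction triggered by the split has not completed.
static boolean hasAnyReferences(HRegionFileSystem regionFs, TableDescriptor htd)
        throws IOException {
    for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
        if (regionFs.hasReferences(cfd.getNameAsString())) {
            return true;
        }
    }
    return false;
}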

Example 63 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class TableDescriptorChecker, method checkCompactionPolicy.

private static void checkCompactionPolicy(Configuration conf, TableDescriptor td) throws IOException {
    // FIFO compaction has some requirements
    // Actually FCP ignores periodic major compactions
    String className = td.getValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
    if (className == null) {
        className = conf.get(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, ExploringCompactionPolicy.class.getName());
    }
    int blockingFileCount = HStore.DEFAULT_BLOCKING_STOREFILE_COUNT;
    String sv = td.getValue(HStore.BLOCKING_STOREFILES_KEY);
    if (sv != null) {
        blockingFileCount = Integer.parseInt(sv);
    } else {
        blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
    }
    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
        String compactionPolicy = hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
        if (compactionPolicy == null) {
            compactionPolicy = className;
        }
        if (!compactionPolicy.equals(FIFOCompactionPolicy.class.getName())) {
            continue;
        }
        // FIFOCompaction
        String message = null;
        // 1. Check TTL
        if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
            message = "Default TTL is not supported for FIFO compaction";
            throw new IOException(message);
        }
        // 2. Check min versions
        if (hcd.getMinVersions() > 0) {
            message = "MIN_VERSION > 0 is not supported for FIFO compaction";
            throw new IOException(message);
        }
        // 3. blocking file count
        sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
        if (sv != null) {
            blockingFileCount = Integer.parseInt(sv);
        }
        if (blockingFileCount < 1000) {
            message = "Blocking file count '" + HStore.BLOCKING_STOREFILES_KEY + "' " + blockingFileCount + " is below recommended minimum of 1000 for column family " + hcd.getNameAsString();
            throw new IOException(message);
        }
    }
}
Also used: DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)
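
Read the other way around, a column family only passes these checks if it has an explicit TTL, MIN_VERSIONS of 0, and a blocking store file count of at least 1000. A minimal sketch of a descriptor that satisfies them follows; the table name "fifo_demo", the family name "events", and the chosen TTL are hypothetical, and the snippet assumes the imports from the examples above plus DefaultStoreEngine, FIFOCompactionPolicy, and HStore from org.apache.hadoop.hbase.regionserver.

// Hypothetical FIFO-compaction column family that would pass checkCompactionPolicy:
// explicit TTL, default MIN_VERSIONS (0), and a raised blocking store file count.
ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("events"))
        .setTimeToLive(24 * 60 * 60)  // one day in seconds; the default "forever" TTL is rejected
        .setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
                FIFOCompactionPolicy.class.getName())
        .setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "1000")
        .build();
TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("fifo_demo"))
        .setColumnFamily(cfd)
        .build();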

Example 64 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class TestCoreRegionCoprocessor, method before.

@Before
public void before() throws IOException {
    String methodName = this.name.getMethodName();
    TableName tn = TableName.valueOf(methodName);
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(methodName)).build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cfd).build();
    RegionInfo ri = RegionInfoBuilder.newBuilder(tn).build();
    this.rss = new MockRegionServerServices(HTU.getConfiguration());
    this.region = HRegion.openHRegion(ri, td, null, HTU.getConfiguration(), this.rss, null);
}
Also used: TableName(org.apache.hadoop.hbase.TableName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) MockRegionServerServices(org.apache.hadoop.hbase.MockRegionServerServices) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Before(org.junit.Before)
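
A matching teardown would close the region opened in before(). The sketch below shows what such a method could look like; it is an assumption about the test's cleanup, not necessarily what the class actually does.

// Hypothetical @After counterpart to the setup above (import org.junit.After).
@After
public void after() throws IOException {
    if (this.region != null) {
        this.region.close();  // release the region opened in before()
    }
}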

Example 65 with ColumnFamilyDescriptor

Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class MasterProcedureTestingUtility, method validateColumnFamilyModification.

public static void validateColumnFamilyModification(final HMaster master,
        final TableName tableName, final String family,
        ColumnFamilyDescriptor columnDescriptor) throws IOException {
    TableDescriptor htd = master.getTableDescriptors().get(tableName);
    assertNotNull(htd);
    ColumnFamilyDescriptor hcfd = htd.getColumnFamily(Bytes.toBytes(family));
    assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor));
}
Also used: ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
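
A typical call site pairs this validator with a modification made through Admin, as sketched below. The family name "cf1", the maxVersions value, and the surrounding admin, master, and tableName variables are assumed test fixtures, not part of the utility itself.

// Hypothetical usage: verify that a modifyColumnFamily call took effect.
ColumnFamilyDescriptor expected = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf1"))
        .setMaxVersions(5)
        .build();
admin.modifyColumnFamily(tableName, expected);
MasterProcedureTestingUtility.validateColumnFamilyModification(
        master, tableName, "cf1", expected);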

Aggregations

ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) 199
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 95
Test (org.junit.Test) 92
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder) 78
IOException (java.io.IOException) 44
TableName (org.apache.hadoop.hbase.TableName) 44
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 42
Path (org.apache.hadoop.fs.Path) 41
Admin (org.apache.hadoop.hbase.client.Admin) 36
Configuration (org.apache.hadoop.conf.Configuration) 34
ArrayList (java.util.ArrayList) 32
Put (org.apache.hadoop.hbase.client.Put) 32
FileSystem (org.apache.hadoop.fs.FileSystem) 28
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 24
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) 22
Get (org.apache.hadoop.hbase.client.Get) 20
Result (org.apache.hadoop.hbase.client.Result) 19
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) 17
Scan (org.apache.hadoop.hbase.client.Scan) 17
Table (org.apache.hadoop.hbase.client.Table) 17