
Example 11 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class RegionSplitter, method splitScan:

static LinkedList<Pair<byte[], byte[]>> splitScan(LinkedList<Pair<byte[], byte[]>> regionList, final Connection connection, final TableName tableName, SplitAlgorithm splitAlgo) throws IOException, InterruptedException {
    LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
    LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
    // Get table info
    Pair<Path, Path> tableDirAndSplitFile = getTableDirAndSplitFile(connection.getConfiguration(), tableName);
    Path tableDir = tableDirAndSplitFile.getFirst();
    FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
    // Clear the cache to forcibly refresh region information
    ((ClusterConnection) connection).clearRegionCache();
    HTableDescriptor htd = null;
    try (Table table = connection.getTable(tableName)) {
        htd = table.getTableDescriptor();
    }
    try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
        // for every region that hasn't been verified as a finished split
        for (Pair<byte[], byte[]> region : regionList) {
            byte[] start = region.getFirst();
            byte[] split = region.getSecond();
            // see if the new split daughter region has come online
            try {
                HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
                if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
                    logicalSplitting.add(region);
                    continue;
                }
            } catch (NoServerForRegionException nsfre) {
                // NSFRE will occur if the old hbase:meta entry has no server assigned
                LOG.info(nsfre);
                logicalSplitting.add(region);
                continue;
            }
            try {
                // when a daughter region is opened, a compaction is triggered
                // wait until compaction completes for both daughter regions
                LinkedList<HRegionInfo> check = Lists.newLinkedList();
                check.add(regionLocator.getRegionLocation(start).getRegionInfo());
                check.add(regionLocator.getRegionLocation(split).getRegionInfo());
                for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
                    byte[] sk = hri.getStartKey();
                    if (sk.length == 0)
                        sk = splitAlgo.firstRow();
                    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(connection.getConfiguration(), fs, tableDir, hri, true);
                    // Check whether any column family of this region still has reference files.
                    boolean refFound = false;
                    for (HColumnDescriptor c : htd.getFamilies()) {
                        if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
                            break;
                        }
                    }
                    // compaction is completed when all reference files are gone
                    if (!refFound) {
                        check.remove(hri);
                    }
                }
                if (check.isEmpty()) {
                    finished.add(region);
                } else {
                    physicalSplitting.add(region);
                }
            } catch (NoServerForRegionException nsfre) {
                LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
                physicalSplitting.add(region);
                ((ClusterConnection) connection).clearRegionCache();
            }
        }
        LOG.debug("Split Scan: " + finished.size() + " finished / " + logicalSplitting.size() + " split wait / " + physicalSplitting.size() + " reference wait");
        return finished;
    }
}
Also used: Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), NoServerForRegionException (org.apache.hadoop.hbase.client.NoServerForRegionException), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection), HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem)
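
The interesting part of the check above is the reference-file scan: a daughter region is only physically split once compaction has rewritten every store, so that no column family directory contains reference files any more. Below is a minimal sketch of that test on its own, as a hypothetical helper (the method name and parameter list are assumptions, not part of RegionSplitter; imports match the snippet above plus org.apache.hadoop.conf.Configuration):

static boolean stillHasReferences(Configuration conf, FileSystem fs, Path tableDir, HRegionInfo hri, HTableDescriptor htd) throws IOException {
    // Open the region's directory layout read-only; no region server is contacted.
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, true);
    for (HColumnDescriptor family : htd.getFamilies()) {
        if (regionFs.hasReferences(family.getNameAsString())) {
            // This store still points at files owned by the parent region.
            return true;
        }
    }
    // Every reference file has been compacted away -- the split is physically complete.
    return false;
}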

Example 12 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class ProtobufUtil, method convertToHTableDesc:

/**
   * Converts a TableSchema to HTableDescriptor
   * @param ts A pb TableSchema instance.
   * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
   */
public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
    List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
    HColumnDescriptor[] hcds = new HColumnDescriptor[list.size()];
    int index = 0;
    for (ColumnFamilySchema cfs : list) {
        hcds[index++] = ProtobufUtil.convertToHColumnDesc(cfs);
    }
    HTableDescriptor htd = new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()));
    for (HColumnDescriptor hcd : hcds) {
        htd.addFamily(hcd);
    }
    for (BytesBytesPair a : ts.getAttributesList()) {
        htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
    }
    for (NameStringPair a : ts.getConfigurationList()) {
        htd.setConfiguration(a.getName(), a.getValue());
    }
    return htd;
}
Also used: ColumnFamilySchema (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), NameStringPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair), BytesBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
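
The conversion is a straight field-by-field copy: column families first, then table-level attribute pairs, then per-table configuration entries. A short usage sketch, assuming ts is a TableSchema obtained from a protobuf message elsewhere (the variable names are illustrative assumptions):

HTableDescriptor htd = ProtobufUtil.convertToHTableDesc(ts);
for (HColumnDescriptor hcd : htd.getFamilies()) {
    // Inspect the families carried over from the pb schema.
    System.out.println(hcd.getNameAsString() + " maxVersions=" + hcd.getMaxVersions());
}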

Example 13 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class IntegrationTestMTTR, method setupTables:

private static void setupTables() throws IOException {
    // Get the table name.
    tableName = TableName.valueOf(util.getConfiguration().get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"));
    loadTableName = TableName.valueOf(util.getConfiguration().get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool"));
    if (util.getAdmin().tableExists(tableName)) {
        util.deleteTable(tableName);
    }
    if (util.getAdmin().tableExists(loadTableName)) {
        util.deleteTable(loadTableName);
    }
    // Create the table.  If this fails then fail everything.
    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
    // Make the max file size huge so that splits don't happen during the test.
    tableDescriptor.setMaxFileSize(Long.MAX_VALUE);
    HColumnDescriptor descriptor = new HColumnDescriptor(FAMILY);
    descriptor.setMaxVersions(1);
    tableDescriptor.addFamily(descriptor);
    util.getAdmin().createTable(tableDescriptor);
    // Setup the table for LoadTestTool
    int ret = loadTool.run(new String[] { "-tn", loadTableName.getNameAsString(), "-init_only" });
    assertEquals("Failed to initialize LoadTestTool", 0, ret);
}
Also used: HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
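
A possible follow-up check that the created schema actually carries these settings; it is not part of the original method and reuses util and tableName from the class above:

HTableDescriptor created = util.getAdmin().getTableDescriptor(tableName);
// setupTables() added a single family, so index 0 is the one configured above.
HColumnDescriptor cf = created.getColumnFamilies()[0];
assertEquals(1, cf.getMaxVersions());
assertEquals(Long.MAX_VALUE, created.getMaxFileSize());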

Example 14 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class RemoveColumnAction, method perform:

@Override
public void perform() throws Exception {
    HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
    HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
    if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) {
        return;
    }
    int index = random.nextInt(columnDescriptors.length);
    while (protectedColumns != null && protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
        index = random.nextInt(columnDescriptors.length);
    }
    byte[] colDescName = columnDescriptors[index].getName();
    LOG.debug("Performing action: Removing " + Bytes.toString(colDescName) + " from " + tableName.getNameAsString());
    tableDescriptor.removeFamily(colDescName);
    // Don't try the modify if we're stopping
    if (context.isStopping()) {
        return;
    }
    admin.modifyTable(tableName, tableDescriptor);
}
Also used: HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
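
For comparison, the inverse operation (adding a family back) follows the same descriptor-then-modifyTable pattern. A minimal sketch, assuming the same admin and tableName fields and a hypothetical family name "new_cf":

HTableDescriptor htd = admin.getTableDescriptor(tableName);
if (!htd.hasFamily(Bytes.toBytes("new_cf"))) {
    // addFamily only changes the in-memory descriptor; modifyTable applies it to the cluster.
    htd.addFamily(new HColumnDescriptor("new_cf"));
    admin.modifyTable(tableName, htd);
}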

Example 15 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class ChangeCompressionAction, method perform:

@Override
public void perform() throws Exception {
    HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
    HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
    if (columnDescriptors == null || columnDescriptors.length == 0) {
        return;
    }
    // Possible compression algorithms. If an algorithm is not supported,
    // modifyTable will fail, so there is no harm.
    Algorithm[] possibleAlgos = Algorithm.values();
    // Since not every compression algorithm is supported,
    // let's use the same algorithm for all column families.
    // If an unsupported compression algorithm is chosen, pick a different one.
    // This is to work around the issue that modifyTable() does not throw remote
    // exception.
    Algorithm algo;
    do {
        algo = possibleAlgos[random.nextInt(possibleAlgos.length)];
        try {
            Compressor c = algo.getCompressor();
            // call returnCompressor() to release the Compressor
            algo.returnCompressor(c);
            break;
        } catch (Throwable t) {
            LOG.info("Performing action: Changing compression algorithms to " + algo + " is not supported, pick another one");
        }
    } while (true);
    LOG.debug("Performing action: Changing compression algorithms on " + tableName.getNameAsString() + " to " + algo);
    for (HColumnDescriptor descriptor : columnDescriptors) {
        if (random.nextBoolean()) {
            descriptor.setCompactionCompressionType(algo);
        } else {
            descriptor.setCompressionType(algo);
        }
    }
    // Don't try the modify if we're stopping
    if (context.isStopping()) {
        return;
    }
    admin.modifyTable(tableName, tableDescriptor);
}
Also used: HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), Compressor (org.apache.hadoop.io.compress.Compressor), Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
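
An alternative to the do/while probe is to compute the set of usable algorithms once and pick from that. A sketch of the same idea (the supported list, the java.util imports, and reuse of the class's random field are assumptions):

List<Algorithm> supported = new ArrayList<>();
for (Algorithm candidate : Algorithm.values()) {
    try {
        // getCompressor() fails if the codec or its native libraries are unavailable.
        Compressor c = candidate.getCompressor();
        candidate.returnCompressor(c);
        supported.add(candidate);
    } catch (Throwable t) {
        // Skip algorithms that cannot be instantiated on this classpath.
    }
}
Algorithm algo = supported.get(random.nextInt(supported.size()));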

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 679
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 561
Test (org.junit.Test) 358
TableName (org.apache.hadoop.hbase.TableName) 200
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 137
Put (org.apache.hadoop.hbase.client.Put) 132
Table (org.apache.hadoop.hbase.client.Table) 118
IOException (java.io.IOException) 112
Admin (org.apache.hadoop.hbase.client.Admin) 112
Path (org.apache.hadoop.fs.Path) 81
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin) 74
ArrayList (java.util.ArrayList) 66
Configuration (org.apache.hadoop.conf.Configuration) 65
Connection (org.apache.hadoop.hbase.client.Connection) 52
Scan (org.apache.hadoop.hbase.client.Scan) 50
Result (org.apache.hadoop.hbase.client.Result) 45
FileSystem (org.apache.hadoop.fs.FileSystem) 44
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 42
Connection (java.sql.Connection) 41
Properties (java.util.Properties) 38