Example 56 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class HBaseFsck, method adoptHdfsOrphan.

/**
   * Orphaned regions are regions without a .regioninfo file in them. We "adopt"
   * these orphans by creating a new region and moving the column families,
   * recovered edits, and WALs into the new region dir. We determine the region's
   * start and end keys by looking at all of the hfiles inside its column
   * families to identify the min and max keys. The resulting region will
   * likely violate table integrity, but that will be dealt with by merging
   * overlapping regions.
   */
@SuppressWarnings("deprecation")
private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
    Path p = hi.getHdfsRegionDir();
    FileSystem fs = p.getFileSystem(getConf());
    FileStatus[] dirs = fs.listStatus(p);
    if (dirs == null) {
        LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + p + ". This dir could probably be deleted.");
        return;
    }
    TableName tableName = hi.getTableName();
    TableInfo tableInfo = tablesInfo.get(tableName);
    Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
    HTableDescriptor template = tableInfo.getHTD();
    // find min and max key values
    Pair<byte[], byte[]> orphanRegionRange = null;
    for (FileStatus cf : dirs) {
        String cfName = cf.getPath().getName();
        // TODO Figure out what the special dirs are
        if (cfName.startsWith(".") || cfName.equals(HConstants.SPLIT_LOGDIR_NAME))
            continue;
        FileStatus[] hfiles = fs.listStatus(cf.getPath());
        for (FileStatus hfile : hfiles) {
            byte[] start, end;
            HFile.Reader hf = null;
            try {
                CacheConfig cacheConf = new CacheConfig(getConf());
                hf = HFile.createReader(fs, hfile.getPath(), cacheConf, getConf());
                hf.loadFileInfo();
                Cell startKv = hf.getFirstKey();
                start = CellUtil.cloneRow(startKv);
                Cell endKv = hf.getLastKey();
                end = CellUtil.cloneRow(endKv);
            } catch (IOException ioe) {
                LOG.warn("Problem reading orphan file " + hfile + ", skipping");
                continue;
            } catch (NullPointerException npe) {
                LOG.warn("Orphan file " + hfile + " is possibly a corrupted HFile, skipping");
                continue;
            } finally {
                if (hf != null) {
                    hf.close();
                }
            }
            // expand the range to include the range of all hfiles
            if (orphanRegionRange == null) {
                // first range
                orphanRegionRange = new Pair<>(start, end);
            } else {
                // expand range only if the hfile is wider.
                if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) {
                    orphanRegionRange.setFirst(start);
                }
                if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0) {
                    orphanRegionRange.setSecond(end);
                }
            }
        }
    }
    if (orphanRegionRange == null) {
        LOG.warn("No data in dir " + p + ", sidelining data");
        fixes++;
        sidelineRegionDir(fs, hi);
        return;
    }
    LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + Bytes.toString(orphanRegionRange.getSecond()) + ")");
    // create new region on hdfs. move data into place.
    HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(), Bytes.add(orphanRegionRange.getSecond(), new byte[1]));
    LOG.info("Creating new region : " + hri);
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
    Path target = region.getRegionFileSystem().getRegionDir();
    // rename all the data to new region
    mergeRegionDirs(target, hi);
    fixes++;
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) FileSystem(org.apache.hadoop.fs.FileSystem) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) HFile(org.apache.hadoop.hbase.io.hfile.HFile) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Cell(org.apache.hadoop.hbase.Cell)
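
The min/max-key computation at the heart of adoptHdfsOrphan is self-contained enough to lift out. Below is a minimal sketch of reading the first and last row keys from a single HFile, using the same era of the API as the snippet above (HFile.createReader with a CacheConfig, and getFirstKey()/getLastKey() returning Cell); the input path is hypothetical, and newer HBase versions return Optional<Cell> instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileKeyRange {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical HFile path; in adoptHdfsOrphan this comes from listing a CF dir.
        Path hfilePath = new Path(args[0]);
        FileSystem fs = hfilePath.getFileSystem(conf);
        HFile.Reader reader = null;
        try {
            reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf), conf);
            reader.loadFileInfo();
            // Same calls as the snippet: first/last keys come back as Cells,
            // and cloneRow extracts just the row-key bytes.
            Cell first = reader.getFirstKey();
            Cell last = reader.getLastKey();
            byte[] start = CellUtil.cloneRow(first);
            byte[] end = CellUtil.cloneRow(last);
            System.out.println("[" + Bytes.toString(start) + ", " + Bytes.toString(end) + "]");
        } finally {
            if (reader != null) {
                reader.close();
            }
        }
    }
}

Expanding an orphan region's range over many such files is then just Bytes.compareTo on the start and end keys, exactly as the loop in the snippet does.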

Example 57 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class FSTableDescriptors, method createMetaTableDescriptor.

@VisibleForTesting
public static HTableDescriptor createMetaTableDescriptor(final Configuration conf) throws IOException {
    HTableDescriptor metaDescriptor = new HTableDescriptor(TableName.META_TABLE_NAME,
        new HColumnDescriptor[] {
            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                    HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                    HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                    HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
                    HConstants.DEFAULT_HBASE_META_VERSIONS))
                .setInMemory(true)
                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true),
            new HColumnDescriptor(HConstants.TABLE_FAMILY)
                .setMaxVersions(10)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
                .setBloomFilterType(BloomType.NONE)
                .setCacheDataInL1(true)
        }) {
    };
    metaDescriptor.addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint", null, Coprocessor.PRIORITY_SYSTEM, null);
    return metaDescriptor;
}
Also used: HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
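
The meta descriptor above is just the HTableDescriptor/HColumnDescriptor builder chain applied five times. A minimal sketch of the same pattern for an ordinary table follows; the table and family names are hypothetical, and the chained setters are exactly the ones used in the snippet.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.BloomType;

public class DescriptorSketch {
    // Builds a descriptor for a hypothetical "demo" table with one tuned family.
    static HTableDescriptor buildDemoDescriptor() {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
        htd.addFamily(new HColumnDescriptor("cf")
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            .setBloomFilterType(BloomType.NONE));
        return htd;
    }
}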

Example 58 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class HBaseFsck, method fabricateTableInfo.

/**
   * Fabricates a .tableinfo file with the following contents:<br>
   * 1. the correct table name<br>
   * 2. the correct column family list<br>
   * 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
   * @throws IOException if the descriptor cannot be written
   */
private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, Set<String> columns) throws IOException {
    if (columns == null || columns.isEmpty())
        return false;
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (String columnFamily : columns) {
        htd.addFamily(new HColumnDescriptor(columnFamily));
    }
    fstd.createTableDescriptor(htd, true);
    return true;
}
Also used: HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
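
A hedged usage sketch of the same fabrication flow, driving FSTableDescriptors directly: the table and family names are hypothetical, and the single-argument FSTableDescriptors constructor is assumed to exist in this HBase version (the snippet only shows createTableDescriptor being called on an existing instance).

import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class FabricateSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FSTableDescriptors fstd = new FSTableDescriptors(conf); // assumed constructor
        Set<String> families = new TreeSet<>();
        families.add("cf1"); // hypothetical family names
        families.add("cf2");
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("recovered"));
        for (String family : families) {
            htd.addFamily(new HColumnDescriptor(family));
        }
        // 'true' forces overwrite of any existing .tableinfo, as in the snippet above.
        fstd.createTableDescriptor(htd, true);
    }
}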

Example 59 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class HBaseFsck, method getHTableDescriptors.

HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
    HTableDescriptor[] htd = new HTableDescriptor[0];
    LOG.info("getHTableDescriptors == tableNames => " + tableNames);
    try (Connection conn = ConnectionFactory.createConnection(getConf());
        Admin admin = conn.getAdmin()) {
        htd = admin.getTableDescriptorsByTableName(tableNames);
    } catch (IOException e) {
        LOG.debug("Exception getting table descriptors", e);
    }
    return htd;
}
Also used: ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
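
The same fetch works standalone. A minimal sketch, assuming a reachable cluster and a hypothetical table name; getTableDescriptorsByTableName is the (since-deprecated) Admin call used in the snippet, and the try-with-resources block mirrors its connection handling.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FetchDescriptors {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        List<TableName> names = Arrays.asList(TableName.valueOf("t1")); // hypothetical table
        // Connection and Admin are both closed automatically, as in the snippet.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            HTableDescriptor[] htds = admin.getTableDescriptorsByTableName(names);
            for (HTableDescriptor htd : htds) {
                System.out.println(htd.getTableName());
            }
        }
    }
}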

Example 60 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

From the class TestAsyncTableAdminApi, method verifyTableDescriptor.

private void verifyTableDescriptor(final TableName tableName, final byte[]... families) throws IOException {
    Admin admin = TEST_UTIL.getAdmin();
    // Verify descriptor from master
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    verifyTableDescriptor(htd, tableName, families);
    // Verify descriptor from HDFS
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
    verifyTableDescriptor(td, tableName, families);
}
Also used: MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
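
The overload verifyTableDescriptor(htd, tableName, families) that performs the actual comparison is not shown in this excerpt. A plausible reconstruction follows, assuming it checks the table name and the exact family set; the real test may assert more.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class VerifySketch {
    // Hypothetical reconstruction of the comparison overload; not the project's code.
    static void verifyTableDescriptor(HTableDescriptor htd, TableName tableName,
            byte[]... families) {
        assertEquals(tableName, htd.getTableName());
        // Same number of families, and every expected family is present.
        assertEquals(families.length, htd.getFamilies().size());
        for (byte[] family : families) {
            assertTrue(htd.hasFamily(family));
        }
    }
}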

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 867 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 555 uses
Test (org.junit.Test): 425 uses
TableName (org.apache.hadoop.hbase.TableName): 258 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 171 uses
IOException (java.io.IOException): 167 uses
Put (org.apache.hadoop.hbase.client.Put): 149 uses
Table (org.apache.hadoop.hbase.client.Table): 134 uses
Path (org.apache.hadoop.fs.Path): 127 uses
Admin (org.apache.hadoop.hbase.client.Admin): 121 uses
Configuration (org.apache.hadoop.conf.Configuration): 87 uses
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 77 uses
ArrayList (java.util.ArrayList): 75 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 66 uses
Result (org.apache.hadoop.hbase.client.Result): 62 uses
Connection (org.apache.hadoop.hbase.client.Connection): 57 uses
Scan (org.apache.hadoop.hbase.client.Scan): 51 uses
Cell (org.apache.hadoop.hbase.Cell): 44 uses
Delete (org.apache.hadoop.hbase.client.Delete): 44 uses
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 43 uses