Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class HBaseFsck, method adoptHdfsOrphan.
/**
* Orphaned regions are regions without a .regioninfo file in them. We "adopt"
* these orphans by creating a new region, and moving the column families,
* recovered edits, WALs, into the new region dir. We determine the region
* startkey and endkeys by looking at all of the hfiles inside the column
* families to identify the min and max keys. The resulting region will
* likely violate table integrity but will be dealt with by merging
* overlapping regions.
*/
@SuppressWarnings("deprecation")
private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
  Path p = hi.getHdfsRegionDir();
  FileSystem fs = p.getFileSystem(getConf());
  FileStatus[] dirs = fs.listStatus(p);
  if (dirs == null) {
    LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + p
        + ". This dir could probably be deleted.");
    return;
  }

  TableName tableName = hi.getTableName();
  TableInfo tableInfo = tablesInfo.get(tableName);
  Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
  HTableDescriptor template = tableInfo.getHTD();

  // find min and max key values
  Pair<byte[], byte[]> orphanRegionRange = null;
  for (FileStatus cf : dirs) {
    String cfName = cf.getPath().getName();
    // TODO Figure out what the special dirs are
    if (cfName.startsWith(".") || cfName.equals(HConstants.SPLIT_LOGDIR_NAME)) {
      continue;
    }
    FileStatus[] hfiles = fs.listStatus(cf.getPath());
    for (FileStatus hfile : hfiles) {
      byte[] start, end;
      HFile.Reader hf = null;
      try {
        CacheConfig cacheConf = new CacheConfig(getConf());
        hf = HFile.createReader(fs, hfile.getPath(), cacheConf, getConf());
        hf.loadFileInfo();
        Cell startKv = hf.getFirstKey();
        start = CellUtil.cloneRow(startKv);
        Cell endKv = hf.getLastKey();
        end = CellUtil.cloneRow(endKv);
      } catch (IOException ioe) {
        LOG.warn("Problem reading orphan file " + hfile + ", skipping");
        continue;
      } catch (NullPointerException ioe) {
        LOG.warn("Orphan file " + hfile + " is possibly corrupted HFile, skipping");
        continue;
      } finally {
        if (hf != null) {
          hf.close();
        }
      }

      // expand the range to include the range of all hfiles
      if (orphanRegionRange == null) {
        // first range
        orphanRegionRange = new Pair<>(start, end);
      } else {
        // expand range only if the hfile is wider.
        if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) {
          orphanRegionRange.setFirst(start);
        }
        if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0) {
          orphanRegionRange.setSecond(end);
        }
      }
    }
  }
  if (orphanRegionRange == null) {
    LOG.warn("No data in dir " + p + ", sidelining data");
    fixes++;
    sidelineRegionDir(fs, hi);
    return;
  }
  LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", "
      + Bytes.toString(orphanRegionRange.getSecond()) + ")");

  // create new region on hdfs. move data into place.
  HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(),
      Bytes.add(orphanRegionRange.getSecond(), new byte[1]));
  LOG.info("Creating new region : " + hri);
  HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
  Path target = region.getRegionFileSystem().getRegionDir();

  // rename all the data to new region
  mergeRegionDirs(target, hi);
  fixes++;
}
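The range-expansion rule is easy to miss inside the nested loops above, so here is a minimal, standalone sketch of just that rule (our own illustration, not HBaseFsck code; the class and method names are hypothetical). It uses only org.apache.hadoop.hbase.util.Bytes and Pair and can be run in isolation.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class OrphanRangeSketch {
  // Grow the candidate region range to the widest [start, end) seen across hfiles,
  // mirroring the orphanRegionRange handling in adoptHdfsOrphan.
  static Pair<byte[], byte[]> expand(Pair<byte[], byte[]> range, byte[] start, byte[] end) {
    if (range == null) {
      return new Pair<>(start, end);              // first hfile seen
    }
    if (Bytes.compareTo(range.getFirst(), start) > 0) {
      range.setFirst(start);                      // this hfile starts earlier
    }
    if (Bytes.compareTo(range.getSecond(), end) < 0) {
      range.setSecond(end);                       // this hfile ends later
    }
    return range;
  }

  public static void main(String[] args) {
    Pair<byte[], byte[]> range = null;
    range = expand(range, Bytes.toBytes("ccc"), Bytes.toBytes("mmm"));
    range = expand(range, Bytes.toBytes("aaa"), Bytes.toBytes("kkk"));
    // prints [aaa, mmm) -- the union of the two hfile key ranges
    System.out.println("[" + Bytes.toString(range.getFirst()) + ", "
        + Bytes.toString(range.getSecond()) + ")");
  }
}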
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class FSTableDescriptors, method createMetaTableDescriptor.
@VisibleForTesting
public static HTableDescriptor createMetaTableDescriptor(final Configuration conf) throws IOException {
  HTableDescriptor metaDescriptor = new HTableDescriptor(TableName.META_TABLE_NAME,
      new HColumnDescriptor[] {
        new HColumnDescriptor(HConstants.CATALOG_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            .setBloomFilterType(BloomType.NONE)
            .setCacheDataInL1(true),
        new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            .setBloomFilterType(BloomType.NONE)
            .setCacheDataInL1(true),
        new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            .setBloomFilterType(BloomType.NONE)
            .setCacheDataInL1(true),
        new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
            .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
            .setInMemory(true)
            .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE, HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            .setBloomFilterType(BloomType.NONE)
            .setCacheDataInL1(true),
        new HColumnDescriptor(HConstants.TABLE_FAMILY)
            .setMaxVersions(10)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
            .setBloomFilterType(BloomType.NONE)
            .setCacheDataInL1(true)
      }) {
  };
  metaDescriptor.addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
      null, Coprocessor.PRIORITY_SYSTEM, null);
  return metaDescriptor;
}
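Every meta family above is configured with the same chained setters. For readability, here is a hedged sketch of that pattern applied to a single ordinary column family; the table name "example_table" and family "info" are hypothetical, and the values are only plausible defaults, not the meta settings.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.BloomType;

public class DescriptorSketch {
  // Build a descriptor for a hypothetical user table using the same chained-setter style.
  public static HTableDescriptor exampleDescriptor() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    htd.addFamily(new HColumnDescriptor("info")
        .setMaxVersions(3)                               // keep three versions per cell
        .setInMemory(true)                               // prefer the in-memory cache segment
        .setBlocksize(8 * 1024)                          // 8 KB HFile blocks
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)    // do not replicate this family
        .setBloomFilterType(BloomType.NONE));            // no bloom filter
    return htd;
  }
}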
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class HBaseFsck, method fabricateTableInfo.
/**
* To fabricate a .tableinfo file with the following contents<br>
* 1. the correct tablename <br>
* 2. the correct colfamily list<br>
* 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @throws IOException
*/
private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
    Set<String> columns) throws IOException {
  if (columns == null || columns.isEmpty()) {
    return false;
  }
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (String columnFamily : columns) {
    htd.addFamily(new HColumnDescriptor(columnFamily));
  }
  fstd.createTableDescriptor(htd, true);
  return true;
}
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class HBaseFsck, method getHTableDescriptors.
HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
  HTableDescriptor[] htd = new HTableDescriptor[0];
  LOG.info("getHTableDescriptors == tableNames => " + tableNames);
  try (Connection conn = ConnectionFactory.createConnection(getConf());
       Admin admin = conn.getAdmin()) {
    htd = admin.getTableDescriptorsByTableName(tableNames);
  } catch (IOException e) {
    LOG.debug("Exception getting table descriptors", e);
  }
  return htd;
}
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class TestAsyncTableAdminApi, method verifyTableDescriptor.
private void verifyTableDescriptor(final TableName tableName, final byte[]... families)
    throws IOException {
  Admin admin = TEST_UTIL.getAdmin();

  // Verify descriptor from master
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  verifyTableDescriptor(htd, tableName, families);

  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  verifyTableDescriptor(td, tableName, families);
}
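Both calls above delegate to a three-argument overload that is not included in this excerpt. A hypothetical stand-in (our own sketch, not the actual TestAsyncTableAdminApi helper) would compare the table name and the family set along these lines.

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.junit.Assert;

public class VerifySketch {
  // Hypothetical stand-in for the overload used above; the real test helper may differ.
  static void verifyTableDescriptor(HTableDescriptor htd, TableName tableName, byte[]... families) {
    Assert.assertEquals(tableName, htd.getTableName());          // descriptor names the right table
    Assert.assertEquals(families.length, htd.getFamilies().size());
    for (byte[] family : families) {
      Assert.assertTrue(htd.hasFamily(family));                  // every expected family is present
    }
  }
}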