Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class TestTableScan, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  conf.set(Constants.CUSTOM_FILTERS, "CustomFilter:" + CustomFilter.class.getName());
  TEST_UTIL.startMiniCluster();
  REST_TEST_UTIL.startServletContainer(conf);
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  Admin admin = TEST_UTIL.getAdmin();
  if (!admin.tableExists(TABLE)) {
    // Create the test table with two column families and load the rows the scan tests expect.
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(CFA));
    htd.addFamily(new HColumnDescriptor(CFB));
    admin.createTable(htd);
    expectedRows1 = TestScannerResource.insertData(conf, TABLE, COLUMN_1, 1.0);
    expectedRows2 = TestScannerResource.insertData(conf, TABLE, COLUMN_2, 0.5);
  }
}
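HTableDescriptor and HColumnDescriptor are deprecated as of HBase 2.0 in favor of the builder-based descriptor API in org.apache.hadoop.hbase.client. A minimal sketch of the same table creation with that API, assuming TABLE is a TableName and CFA/CFB are the same family constants used above:

TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CFA))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CFB))
    .build();
admin.createTable(td);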
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class ImportTsv, method createTable:
private static void createTable(Admin admin, TableName tableName, String[] columns) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  // One default-configured column family per family named in the column specs.
  Set<String> cfSet = getColumnFamilies(columns);
  for (String cf : cfSet) {
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(cf));
    htd.addFamily(hcd);
  }
  LOG.warn(format("Creating table '%s' with '%s' columns and default descriptors.", tableName, cfSet));
  admin.createTable(htd);
}
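The families come from the column specs passed to ImportTsv (for example -Dimporttsv.columns=HBASE_ROW_KEY,d:c1,info:c2, where the part before ':' is the family). A hypothetical sketch of what the getColumnFamilies helper might look like under that spec format; the real helper also skips the other special markers (timestamp, attributes, visibility, TTL):

private static Set<String> getColumnFamilies(String[] columns) {
  Set<String> cfSet = new HashSet<>();
  for (String aColumn : columns) {
    // The row-key marker is not a real column family.
    if (TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn)) {
      continue;
    }
    // "family:qualifier" -> keep only the family part.
    cfSet.add(aColumn.split(":", 2)[0]);
  }
  return cfSet;
}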
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class TableSnapshotInputFormatImpl, method getSplits:
public static List<InputSplit> getSplits(Scan scan, SnapshotManifest manifest, List<HRegionInfo> regionManifests, Path restoreDir, Configuration conf) throws IOException {
  // load table descriptor
  HTableDescriptor htd = manifest.getTableDescriptor();
  Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());
  List<InputSplit> splits = new ArrayList<>();
  for (HRegionInfo hri : regionManifests) {
    // Only regions whose key range overlaps the scan's start/stop rows produce a split.
    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(), hri.getEndKey())) {
      // compute HDFS locations from snapshot files (which will get the locations for
      // referred hfiles)
      List<String> hosts = getBestLocations(conf, HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir));
      // Keep at most the top three locations per split.
      int len = Math.min(3, hosts.size());
      hosts = hosts.subList(0, len);
      splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir));
    }
  }
  return splits;
}
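These splits are consumed through TableSnapshotInputFormat. A minimal sketch of wiring a MapReduce job to scan a snapshot, assuming conf is an HBase-aware Configuration; the snapshot name, mapper class, and restore directory below are placeholders:

Job job = Job.getInstance(conf, "snapshot-scan");
Scan scan = new Scan(); // optionally narrow with start/stop rows or column families
TableMapReduceUtil.initTableSnapshotMapperJob(
    "my_snapshot",            // hypothetical snapshot name
    scan,
    MySnapshotMapper.class,   // hypothetical TableMapper subclass
    Text.class,
    LongWritable.class,
    job,
    true,                     // ship HBase dependency jars with the job
    new Path("/tmp/snapshot-restore"));  // hypothetical restore/scratch directory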
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class IncreasingToUpperBoundRegionSplitPolicy, method configureForRegion:
@Override
protected void configureForRegion(HRegion region) {
  super.configureForRegion(region);
  Configuration conf = getConf();
  // An explicit override takes precedence.
  initialSize = conf.getLong("hbase.increasing.policy.initial.size", -1);
  if (initialSize > 0) {
    return;
  }
  // Otherwise start at twice the table's memstore flush size.
  HTableDescriptor desc = region.getTableDesc();
  if (desc != null) {
    initialSize = 2 * desc.getMemStoreFlushSize();
  }
  // Fall back to twice the cluster-wide default flush size.
  if (initialSize <= 0) {
    initialSize = 2 * conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
  }
}
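initialSize is only the starting point; the policy then grows the split threshold with the number of regions of the table hosted on the RegionServer, roughly min(initialSize * count^3, max file size). A sketch of that escalation, paraphrasing the policy's getSizeToCheck rather than quoting it verbatim:

long sizeToCheck(int regionsOnThisServer, long initialSize, long desiredMaxFileSize) {
  // Degenerate or very large counts fall back to the configured maximum file size.
  if (regionsOnThisServer == 0 || regionsOnThisServer > 100) {
    return desiredMaxFileSize;
  }
  long cubed = initialSize * regionsOnThisServer * regionsOnThisServer * regionsOnThisServer;
  return Math.min(desiredMaxFileSize, cubed);
}

With a 128 MB flush size (so initialSize = 256 MB) that gives 256 MB at one region, 2 GB at two, 6.75 GB at three, after which the table's maximum file size caps the threshold.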
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class FSTableDescriptors, method readTableDescriptor:
private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status) throws IOException {
  int len = Ints.checkedCast(status.getLen());
  byte[] content = new byte[len];
  FSDataInputStream fsDataInputStream = fs.open(status.getPath());
  try {
    fsDataInputStream.readFully(content);
  } finally {
    fsDataInputStream.close();
  }
  HTableDescriptor htd = null;
  try {
    // The .tableinfo file holds a pb-serialized HTableDescriptor.
    htd = HTableDescriptor.parseFrom(content);
  } catch (DeserializationException e) {
    throw new IOException("content=" + Bytes.toShort(content), e);
  }
  return htd;
}
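The bytes read here are the serialized form produced by HTableDescriptor#toByteArray(), so the two methods round-trip. A small sketch, with made-up table and family names:

static void roundTrip() throws IOException, DeserializationException {
  HTableDescriptor original = new HTableDescriptor(TableName.valueOf("demo"));
  original.addFamily(new HColumnDescriptor("cf"));
  byte[] bytes = original.toByteArray();          // same serialized form as the file content above
  HTableDescriptor copy = HTableDescriptor.parseFrom(bytes);
  assert original.equals(copy);
}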