Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class AsyncHBaseAdmin, method getTableDescriptor.
@Override
public CompletableFuture<HTableDescriptor> getTableDescriptor(TableName tableName) {
  CompletableFuture<HTableDescriptor> future = new CompletableFuture<>();
  this.<List<TableSchema>> newMasterCaller()
      .action((controller, stub) -> this
          .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>> call(
              controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName),
              (s, c, req, done) -> s.getTableDescriptors(c, req, done),
              (resp) -> resp.getTableSchemaList()))
      .call().whenComplete((tableSchemas, error) -> {
        if (error != null) {
          future.completeExceptionally(error);
          return;
        }
        if (!tableSchemas.isEmpty()) {
          future.complete(ProtobufUtil.convertToHTableDesc(tableSchemas.get(0)));
        } else {
          future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
        }
      });
  return future;
}
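A minimal caller sketch, assuming an AsyncHBaseAdmin instance named admin obtained from an async connection and a logger named LOG (both hypothetical, not from the source). The returned CompletableFuture either yields the descriptor or completes exceptionally (with TableNotFoundException when no schema comes back), so both paths should be handled:

// Hypothetical caller: `admin` and `LOG` are assumed to exist in scope.
admin.getTableDescriptor(TableName.valueOf("my_table"))
    .whenComplete((desc, err) -> {
      if (err != null) {
        // TableNotFoundException if no schema was returned, or an RPC failure
        LOG.error("Failed to fetch table descriptor", err);
      } else {
        LOG.info("Table has " + desc.getFamilies().size() + " column families");
      }
    });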
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class TestBatchCoprocessorEndpoint, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // Set configuration to indicate which coprocessors should be loaded
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName(),
      ColumnAggregationEndpointWithErrors.class.getName(),
      ColumnAggregationEndpointNullResponse.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);
  Admin admin = util.getAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] });
  util.waitUntilAllRegionsAssigned(TEST_TABLE);
  admin.close();
  Table table = util.getConnection().getTable(TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
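A minimal read-back sketch under the same assumptions as the setup above (TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER, ROWS, and ROWSIZE as defined in the test class); it would verify, for example in a test method, that each row written by setupBeforeClass holds the expected counter value:

// Sketch only: re-reads the rows written in setupBeforeClass and checks
// that each holds the integer that was put into it.
try (Table table = util.getConnection().getTable(TEST_TABLE)) {
  for (int i = 0; i < ROWSIZE; i++) {
    Result result = table.get(new Get(ROWS[i]));
    byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER);
    assertEquals(i, Bytes.toInt(value));
  }
}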
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class TestCoprocessorEndpoint, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // Set configuration to indicate which coprocessors should be loaded
  Configuration conf = util.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);
  Admin admin = util.getAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] });
  util.waitUntilAllRegionsAssigned(TEST_TABLE);
  Table table = util.getConnection().getTable(TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
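A quick sanity-check sketch, assuming the mini cluster started above: a table created with two split keys (rowSeperator1 and rowSeperator2) has three regions, which waitUntilAllRegionsAssigned blocks on before the test writes any data. This is not from the source, just an illustration of the resulting region layout:

// Sketch only: confirm the two split keys produced three regions.
try (RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) {
  assertEquals(3, locator.getAllRegionLocations().size());
}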
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class BackupManager, method createBackupInfo.
/**
 * Creates a backup info based on the input backup request.
 * @param backupId backup id
 * @param type backup type (full or incremental)
 * @param tableList list of tables to back up
 * @param targetRootDir root directory of the backup destination
 * @param workers number of parallel workers
 * @param bandwidth bandwidth per worker in MB per second
 * @return BackupInfo
 * @throws BackupException if the request is invalid or the table list cannot be resolved
 */
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
    String targetRootDir, int workers, long bandwidth) throws BackupException {
  if (targetRootDir == null) {
    throw new BackupException("Wrong backup request parameter: target backup root directory");
  }
  if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
    // A null or empty table list for a full backup means back up all tables, so fill the
    // table list with all user tables from meta. If no table is available, throw an exception.
    HTableDescriptor[] htds = null;
    try (Admin admin = conn.getAdmin()) {
      htds = admin.listTables();
    } catch (Exception e) {
      throw new BackupException(e);
    }
    if (htds == null) {
      throw new BackupException("No table exists for full backup of all tables.");
    } else {
      tableList = new ArrayList<>();
      for (HTableDescriptor hTableDescriptor : htds) {
        TableName tn = hTableDescriptor.getTableName();
        if (tn.equals(BackupSystemTable.getTableName(conf))) {
          // Skip the backup system table
          continue;
        }
        tableList.add(hTableDescriptor.getTableName());
      }
      LOG.info("Full backup all the tables available in the cluster: " + tableList);
    }
  }
  // At this point there are one or more tables in the table list
  backupInfo = new BackupInfo(backupId, type,
      tableList.toArray(new TableName[tableList.size()]), targetRootDir);
  backupInfo.setBandwidth(bandwidth);
  backupInfo.setWorkers(workers);
  return backupInfo;
}
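A hypothetical invocation sketch (the id, path, and numbers are illustrative, not from the source), assuming a BackupManager instance named backupManager: requesting a full backup of all user tables by passing a null table list, with four parallel workers capped at 100 MB/s each:

// All argument values below are illustrative assumptions.
BackupInfo info = backupManager.createBackupInfo(
    "backup_" + System.currentTimeMillis(), // backupId
    BackupType.FULL,                        // full rather than incremental
    null,                                   // null => back up all user tables
    "hdfs://namenode:8020/backup",          // targetRootDir (hypothetical path)
    4,                                      // workers
    100L);                                  // bandwidth, MB/s per worker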
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class TestRSGroupBasedLoadBalancer, method getTableName.
private TableName getTableName(ServerName sn) throws IOException {
  TableName tableName = null;
  RSGroupInfoManager gm = getMockedGroupInfoManager();
  RSGroupInfo groupOfServer = null;
  // Find the RS group that contains the given server
  for (RSGroupInfo gInfo : gm.listRSGroups()) {
    if (gInfo.containsServer(sn.getAddress())) {
      groupOfServer = gInfo;
      break;
    }
  }
  // Return a table whose RS group matches the server's group
  for (HTableDescriptor desc : tableDescs) {
    if (gm.getRSGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())) {
      tableName = desc.getTableName();
    }
  }
  return tableName;
}
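A minimal in-test usage sketch (the servers list and assertion message are illustrative assumptions, not from the source): since the method is private to the test class, a caller inside TestRSGroupBasedLoadBalancer might resolve the table owned by a server's RS group and assert the lookup succeeded. Note that the method returns null when no table maps to the group, and would throw a NullPointerException if the server belongs to no group at all.

// Hypothetical in-test usage: `servers` is assumed to be the test's list of
// mocked ServerName instances.
ServerName sn = servers.get(0);
TableName tn = getTableName(sn);
assertNotNull("Expected a table owned by the group of " + sn, tn);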