Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
The class HBase10CDHTableUtil, method listTablesInNamespace.
@Override
public List<TableId> listTablesInNamespace(HBaseAdmin admin, String namespaceId) throws IOException {
  List<TableId> tableIds = Lists.newArrayList();
  HTableDescriptor[] hTableDescriptors =
    admin.listTableDescriptorsByNamespace(HTableNameConverter.encodeHBaseEntity(namespaceId));
  for (HTableDescriptor hTableDescriptor : hTableDescriptors) {
    if (isCDAPTable(hTableDescriptor)) {
      tableIds.add(HTableNameConverter.from(hTableDescriptor));
    }
  }
  return tableIds;
}
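For context, a minimal caller might look like the sketch below. The namespace name "default" and the direct no-arg construction of HBase10CDHTableUtil are illustrative assumptions; in CDAP the utility is normally obtained through HBaseTableUtilFactory rather than constructed by hand.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

import io.cdap.cdap.data2.util.TableId;

public class ListCdapTablesExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // HBaseAdmin is the pre-1.0-style admin handle this utility method expects
    try (HBaseAdmin admin = new HBaseAdmin(conf)) {
      HBase10CDHTableUtil tableUtil = new HBase10CDHTableUtil(); // assumption: no-arg construction
      List<TableId> tableIds = tableUtil.listTablesInNamespace(admin, "default");
      for (TableId tableId : tableIds) {
        System.out.println(tableId);
      }
    }
  }
}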
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
The class LevelDBTableService, method getTableStats.
/**
 * Gets table stats.
 *
 * @return map of table id -> table stats entries
 * @throws Exception
 */
public Map<TableId, TableStats> getTableStats() throws Exception {
  ensureOpen();
  File baseDir = new File(basePath);
  File[] subDirs = baseDir.listFiles();
  if (subDirs == null) {
    return ImmutableMap.of();
  }
  ImmutableMap.Builder<TableId, TableStats> builder = ImmutableMap.builder();
  for (File dir : subDirs) {
    String tableName = getTableName(dir.getName());
    // NOTE: we use recursion to traverse the file tree, since we know a LevelDB table's
    // fs tree is only a couple of levels deep
    long size = getSize(dir);
    builder.put(LevelDBNameConverter.from(tableName), new TableStats(size));
  }
  return builder.build();
}
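A hedged usage sketch follows: it walks the returned map and prints per-table disk usage. The injected service instance and the getDiskSizeBytes() accessor on the nested TableStats class are assumptions; the snippet above only shows that TableStats is constructed from a size.

import java.util.Map;

import io.cdap.cdap.data2.util.TableId;

public class TableStatsReport {
  private final LevelDBTableService service; // assumption: provided via Guice, as is usual in CDAP

  TableStatsReport(LevelDBTableService service) {
    this.service = service;
  }

  void report() throws Exception {
    Map<TableId, LevelDBTableService.TableStats> stats = service.getTableStats();
    for (Map.Entry<TableId, LevelDBTableService.TableStats> entry : stats.entrySet()) {
      // getDiskSizeBytes() is an assumed accessor for the size captured in TableStats
      System.out.printf("%s: %d bytes%n", entry.getKey(), entry.getValue().getDiskSizeBytes());
    }
  }
}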
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
The class HBaseMetricsTable, method putBytes.
@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
    // apply the configured row-key distribution (salting) so writes spread across regions
    byte[] distributedKey = createDistributedRowKey(row.getKey());
    PutBuilder put = tableUtil.buildPut(distributedKey);
    for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
      put.add(columnFamily, column.getKey(), column.getValue());
    }
    puts.add(put.build());
  }
  try {
    mutator.mutate(puts);
    mutator.flush();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
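Since putBytes takes nested SortedMaps keyed by byte arrays, callers must supply a byte-array-aware comparator; HBase's Bytes.BYTES_COMPARATOR serves that purpose. A minimal sketch, assuming metricsTable is an already-initialized HBaseMetricsTable:

import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.util.Bytes;

// writes one row with a single column through putBytes
void writeOneCell(HBaseMetricsTable metricsTable) {
  // byte[] keys need an explicit comparator; Bytes.BYTES_COMPARATOR compares arrays lexicographically
  SortedMap<byte[], SortedMap<byte[], byte[]>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  SortedMap<byte[], byte[]> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  columns.put(Bytes.toBytes("count"), Bytes.toBytes(42L));
  updates.put(Bytes.toBytes("metric.row"), columns);
  metricsTable.putBytes(updates); // batches all rows into a single mutate + flush
}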
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
The class HBaseMetricsTable, method delete.
@Override
public void delete(byte[] row, byte[][] columns) {
  byte[] distributedKey = createDistributedRowKey(row);
  DeleteBuilder delete = tableUtil.buildDelete(distributedKey);
  for (byte[] column : columns) {
    delete.deleteColumns(columnFamily, column);
  }
  try {
    table.delete(delete.build());
  } catch (IOException e) {
    throw new DataSetException("Delete failed on table " + tableId, e);
  }
}
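And a matching delete sketch, again assuming an initialized metricsTable (the column names are illustrative):

import org.apache.hadoop.hbase.util.Bytes;

// removes the "count" and "sum" columns from one row; the row key is salted
// internally by createDistributedRowKey before the delete is issued
void deleteTwoColumns(HBaseMetricsTable metricsTable) {
  byte[] row = Bytes.toBytes("metric.row");
  byte[][] columns = { Bytes.toBytes("count"), Bytes.toBytes("sum") };
  metricsTable.delete(row, columns);
}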
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
The class HBaseTableAdmin, method create.
@Override
public void create() throws IOException {
  String columnFamily = Bytes.toString(TableProperties.getColumnFamilyBytes(spec.getProperties()));
  ColumnFamilyDescriptorBuilder cfdBuilder =
    HBaseTableUtil.getColumnFamilyDescriptorBuilder(columnFamily, hConf);
  if (TableProperties.getReadlessIncrementSupport(spec.getProperties())) {
    cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
  } else if (DatasetsUtil.isTransactional(spec.getProperties())) {
    // NOTE: we cannot limit the number of versions, as there is no hard limit on the
    // number of transactions excluded from reads
    cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
  } else {
    cfdBuilder.setMaxVersions(1);
  }
  cfdBuilder.setBloomType(ColumnFamilyDescriptor.BloomType.ROW);
  Long ttl = TableProperties.getTTL(spec.getProperties());
  if (ttl != null) {
    // convert the TTL from seconds to milliseconds
    ttl = TimeUnit.SECONDS.toMillis(ttl);
    cfdBuilder.addProperty(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  final TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf);
  // if the dataset is configured for read-less increments, set the table property to support upgrades
  boolean supportsReadlessIncrements = TableProperties.getReadlessIncrementSupport(spec.getProperties());
  if (supportsReadlessIncrements) {
    tdBuilder.addProperty(Table.PROPERTY_READLESS_INCREMENT, "true");
  }
  // if the dataset is configured to be non-transactional, set the table property to support upgrades
  if (!DatasetsUtil.isTransactional(spec.getProperties())) {
    tdBuilder.addProperty(Constants.Dataset.TABLE_TX_DISABLED, "true");
    if (supportsReadlessIncrements) {
      // by default, the read-less increment coprocessors assume the table is transactional
      cfdBuilder.addProperty("dataset.table.readless.increment.transactional", "false");
    }
  }
  tdBuilder.addColumnFamily(cfdBuilder.build());
  CoprocessorJar coprocessorJar = createCoprocessorJar();
  for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
    tdBuilder.addCoprocessor(
      coprocessorManager.getCoprocessorDescriptor(coprocessor, coprocessorJar.getPriority(coprocessor)));
  }
  byte[][] splits = null;
  String splitsProperty = spec.getProperty(PROPERTY_SPLITS);
  if (splitsProperty != null) {
    splits = GSON.fromJson(splitsProperty, byte[][].class);
  }
  // set the split policy if one is specified
  String splitsPolicy = spec.getProperty(SPLIT_POLICY);
  if (!Strings.isNullOrEmpty(splitsPolicy)) {
    tdBuilder.addProperty(HTableDescriptor.SPLIT_POLICY, splitsPolicy);
  }
  try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
    ddlExecutor.createTableIfNotExists(tdBuilder.build(), splits);
    try {
      Map<String, String> permissions = TableProperties.getTablePermissions(spec.getProperties());
      if (permissions != null && !permissions.isEmpty()) {
        tableUtil.grantPermissions(ddlExecutor, tableId, permissions);
      }
    } catch (IOException | RuntimeException e) {
      // table creation succeeded but granting permissions failed: drop the table again
      try {
        drop();
      } catch (Throwable t) {
        e.addSuppressed(t);
      }
      throw e;
    }
  }
}
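The properties that create() consumes come from the dataset spec. A hedged sketch of building such properties is shown below; Table.PROPERTY_TTL and Table.PROPERTY_READLESS_INCREMENT are CDAP API constants, while the literal "hbase.splits" key is an assumption standing in for PROPERTY_SPLITS.

import com.google.gson.Gson;

import io.cdap.cdap.api.dataset.DatasetProperties;
import io.cdap.cdap.api.dataset.table.Table;

public class TablePropsExample {
  private static final Gson GSON = new Gson();

  static DatasetProperties buildProps() {
    // GSON writes byte[][] as nested JSON arrays of numbers, which is exactly what
    // create() reads back via GSON.fromJson(splitsProperty, byte[][].class)
    byte[][] splits = { { (byte) 'm' }, { (byte) 't' } }; // pre-split at row keys "m" and "t"
    return DatasetProperties.builder()
      .add(Table.PROPERTY_TTL, "86400")               // TTL in seconds; create() converts it to ms
      .add(Table.PROPERTY_READLESS_INCREMENT, "true")
      .add("hbase.splits", GSON.toJson(splits))       // assumption: the key behind PROPERTY_SPLITS
      .build();
  }
}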