use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
In class HBaseTableFactory, method upgradeCoProcessor:
private void upgradeCoProcessor(TableId tableId, Class<? extends Coprocessor> coprocessor) throws IOException {
  try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
    HTableDescriptor tableDescriptor;
    try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
      // If the table doesn't exist, skip upgrading the coprocessor
      if (!tableUtil.tableExists(admin, tableId)) {
        LOG.debug("TMS Table {} was not found. Skip upgrading coprocessor.", tableId);
        return;
      }
      tableDescriptor = tableUtil.getHTableDescriptor(admin, tableId);
    }

    // Get the CDAP version from the table
    ProjectInfo.Version version = HBaseTableUtil.getVersion(tableDescriptor);
    String hbaseVersion = HBaseTableUtil.getHBaseVersion(tableDescriptor);

    if (hbaseVersion != null
        && hbaseVersion.equals(HBaseVersion.getVersionString())
        && version.compareTo(ProjectInfo.getVersion()) >= 0) {
      // The table's CDAP version is the same or newer and the HBase version has not changed,
      // so there is no need to update. Just enable the table, in case it has been disabled
      // by the upgrade tool, and return.
      LOG.info("Table '{}' has not changed and its version '{}' is same or greater than current CDAP version '{}'. "
                 + "The underlying HBase version {} has also not changed.",
               tableId, version, ProjectInfo.getVersion(), hbaseVersion);
      enableTable(ddlExecutor, tableId);
      return;
    }

    // Create a new descriptor for the table update
    HTableDescriptorBuilder newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);

    // Remove the old coprocessors
    Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil.getCoprocessorInfo(tableDescriptor);
    for (Map.Entry<String, HBaseTableUtil.CoprocessorInfo> coprocessorEntry : coprocessorInfo.entrySet()) {
      newDescriptor.removeCoprocessor(coprocessorEntry.getValue().getClassName());
    }

    // Add the new coprocessor
    CoprocessorDescriptor coprocessorDescriptor =
      coprocessorManager.getCoprocessorDescriptor(coprocessor, Coprocessor.PRIORITY_USER);
    Path path = coprocessorDescriptor.getPath() == null ? null : new Path(coprocessorDescriptor.getPath());
    newDescriptor.addCoprocessor(coprocessorDescriptor.getClassName(), path,
                                 coprocessorDescriptor.getPriority(), coprocessorDescriptor.getProperties());

    // Update the CDAP version, HBase version, and table prefix
    HBaseTableUtil.setVersion(newDescriptor);
    HBaseTableUtil.setHBaseVersion(newDescriptor);
    HBaseTableUtil.setTablePrefix(newDescriptor, cConf);

    // Disable auto-splitting
    newDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, cConf.get(Constants.MessagingSystem.TABLE_HBASE_SPLIT_POLICY));

    // Disable the table, apply the new descriptor, then re-enable it
    disableTable(ddlExecutor, tableId);
    tableUtil.modifyTable(ddlExecutor, newDescriptor.build());
    LOG.debug("Enabling table '{}'...", tableId);
    enableTable(ddlExecutor, tableId);
  }
  LOG.info("Table '{}' update completed.", tableId);
}
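As a point of reference, here is a minimal, hypothetical driver for this method (not part of the snippet above): it reuses payloadTableName, createHTableId and getPayloadTableRegionObserverClassForVersion, which appear in the createPayloadTable snippet further below.

// Hypothetical driver method: upgrades the payload table's coprocessor to the
// region observer that matches the running HBase version.
public void upgradePayloadTable() throws IOException {
  TableId tableId = tableUtil.createHTableId(NamespaceId.SYSTEM, payloadTableName);
  upgradeCoProcessor(tableId, tableUtil.getPayloadTableRegionObserverClassForVersion());
}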
use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
In class HBaseTableFactory, method createTable:
/**
 * Creates a new instance of {@link Table} for the given {@link TableId}. If the HBase table doesn't
 * exist, a new one will be created with the given number of splits.
 */
private HTableWithRowKeyDistributor createTable(TableId tableId, int splits,
                                                Class<? extends Coprocessor> coprocessor) throws IOException {
  // Look up the table descriptor from the cache first. If it is there, we assume the HBase table exists.
  // Otherwise, attempt to create it.
  Table table = null;
  HTableDescriptor htd = tableDescriptors.get(tableId);
  if (htd == null) {
    synchronized (this) {
      htd = tableDescriptors.get(tableId);
      if (htd == null) {
        boolean tableExists;
        try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
          tableExists = tableUtil.tableExists(admin, tableId);
        }

        try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
          // Create the table (including its coprocessor) only if it doesn't exist yet
          if (!tableExists) {
            TableId metadataTableId = tableUtil.createHTableId(
              NamespaceId.SYSTEM, cConf.get(Constants.MessagingSystem.METADATA_TABLE_NAME));

            ColumnFamilyDescriptorBuilder cfdBuilder =
              HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(COLUMN_FAMILY), hConf);

            TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf)
              .addColumnFamily(cfdBuilder.build())
              .addProperty(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES, Integer.toString(1))
              .addProperty(Constants.MessagingSystem.KEY_DISTRIBUTOR_BUCKETS_ATTR, Integer.toString(splits))
              .addProperty(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE, metadataTableId.getNamespace())
              .addProperty(HTableDescriptor.SPLIT_POLICY, cConf.get(Constants.MessagingSystem.TABLE_HBASE_SPLIT_POLICY))
              .addCoprocessor(coprocessorManager.getCoprocessorDescriptor(coprocessor, Coprocessor.PRIORITY_USER));

            // Set the key distributor size the same as the initial number of splits,
            // essentially one bucket per split.
            byte[][] splitKeys = HBaseTableUtil.getSplitKeys(
              splits, splits, new RowKeyDistributorByHashPrefix(new OneByteSimpleHash(splits)));
            ddlExecutor.createTableIfNotExists(tdBuilder.build(), splitKeys);

            table = tableUtil.createTable(hConf, tableId);
            htd = table.getTableDescriptor();
            tableDescriptors.put(tableId, htd);
          } else {
            table = tableUtil.createTable(hConf, tableId);
            htd = table.getTableDescriptor();
            tableDescriptors.put(tableId, htd);
          }
        }
      }
    }
  }
  if (table == null) {
    table = tableUtil.createTable(hConf, tableId);
  }
  return new HTableWithRowKeyDistributor(
    table, new RowKeyDistributorByHashPrefix(new OneByteSimpleHash(getKeyDistributorBuckets(tableId, htd))));
}
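The helper getKeyDistributorBuckets referenced on the last line is not included in this snippet. A plausible sketch is shown below, assuming the bucket count is read back from the KEY_DISTRIBUTOR_BUCKETS_ATTR property that createTable writes into the descriptor; the actual implementation may handle a missing value differently.

// Plausible sketch of the helper referenced above (the real implementation may differ):
// reads back the bucket count that createTable stored as a table property.
private int getKeyDistributorBuckets(TableId tableId, HTableDescriptor htd) throws IOException {
  String value = htd.getValue(Constants.MessagingSystem.KEY_DISTRIBUTOR_BUCKETS_ATTR);
  if (value == null) {
    // Assumption: the attribute is always present on tables created by this factory
    throw new IOException("Missing table property "
      + Constants.MessagingSystem.KEY_DISTRIBUTOR_BUCKETS_ATTR + " on table " + tableId);
  }
  return Integer.parseInt(value);
}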
use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
In class HBaseTableFactory, method createPayloadTable:
@Override
public PayloadTable createPayloadTable(TopicMetadata topicMetadata) throws IOException {
  TableId tableId = tableUtil.createHTableId(NamespaceId.SYSTEM, payloadTableName);
  Class<? extends Coprocessor> tableCoprocessor = tableUtil.getPayloadTableRegionObserverClassForVersion();
  HTableWithRowKeyDistributor tableWithRowKeyDistributor =
    createTable(tableId, cConf.getInt(Constants.MessagingSystem.PAYLOAD_TABLE_HBASE_SPLITS), tableCoprocessor);
  return new HBasePayloadTable(tableUtil, tableWithRowKeyDistributor.getTable(), COLUMN_FAMILY,
                               tableWithRowKeyDistributor.getRowKeyDistributor(), scanExecutor,
                               cConf.getInt(Constants.MessagingSystem.HBASE_SCAN_CACHE_ROWS),
                               createExceptionHandler(tableId));
}
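For comparison, a hedged sketch of a symmetric message-table factory method follows. The names messageTableName, MESSAGE_TABLE_HBASE_SPLITS, getMessageTableRegionObserverClassForVersion, MessageTable and HBaseMessageTable are inferred from the payload-table pattern above and may not match the actual code.

// Hedged sketch only: a message-table counterpart built by analogy with createPayloadTable.
@Override
public MessageTable createMessageTable(TopicMetadata topicMetadata) throws IOException {
  TableId tableId = tableUtil.createHTableId(NamespaceId.SYSTEM, messageTableName);
  Class<? extends Coprocessor> tableCoprocessor = tableUtil.getMessageTableRegionObserverClassForVersion();
  HTableWithRowKeyDistributor tableWithRowKeyDistributor =
    createTable(tableId, cConf.getInt(Constants.MessagingSystem.MESSAGE_TABLE_HBASE_SPLITS), tableCoprocessor);
  return new HBaseMessageTable(tableUtil, tableWithRowKeyDistributor.getTable(), COLUMN_FAMILY,
                               tableWithRowKeyDistributor.getRowKeyDistributor(), scanExecutor,
                               cConf.getInt(Constants.MessagingSystem.HBASE_SCAN_CACHE_ROWS),
                               createExceptionHandler(tableId));
}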
use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
In class ReplicationStatusTool, method getReplicationStateTableId:
private static TableId getReplicationStateTableId(HBaseTableUtil tableUtil) throws IOException {
  String tableName = hConf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAME);
  String ns = hConf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAMESPACE);
  TableId tableId = tableUtil.createHTableId(
    (ns != null)
      ? new NamespaceId(ns)
      : new NamespaceId(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAMESPACE),
    (tableName != null)
      ? tableName
      : ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAME);
  return tableId;
}
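A hypothetical usage sketch follows: a caller resolves the TableId and checks that the underlying HBase table exists before scanning it. The tableUtil instance and LOG field are assumed to be available in the calling context.

// Hypothetical usage sketch (not part of the snippet above).
try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
  TableId tableId = getReplicationStateTableId(tableUtil);
  if (!tableUtil.tableExists(admin, tableId)) {
    LOG.warn("Replication state table {} does not exist yet.", tableId);
  }
  // ... otherwise, scan the table for replication status entries
}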
use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
In class HBaseQueueAdmin, method upgradeQueues:
private Map<TableId, Future<?>> upgradeQueues(final NamespaceMeta namespaceMeta, ExecutorService executor,
                                              final HBaseAdmin admin) throws Exception {
  String hbaseNamespace = tableUtil.getHBaseNamespace(namespaceMeta);
  List<TableId> tableIds = tableUtil.listTablesInNamespace(admin, hbaseNamespace);
  List<TableId> stateStoreTableIds = Lists.newArrayList();
  Map<TableId, Future<?>> futures = new HashMap<>();

  for (final TableId tableId : tableIds) {
    // Only queue data tables are upgraded here; state store tables are collected for the loop
    // below, and everything else (e.g. the config table) is skipped.
    if (isDataTable(tableId)) {
      Callable<Void> callable = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          LOG.info("Upgrading queue table: {}", tableId);
          Properties properties = new Properties();
          HTableDescriptor desc = tableUtil.getHTableDescriptor(admin, tableId);
          if (desc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES) == null) {
            // It's an old queue table. Set the prefix bytes property to SALT_BYTES.
            properties.setProperty(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES,
                                   Integer.toString(SaltedHBaseQueueStrategy.SALT_BYTES));
          }
          upgrade(tableId, properties);
          LOG.info("Upgraded queue table: {}", tableId);
          return null;
        }
      };
      Future<?> future = executor.submit(
        ImpersonationUtils.createImpersonatingCallable(impersonator, namespaceMeta, callable));
      futures.put(tableId, future);
    } else if (isStateStoreTable(tableId)) {
      stateStoreTableIds.add(tableId);
    }
  }

  // Upgrade the queue state store tables
  for (final TableId tableId : stateStoreTableIds) {
    Callable<Void> callable = new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        LOG.info("Upgrading queue state store: {}", tableId);
        DatasetId stateStoreId = createStateStoreDataset(namespaceMeta.getName());
        DatasetAdmin datasetAdmin = datasetFramework.getAdmin(stateStoreId, null);
        if (datasetAdmin == null) {
          LOG.error("No dataset admin available for {}", stateStoreId);
          return null;
        }
        datasetAdmin.upgrade();
        LOG.info("Upgraded queue state store: {}", tableId);
        return null;
      }
    };
    Future<?> future = executor.submit(
      ImpersonationUtils.createImpersonatingCallable(impersonator, namespaceMeta, callable));
    futures.put(tableId, future);
  }
  return futures;
}
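A hypothetical caller sketch follows, showing one way the returned futures map could be consumed; the waitForUpgrades name is illustrative and not part of HBaseQueueAdmin.

// Hypothetical caller sketch: block on the per-table futures returned by upgradeQueues
// and surface the TableId of any upgrade that failed.
private void waitForUpgrades(Map<TableId, Future<?>> futures) throws Exception {
  for (Map.Entry<TableId, Future<?>> entry : futures.entrySet()) {
    try {
      // Future.get() rethrows any exception raised by the impersonated callable
      entry.getValue().get();
    } catch (ExecutionException e) {
      LOG.error("Failed to upgrade table {}", entry.getKey(), e.getCause());
      throw e;
    }
  }
}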