Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
From the class ConfigurationTable, method write.
/**
 * Writes the {@link CConfiguration} instance as a new row to the HBase table. The {@link Type} given is used as
 * the row key (allowing multiple configurations to be stored). After the new configuration is written, this will
 * delete any configurations written with an earlier timestamp (to prevent removed values from being visible).
 *
 * @param type The configuration type, used as the row key
 * @param cConf The CConfiguration instance to store
 * @throws IOException If an error occurs while writing the configuration
 */
public void write(Type type, CConfiguration cConf) throws IOException {
  // must create the table if it doesn't exist
  HTable table = null;
  try (HBaseDDLExecutor ddlExecutor = new HBaseDDLExecutorFactory(cConf, hbaseConf).get()) {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    TableId tableId = tableUtil.createHTableId(NamespaceId.SYSTEM, TABLE_NAME);
    ColumnFamilyDescriptorBuilder cfdBuilder =
      HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(FAMILY), hbaseConf);
    TableDescriptorBuilder tdBuilder =
      HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf).addColumnFamily(cfdBuilder.build());
    ddlExecutor.createTableIfNotExists(tdBuilder.build(), null);

    long now = System.currentTimeMillis();
    long previous = now - 1;
    byte[] typeBytes = Bytes.toBytes(type.name());
    LOG.info("Writing new config row with key " + type);

    // populate the configuration data; every cell is written at the same timestamp
    table = tableUtil.createHTable(hbaseConf, tableId);
    table.setAutoFlush(false);
    Put p = new Put(typeBytes);
    for (Map.Entry<String, String> e : cConf) {
      p.add(FAMILY, Bytes.toBytes(e.getKey()), now, Bytes.toBytes(e.getValue()));
    }
    table.put(p);

    // shadow everything older than the row just written, so removed keys become invisible
    LOG.info("Deleting any configuration from " + previous + " or before");
    Delete d = new Delete(typeBytes);
    d.deleteFamily(FAMILY, previous);
    table.delete(d);
  } finally {
    if (table != null) {
      try {
        table.close();
      } catch (IOException ioe) {
        LOG.error("Error closing HBase table: " + ioe.getMessage(), ioe);
      }
    }
  }
}
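Reading the stored configuration back is a plain Get on the same row. The snippet below is only an illustrative sketch, not CDAP's actual read path; it reuses the tableUtil, hbaseConf, tableId, type, and FAMILY names from write() above together with the standard HBase 0.98/1.x client API.

// Illustrative sketch only (assumes the same fields as write() above).
HTable table = tableUtil.createHTable(hbaseConf, tableId);
try {
  Get get = new Get(Bytes.toBytes(type.name()));
  get.addFamily(FAMILY);
  Result result = table.get(get);
  Map<String, String> config = new HashMap<>();
  if (!result.isEmpty()) {
    for (Map.Entry<byte[], byte[]> entry : result.getFamilyMap(FAMILY).entrySet()) {
      config.put(Bytes.toString(entry.getKey()), Bytes.toString(entry.getValue()));
    }
  }
  // because write() deleted the family up to (now - 1), only the latest snapshot is visible here
} finally {
  table.close();
}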
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
From the class HBaseQueueAdmin, method upgradeQueues.
private Map<TableId, Future<?>> upgradeQueues(final NamespaceMeta namespaceMeta,
                                              ExecutorService executor) throws Exception {
  try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
    String hbaseNamespace = tableUtil.getHBaseNamespace(namespaceMeta);
    List<TableId> tableIds = tableUtil.listTablesInNamespace(admin, hbaseNamespace);
    List<TableId> stateStoreTableIds = Lists.newArrayList();
    Map<TableId, Future<?>> futures = new HashMap<>();

    for (final TableId tableId : tableIds) {
      // Skip the config table; only queue data tables and state store tables are upgraded.
      if (isDataTable(tableId)) {
        Runnable runnable = new Runnable() {
          @Override
          public void run() {
            try {
              LOG.info("Upgrading queue table: {}", tableId);
              Properties properties = new Properties();
              HTableDescriptor desc = tableUtil.getHTableDescriptor(admin, tableId);
              if (desc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES) == null) {
                // It's the old queue table. Set the property prefix bytes to SALT_BYTES
                properties.setProperty(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES,
                                       Integer.toString(SaltedHBaseQueueStrategy.SALT_BYTES));
              }
              upgrade(tableId, properties);
              LOG.info("Upgraded queue table: {}", tableId);
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
          }
        };
        Future<?> future = executor.submit(runnable);
        futures.put(tableId, future);
      } else if (isStateStoreTable(tableId)) {
        stateStoreTableIds.add(tableId);
      }
    }

    // Upgrade of state store tables
    for (final TableId tableId : stateStoreTableIds) {
      Runnable runnable = new Runnable() {
        @Override
        public void run() {
          try {
            LOG.info("Upgrading queue state store: {}", tableId);
            DatasetId stateStoreId = createStateStoreDataset(namespaceMeta.getName());
            DatasetAdmin datasetAdmin = datasetFramework.getAdmin(stateStoreId, null);
            if (datasetAdmin == null) {
              LOG.error("No dataset admin available for {}", stateStoreId);
              return;
            }
            datasetAdmin.upgrade();
            LOG.info("Upgraded queue state store: {}", tableId);
          } catch (Exception e) {
            // rethrow so the failure is surfaced through the Future returned to the caller
            throw new RuntimeException(e);
          }
        }
      };
      Future<?> future = executor.submit(runnable);
      futures.put(tableId, future);
    }
    return futures;
  }
}
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
From the class HBaseStreamFileConsumerFactory, method create.
@Override
protected StreamConsumer create(TableId tableId, StreamConfig streamConfig, ConsumerConfig consumerConfig,
                                StreamConsumerStateStore stateStore, StreamConsumerState beginConsumerState,
                                FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader,
                                @Nullable ReadFilter extraFilter) throws IOException {
  int splits = cConf.getInt(Constants.Stream.CONSUMER_TABLE_PRESPLITS);
  // the consumer table is pre-split and row keys are salted with a one-byte hash prefix
  AbstractRowKeyDistributor distributor =
    new RowKeyDistributorByHashPrefix(new RowKeyDistributorByHashPrefix.OneByteSimpleHash(splits));
  byte[][] splitKeys = HBaseTableUtil.getSplitKeys(splits, splits, distributor);

  TableId hBaseTableId = tableUtil.createHTableId(new NamespaceId(tableId.getNamespace()), tableId.getTableName());
  TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(hBaseTableId, cConf);
  ColumnFamilyDescriptorBuilder cfdBuilder =
    HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(QueueEntryRow.COLUMN_FAMILY), hConf);
  tdBuilder.addColumnFamily(cfdBuilder.build());
  tdBuilder.addProperty(QueueConstants.DISTRIBUTOR_BUCKETS, Integer.toString(splits));
  try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
    ddlExecutor.createTableIfNotExists(tdBuilder.build(), splitKeys);
  }

  HTable hTable = tableUtil.createHTable(hConf, hBaseTableId);
  hTable.setWriteBufferSize(Constants.Stream.HBASE_WRITE_BUFFER_SIZE);
  hTable.setAutoFlushTo(false);
  return new HBaseStreamFileConsumer(cConf, streamConfig, consumerConfig, tableUtil, hTable, reader, stateStore,
                                     beginConsumerState, extraFilter,
                                     createKeyDistributor(hTable.getTableDescriptor()));
}
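The RowKeyDistributorByHashPrefix above spreads sequential row keys across the pre-split regions by prepending a one-byte hash of the original key. The following is only a conceptual sketch of that idea, not the library's implementation; buckets stands in for the CONSUMER_TABLE_PRESPLITS value, and java.util.Arrays is assumed to be imported.

// Conceptual sketch of one-byte hash-prefix salting (not the actual RowKeyDistributorByHashPrefix code).
static byte[] saltedKey(byte[] originalKey, int buckets) {
  // pick a stable bucket for this key and prepend it as a single byte,
  // so monotonically increasing keys do not all land on one region
  int bucket = Math.abs(Arrays.hashCode(originalKey) % buckets);
  byte[] salted = new byte[originalKey.length + 1];
  salted[0] = (byte) bucket;
  System.arraycopy(originalKey, 0, salted, 1, originalKey.length);
  return salted;
}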
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
From the class AbstractStreamFileConsumerFactory, method create.
@Override
public final StreamConsumer create(StreamId streamId, String namespace,
                                   ConsumerConfig consumerConfig) throws IOException {
  StreamConfig streamConfig = StreamUtils.ensureExists(streamAdmin, streamId);
  TableId tableId = getTableId(streamId, namespace);
  StreamConsumerStateStore stateStore = stateStoreFactory.create(streamConfig);
  StreamConsumerState consumerState = stateStore.get(consumerConfig.getGroupId(), consumerConfig.getInstanceId());
  return create(tableId, streamConfig, consumerConfig, stateStore, consumerState,
                createReader(streamConfig, consumerState), new TTLReadFilter(streamConfig.getTTL()));
}
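The TTLReadFilter passed to the protected create(...) drops events older than the stream's TTL. The check below is a conceptual sketch only, not the actual TTLReadFilter class, and it assumes the TTL and timestamps are in the same time unit.

// Conceptual only: an event stays readable while it is younger than the stream TTL.
static boolean withinTtl(long eventTimestamp, long ttl, long now) {
  return now - eventTimestamp < ttl;
}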
Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.
From the class HBaseQueueAdmin, method upgrade.
@Override
public void upgrade() throws Exception {
  int numThreads = cConf.getInt(Constants.Upgrade.UPGRADE_THREAD_POOL_SIZE);
  final ExecutorService executor = Executors.newFixedThreadPool(
    numThreads,
    new ThreadFactoryBuilder()
      .setThreadFactory(Executors.privilegedThreadFactory())
      .setNameFormat("hbase-queue-upgrader-%d")
      .setDaemon(true)
      .build());
  final List<Closeable> toClose = new ArrayList<>();
  try {
    final Map<TableId, Future<?>> allFutures = new HashMap<>();
    // For each queue config table and queue data table in each namespace, perform an upgrade
    for (final NamespaceMeta namespaceMeta : namespaceQueryAdmin.list()) {
      impersonator.doAs(namespaceMeta.getNamespaceId(), new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          HBaseAdmin hBaseAdmin = new HBaseAdmin(hConf);
          // register it for close, after all Futures are complete
          toClose.add(hBaseAdmin);
          Map<TableId, Future<?>> futures = upgradeQueues(namespaceMeta, executor, hBaseAdmin);
          allFutures.putAll(futures);
          return null;
        }
      });
    }

    // Wait for the queue upgrades to complete
    Map<TableId, Throwable> failed = waitForUpgrade(allFutures);
    if (!failed.isEmpty()) {
      for (Map.Entry<TableId, Throwable> entry : failed.entrySet()) {
        LOG.error("Failed to upgrade queue table {}", entry.getKey(), entry.getValue());
      }
      throw new Exception(String.format("Error upgrading queue tables. %s of %s failed",
                                        failed.size(), allFutures.size()));
    }
  } finally {
    for (Closeable closeable : toClose) {
      Closeables.closeQuietly(closeable);
    }
    // We'll have tasks pending in the executor only on an interrupt, when the user wants to abort the upgrade.
    // Use shutdownNow() to interrupt the tasks and abort.
    executor.shutdownNow();
  }
}
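waitForUpgrade is not included in this listing. Below is a minimal sketch of what such a helper could look like, assuming it simply blocks on each Future and records the failure cause per table; the real HBaseQueueAdmin implementation may differ, and java.util.concurrent.ExecutionException is assumed to be imported.

// Hypothetical sketch only; not the actual waitForUpgrade from HBaseQueueAdmin.
private Map<TableId, Throwable> waitForUpgrade(Map<TableId, Future<?>> futures) throws InterruptedException {
  Map<TableId, Throwable> failed = new HashMap<>();
  for (Map.Entry<TableId, Future<?>> entry : futures.entrySet()) {
    try {
      // block until the upgrade task submitted for this table finishes
      entry.getValue().get();
    } catch (ExecutionException e) {
      // the upgrade Runnable rethrew its exception as a RuntimeException; keep the cause
      failed.put(entry.getKey(), e.getCause());
    }
  }
  return failed;
}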