
Example 86 with TableId

Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.

The class HBaseTableFactory, method upgradeCoProcessor:

private void upgradeCoProcessor(TableId tableId, Class<? extends Coprocessor> coprocessor) throws IOException {
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        HTableDescriptor tableDescriptor;
        try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
            // If the table doesn't exist, skip upgrading the coprocessor
            if (!tableUtil.tableExists(admin, tableId)) {
                LOG.debug("TMS Table {} was not found. Skip upgrading coprocessor.", tableId);
                return;
            }
            tableDescriptor = tableUtil.getHTableDescriptor(admin, tableId);
        }
        // Get the CDAP and HBase versions recorded on the table
        ProjectInfo.Version version = HBaseTableUtil.getVersion(tableDescriptor);
        String hbaseVersion = HBaseTableUtil.getHBaseVersion(tableDescriptor);
        if (hbaseVersion != null && hbaseVersion.equals(HBaseVersion.getVersionString()) && version.compareTo(ProjectInfo.getVersion()) >= 0) {
        // If the CDAP version has not changed or is greater, there is no need to update. Just enable the table,
        // in case it has been disabled by the upgrade tool, and return
            LOG.info("Table '{}' has not changed and its version '{}' is same or greater than current CDAP version '{}'." + " The underlying HBase version {} has also not changed.", tableId, version, ProjectInfo.getVersion(), hbaseVersion);
            enableTable(ddlExecutor, tableId);
            return;
        }
        // create a new descriptor for the table update
        HTableDescriptorBuilder newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);
        // Remove old coprocessor
        Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil.getCoprocessorInfo(tableDescriptor);
        for (Map.Entry<String, HBaseTableUtil.CoprocessorInfo> coprocessorEntry : coprocessorInfo.entrySet()) {
            newDescriptor.removeCoprocessor(coprocessorEntry.getValue().getClassName());
        }
        // Add new coprocessor
        CoprocessorDescriptor coprocessorDescriptor = coprocessorManager.getCoprocessorDescriptor(coprocessor, Coprocessor.PRIORITY_USER);
        Path path = coprocessorDescriptor.getPath() == null ? null : new Path(coprocessorDescriptor.getPath());
        newDescriptor.addCoprocessor(coprocessorDescriptor.getClassName(), path, coprocessorDescriptor.getPriority(), coprocessorDescriptor.getProperties());
        // Update the CDAP version, HBase version, and table prefix
        HBaseTableUtil.setVersion(newDescriptor);
        HBaseTableUtil.setHBaseVersion(newDescriptor);
        HBaseTableUtil.setTablePrefix(newDescriptor, cConf);
        // Disable auto-splitting
        newDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, cConf.get(Constants.MessagingSystem.TABLE_HBASE_SPLIT_POLICY));
        // Disable the table, apply the updated descriptor, then re-enable it
        disableTable(ddlExecutor, tableId);
        tableUtil.modifyTable(ddlExecutor, newDescriptor.build());
        LOG.debug("Enabling table '{}'...", tableId);
        enableTable(ddlExecutor, tableId);
    }
    LOG.info("Table '{}' update completed.", tableId);
}
Also used : HBaseDDLExecutor(io.cdap.cdap.spi.hbase.HBaseDDLExecutor) Path(org.apache.hadoop.fs.Path) HTableDescriptorBuilder(io.cdap.cdap.data2.util.hbase.HTableDescriptorBuilder) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) ProjectInfo(io.cdap.cdap.common.utils.ProjectInfo) CoprocessorDescriptor(io.cdap.cdap.spi.hbase.CoprocessorDescriptor) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
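
The method above goes through CDAP's table utilities, but the core update it performs is the classic disable-modify-enable cycle on an HTableDescriptor. The sketch below is a minimal, hypothetical illustration of that cycle against the stock HBase 1.x admin API; the class, method, table name, and coprocessor class names are placeholders and not part of the CDAP code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public final class CoprocessorSwapSketch {

    // Minimal sketch: replace one coprocessor on an existing table using plain HBaseAdmin.
    static void swapCoprocessor(Configuration hConf, String tableName,
                                String oldCoprocessorClass, String newCoprocessorClass) throws IOException {
        try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
            TableName name = TableName.valueOf(tableName);
            if (!admin.tableExists(name)) {
                // Nothing to upgrade, mirroring the early return above
                return;
            }
            HTableDescriptor desc = admin.getTableDescriptor(name);
            desc.removeCoprocessor(oldCoprocessorClass);
            desc.addCoprocessor(newCoprocessorClass);
            // Same disable / modify / enable cycle as the CDAP method above
            admin.disableTable(name);
            admin.modifyTable(name, desc);
            admin.enableTable(name);
        }
    }
}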

Example 87 with TableId

Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.

The class HBaseTableFactory, method createTable:

/**
 * Creates a new instance of {@link Table} for the given {@link TableId}. If the hbase table doesn't
 * exist, a new one will be created with the given number of splits.
 */
private HTableWithRowKeyDistributor createTable(TableId tableId, int splits, Class<? extends Coprocessor> coprocessor) throws IOException {
    // Look up the table descriptor in the cache first. If it is there, we assume the HBase table exists.
    // Otherwise, attempt to create it.
    Table table = null;
    HTableDescriptor htd = tableDescriptors.get(tableId);
    if (htd == null) {
        synchronized (this) {
            htd = tableDescriptors.get(tableId);
            if (htd == null) {
                boolean tableExists;
                try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
                    tableExists = tableUtil.tableExists(admin, tableId);
                }
                // Create the table if the table doesn't exist
                try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
                    // If the table already exists, skip table creation and coprocessor setup
                    if (!tableExists) {
                        TableId metadataTableId = tableUtil.createHTableId(NamespaceId.SYSTEM, cConf.get(Constants.MessagingSystem.METADATA_TABLE_NAME));
                        ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(COLUMN_FAMILY), hConf);
                        TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf)
                            .addColumnFamily(cfdBuilder.build())
                            .addProperty(Constants.MessagingSystem.HBASE_MESSAGING_TABLE_PREFIX_NUM_BYTES, Integer.toString(1))
                            .addProperty(Constants.MessagingSystem.KEY_DISTRIBUTOR_BUCKETS_ATTR, Integer.toString(splits))
                            .addProperty(Constants.MessagingSystem.HBASE_METADATA_TABLE_NAMESPACE, metadataTableId.getNamespace())
                            .addProperty(HTableDescriptor.SPLIT_POLICY, cConf.get(Constants.MessagingSystem.TABLE_HBASE_SPLIT_POLICY))
                            .addCoprocessor(coprocessorManager.getCoprocessorDescriptor(coprocessor, Coprocessor.PRIORITY_USER));
                        // Set the key distributor size the same as the initial number of splits,
                        // essentially one bucket per split.
                        byte[][] splitKeys = HBaseTableUtil.getSplitKeys(splits, splits, new RowKeyDistributorByHashPrefix(new OneByteSimpleHash(splits)));
                        ddlExecutor.createTableIfNotExists(tdBuilder.build(), splitKeys);
                        table = tableUtil.createTable(hConf, tableId);
                        htd = table.getTableDescriptor();
                        tableDescriptors.put(tableId, htd);
                    } else {
                        table = tableUtil.createTable(hConf, tableId);
                        htd = table.getTableDescriptor();
                        tableDescriptors.put(tableId, htd);
                    }
                }
            }
        }
    }
    if (table == null) {
        table = tableUtil.createTable(hConf, tableId);
    }
    return new HTableWithRowKeyDistributor(table, new RowKeyDistributorByHashPrefix(new OneByteSimpleHash(getKeyDistributorBuckets(tableId, htd))));
}
Also used : HBaseDDLExecutor(io.cdap.cdap.spi.hbase.HBaseDDLExecutor) TableId(io.cdap.cdap.data2.util.TableId) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) MetadataTable(io.cdap.cdap.messaging.store.MetadataTable) MessageTable(io.cdap.cdap.messaging.store.MessageTable) PayloadTable(io.cdap.cdap.messaging.store.PayloadTable) Table(org.apache.hadoop.hbase.client.Table) RowKeyDistributorByHashPrefix(io.cdap.cdap.hbase.wd.RowKeyDistributorByHashPrefix) ColumnFamilyDescriptorBuilder(io.cdap.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder) OneByteSimpleHash(io.cdap.cdap.hbase.wd.RowKeyDistributorByHashPrefix.OneByteSimpleHash) HTableDescriptorBuilder(io.cdap.cdap.data2.util.hbase.HTableDescriptorBuilder) TableDescriptorBuilder(io.cdap.cdap.data2.util.hbase.TableDescriptorBuilder) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
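
The null check, synchronized block, and second null check around tableDescriptors is the standard check-lock-check pattern for expensive, idempotent initialization; a plain computeIfAbsent would cover the map itself, but the miss path here also creates the HBase table, so the explicit lock keeps that side effect single-threaded. A stripped-down, generic sketch of the same pattern, with illustrative names only:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

public final class DescriptorCacheSketch<K, V> {

    private final Map<K, V> cache = new ConcurrentHashMap<>();

    // Check the cache first, and only take the lock (and run the expensive loader) on a miss.
    V getOrLoad(K key, Function<K, V> loader) {
        V value = cache.get(key);
        if (value == null) {
            synchronized (this) {
                // Re-check under the lock: another thread may have loaded it in the meantime
                value = cache.get(key);
                if (value == null) {
                    value = loader.apply(key); // e.g. create the table and fetch its descriptor
                    cache.put(key, value);
                }
            }
        }
        return value;
    }
}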

Example 88 with TableId

Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.

The class HBaseTableFactory, method createPayloadTable:

@Override
public PayloadTable createPayloadTable(TopicMetadata topicMetadata) throws IOException {
    TableId tableId = tableUtil.createHTableId(NamespaceId.SYSTEM, payloadTableName);
    Class<? extends Coprocessor> tableCoprocessor = tableUtil.getPayloadTableRegionObserverClassForVersion();
    HTableWithRowKeyDistributor tableWithRowKeyDistributor = createTable(tableId, cConf.getInt(Constants.MessagingSystem.PAYLOAD_TABLE_HBASE_SPLITS), tableCoprocessor);
    return new HBasePayloadTable(tableUtil, tableWithRowKeyDistributor.getTable(), COLUMN_FAMILY, tableWithRowKeyDistributor.getRowKeyDistributor(), scanExecutor, cConf.getInt(Constants.MessagingSystem.HBASE_SCAN_CACHE_ROWS), createExceptionHandler(tableId));
}
Also used : TableId(io.cdap.cdap.data2.util.TableId)
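
The factory hides the split count, coprocessor selection, and scan-cache sizing from callers, who only supply the topic metadata. A hypothetical caller sketch follows; tableFactory and topicMetadata are placeholder names, and it assumes PayloadTable is Closeable in the CDAP version in use.

// Hypothetical usage; tableFactory and topicMetadata are placeholders, and this
// assumes PayloadTable is Closeable so it can be released in try-with-resources.
try (PayloadTable payloadTable = tableFactory.createPayloadTable(topicMetadata)) {
    // Read or persist payload entries for the topic; the underlying HBase table is
    // created lazily by createTable(...) above if it does not exist yet.
}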

Example 89 with TableId

Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.

The class ReplicationStatusTool, method getReplicationStateTableId:

private static TableId getReplicationStateTableId(HBaseTableUtil tableUtil) throws IOException {
    String tableName = hConf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAME);
    String ns = hConf.get(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAMESPACE);
    TableId tableId = tableUtil.createHTableId(
        (ns != null) ? new NamespaceId(ns)
            : new NamespaceId(ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAMESPACE),
        (tableName != null) ? tableName
            : ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAME);
    return tableId;
}
Also used : TableId(io.cdap.cdap.data2.util.TableId) NamespaceId(io.cdap.cdap.proto.id.NamespaceId)
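
The two ternaries implement "configured value or default". Hadoop's Configuration supports this directly through get(name, defaultValue); the sketch below is a hypothetical rewrite that assumes the same static hConf field, the same ReplicationConstants keys and defaults, and the same imports as the class above (the behavior matches when the keys are simply unset).

// Alternative sketch relying on Configuration.get(name, defaultValue) for the fallbacks.
private static TableId getReplicationStateTableId(HBaseTableUtil tableUtil) throws IOException {
    String tableName = hConf.get(
        ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAME,
        ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAME);
    String ns = hConf.get(
        ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_NAMESPACE,
        ReplicationConstants.ReplicationStatusTool.REPLICATION_STATE_TABLE_DEFAULT_NAMESPACE);
    return tableUtil.createHTableId(new NamespaceId(ns), tableName);
}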

Example 90 with TableId

Use of io.cdap.cdap.data2.util.TableId in project cdap by caskdata.

The class HBaseQueueAdmin, method upgradeQueues:

private Map<TableId, Future<?>> upgradeQueues(final NamespaceMeta namespaceMeta, ExecutorService executor, final HBaseAdmin admin) throws Exception {
    String hbaseNamespace = tableUtil.getHBaseNamespace(namespaceMeta);
    List<TableId> tableIds = tableUtil.listTablesInNamespace(admin, hbaseNamespace);
    List<TableId> stateStoreTableIds = Lists.newArrayList();
    Map<TableId, Future<?>> futures = new HashMap<>();
    for (final TableId tableId : tableIds) {
        // Only upgrade data tables here; the config table is skipped and state store tables are handled separately below.
        if (isDataTable(tableId)) {
            Callable<Void> callable = new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    LOG.info("Upgrading queue table: {}", tableId);
                    Properties properties = new Properties();
                    HTableDescriptor desc = tableUtil.getHTableDescriptor(admin, tableId);
                    if (desc.getValue(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES) == null) {
                        // It's the old queue table. Set the property prefix bytes to SALT_BYTES
                        properties.setProperty(HBaseQueueAdmin.PROPERTY_PREFIX_BYTES, Integer.toString(SaltedHBaseQueueStrategy.SALT_BYTES));
                    }
                    upgrade(tableId, properties);
                    LOG.info("Upgraded queue table: {}", tableId);
                    return null;
                }
            };
            Future<?> future = executor.submit(ImpersonationUtils.createImpersonatingCallable(impersonator, namespaceMeta, callable));
            futures.put(tableId, future);
        } else if (isStateStoreTable(tableId)) {
            stateStoreTableIds.add(tableId);
        }
    }
    // Upgrade the state store tables
    for (final TableId tableId : stateStoreTableIds) {
        Callable<Void> callable = new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                LOG.info("Upgrading queue state store: {}", tableId);
                DatasetId stateStoreId = createStateStoreDataset(namespaceMeta.getName());
                DatasetAdmin datasetAdmin = datasetFramework.getAdmin(stateStoreId, null);
                if (datasetAdmin == null) {
                    LOG.error("No dataset admin available for {}", stateStoreId);
                    return null;
                }
                datasetAdmin.upgrade();
                LOG.info("Upgraded queue state store: {}", tableId);
                return null;
            }
        };
        Future<?> future = executor.submit(ImpersonationUtils.createImpersonatingCallable(impersonator, namespaceMeta, callable));
        futures.put(tableId, future);
    }
    return futures;
}
Also used : TableId(co.cask.cdap.data2.util.TableId) HashMap(java.util.HashMap) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) TableProperties(co.cask.cdap.api.dataset.table.TableProperties) Properties(java.util.Properties) Callable(java.util.concurrent.Callable) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) DatasetId(co.cask.cdap.proto.id.DatasetId) Future(java.util.concurrent.Future)
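
upgradeQueues returns the per-table futures instead of blocking, so the caller decides how to wait and how to report failures. A hypothetical caller sketch follows; the variable names come from the method signature above, the logging style mirrors it, and java.util.concurrent.ExecutionException is assumed to be imported alongside the imports listed.

// Hypothetical caller: wait for every submitted upgrade and surface per-table failures.
Map<TableId, Future<?>> futures = upgradeQueues(namespaceMeta, executor, admin);
for (Map.Entry<TableId, Future<?>> entry : futures.entrySet()) {
    try {
        // Block until this table's upgrade finishes
        entry.getValue().get();
    } catch (ExecutionException e) {
        LOG.error("Upgrade failed for table {}", entry.getKey(), e.getCause());
    } catch (InterruptedException e) {
        // Preserve the interrupt and stop waiting on the remaining upgrades
        Thread.currentThread().interrupt();
        break;
    }
}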

Aggregations

TableId (co.cask.cdap.data2.util.TableId): 102
Test (org.junit.Test): 95
TableId (io.cdap.cdap.data2.util.TableId): 78
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 60
Put (org.apache.hadoop.hbase.client.Put): 57
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 55
Cell (org.apache.hadoop.hbase.Cell): 42
Scan (org.apache.hadoop.hbase.client.Scan): 40
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 32
NamespaceId (co.cask.cdap.proto.id.NamespaceId): 26
NamespaceId (io.cdap.cdap.proto.id.NamespaceId): 19
IOException (java.io.IOException): 17
HTableDescriptorBuilder (io.cdap.cdap.data2.util.hbase.HTableDescriptorBuilder): 16
Result (org.apache.hadoop.hbase.client.Result): 16
Delete (org.apache.hadoop.hbase.client.Delete): 15
Get (org.apache.hadoop.hbase.client.Get): 14
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 14
HBaseTableUtil (io.cdap.cdap.data2.util.hbase.HBaseTableUtil): 12
TableName (org.apache.hadoop.hbase.TableName): 12
HTable (org.apache.hadoop.hbase.client.HTable): 12