Example 1 with HBaseDDLExecutor

Use of co.cask.cdap.spi.hbase.HBaseDDLExecutor in project cdap by caskdata.

From the class DatasetUpgrader, method upgradeUserTables:

private Map<String, Future<?>> upgradeUserTables(final NamespaceMeta namespaceMeta, ExecutorService executor) throws Exception {
    Map<String, Future<?>> futures = new HashMap<>();
    String hBaseNamespace = hBaseTableUtil.getHBaseNamespace(namespaceMeta);
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get();
        HBaseAdmin hAdmin = new HBaseAdmin(hConf)) {
        for (final HTableDescriptor desc : hAdmin.listTableDescriptorsByNamespace(HTableNameConverter.encodeHBaseEntity(hBaseNamespace))) {
            Runnable runnable = new Runnable() {

                @Override
                public void run() {
                    try {
                        impersonator.doAs(namespaceMeta.getNamespaceId(), new Callable<Void>() {

                            @Override
                            public Void call() throws Exception {
                                if (isCDAPUserTable(desc)) {
                                    upgradeUserTable(desc);
                                } else if (isStreamOrQueueTable(desc.getNameAsString())) {
                                    updateTableDesc(desc, ddlExecutor);
                                }
                                return null;
                            }
                        });
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            };
            Future<?> future = executor.submit(runnable);
            futures.put(desc.getNameAsString(), future);
        }
    }
    return futures;
}
Also used: HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor), HashMap (java.util.HashMap), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), Future (java.util.concurrent.Future)
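
The method only submits work; the returned futures still have to be drained. A minimal caller sketch (assuming a namespaceMeta, an executor, and a LOG already in scope, and using the Future and ExecutionException imports listed above):

Map<String, Future<?>> futures = upgradeUserTables(namespaceMeta, executor);
for (Map.Entry<String, Future<?>> entry : futures.entrySet()) {
    try {
        // Block until this table's upgrade task finishes.
        entry.getValue().get();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
    } catch (ExecutionException e) {
        LOG.error("Upgrade of table " + entry.getKey() + " failed", e.getCause());
    }
}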

Example 2 with HBaseDDLExecutor

Use of co.cask.cdap.spi.hbase.HBaseDDLExecutor in project cdap by caskdata.

From the class ConfigurationTable, method write:

/**
   * Writes the {@link CConfiguration} instance as a new row to the HBase table.  The {@link Type} given is used as
   * the row key (allowing multiple configurations to be stored).  After the new configuration is written, this will
   * delete any configurations written with an earlier timestamp (to prevent removed values from being visible).
   * @param cConf The CConfiguration instance to store
   * @throws IOException If an error occurs while writing the configuration
   */
public void write(Type type, CConfiguration cConf) throws IOException {
    // must create the table if it doesn't exist
    HTable table = null;
    try (HBaseDDLExecutor ddlExecutor = new HBaseDDLExecutorFactory(cConf, hbaseConf).get()) {
        HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
        TableId tableId = tableUtil.createHTableId(NamespaceId.SYSTEM, TABLE_NAME);
        ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(FAMILY), hbaseConf);
        TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf).addColumnFamily(cfdBuilder.build());
        ddlExecutor.createTableIfNotExists(tdBuilder.build(), null);
        long now = System.currentTimeMillis();
        long previous = now - 1;
        byte[] typeBytes = Bytes.toBytes(type.name());
        LOG.info("Writing new config row with key " + type);
        // populate the configuration data
        table = tableUtil.createHTable(hbaseConf, tableId);
        table.setAutoFlush(false);
        Put p = new Put(typeBytes);
        for (Map.Entry<String, String> e : cConf) {
            p.add(FAMILY, Bytes.toBytes(e.getKey()), now, Bytes.toBytes(e.getValue()));
        }
        table.put(p);
        LOG.info("Deleting any configuration from " + previous + " or before");
        Delete d = new Delete(typeBytes);
        d.deleteFamily(FAMILY, previous);
        table.delete(d);
    } finally {
        if (table != null) {
            try {
                table.close();
            } catch (IOException ioe) {
                LOG.error("Error closing HBaseAdmin: " + ioe.getMessage(), ioe);
            }
        }
    }
}
Also used: HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor), TableId (co.cask.cdap.data2.util.TableId), Delete (org.apache.hadoop.hbase.client.Delete), IOException (java.io.IOException), HTable (org.apache.hadoop.hbase.client.HTable), Put (org.apache.hadoop.hbase.client.Put), Map (java.util.Map)
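
A caller might persist the running configuration like this (a hedged sketch: the ConfigurationTable constructor argument and the Type constant are assumptions inferred from the method above, not quoted from the project):

ConfigurationTable configTable = new ConfigurationTable(hbaseConf);
// write() stores the current values and then prunes any older configuration rows.
configTable.write(ConfigurationTable.Type.DEFAULT, CConfiguration.create());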

Example 3 with HBaseDDLExecutor

Use of co.cask.cdap.spi.hbase.HBaseDDLExecutor in project cdap by caskdata.

From the class HBaseStreamFileConsumerFactory, method create:

@Override
protected StreamConsumer create(TableId tableId, StreamConfig streamConfig, ConsumerConfig consumerConfig, StreamConsumerStateStore stateStore, StreamConsumerState beginConsumerState, FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader, @Nullable ReadFilter extraFilter) throws IOException {
    int splits = cConf.getInt(Constants.Stream.CONSUMER_TABLE_PRESPLITS);
    AbstractRowKeyDistributor distributor = new RowKeyDistributorByHashPrefix(new RowKeyDistributorByHashPrefix.OneByteSimpleHash(splits));
    byte[][] splitKeys = HBaseTableUtil.getSplitKeys(splits, splits, distributor);
    TableId hBaseTableId = tableUtil.createHTableId(new NamespaceId(tableId.getNamespace()), tableId.getTableName());
    TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(hBaseTableId, cConf);
    ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(QueueEntryRow.COLUMN_FAMILY), hConf);
    tdBuilder.addColumnFamily(cfdBuilder.build());
    tdBuilder.addProperty(QueueConstants.DISTRIBUTOR_BUCKETS, Integer.toString(splits));
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        ddlExecutor.createTableIfNotExists(tdBuilder.build(), splitKeys);
    }
    HTable hTable = tableUtil.createHTable(hConf, hBaseTableId);
    hTable.setWriteBufferSize(Constants.Stream.HBASE_WRITE_BUFFER_SIZE);
    hTable.setAutoFlushTo(false);
    return new HBaseStreamFileConsumer(cConf, streamConfig, consumerConfig, tableUtil, hTable, reader, stateStore, beginConsumerState, extraFilter, createKeyDistributor(hTable.getTableDescriptor()));
}
Also used: TableId (co.cask.cdap.data2.util.TableId), HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor), RowKeyDistributorByHashPrefix (co.cask.cdap.hbase.wd.RowKeyDistributorByHashPrefix), AbstractRowKeyDistributor (co.cask.cdap.hbase.wd.AbstractRowKeyDistributor), ColumnFamilyDescriptorBuilder (co.cask.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder), TableDescriptorBuilder (co.cask.cdap.data2.util.hbase.TableDescriptorBuilder), NamespaceId (co.cask.cdap.proto.id.NamespaceId), HTable (org.apache.hadoop.hbase.client.HTable)
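
Because setAutoFlushTo(false) buffers Puts client-side, mutations only reach the region servers once the write buffer fills or the table is flushed. A writer that needs its mutations visible at a known point would flush explicitly (a minimal sketch against the HBase 1.x HTable API used above; somePut is a placeholder):

hTable.put(somePut);   // buffered in the client, not yet sent to HBase
hTable.flushCommits(); // pushes all buffered mutations to the region servers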

Example 4 with HBaseDDLExecutor

Use of co.cask.cdap.spi.hbase.HBaseDDLExecutor in project cdap by caskdata.

From the class HBaseStreamConsumerStateStoreFactory, method create:

@Override
public synchronized StreamConsumerStateStore create(StreamConfig streamConfig) throws IOException {
    NamespaceId namespace = streamConfig.getStreamId().getParent();
    TableId streamStateStoreTableId = StreamUtils.getStateStoreTableId(namespace);
    TableId hbaseTableId = tableUtil.createHTableId(new NamespaceId(streamStateStoreTableId.getNamespace()), streamStateStoreTableId.getTableName());
    boolean tableExist;
    try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
        tableExist = tableUtil.tableExists(admin, hbaseTableId);
    }
    if (!tableExist) {
        try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
            TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(hbaseTableId, cConf);
            ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(QueueEntryRow.COLUMN_FAMILY), hConf);
            tdBuilder.addColumnFamily(cfdBuilder.build());
            ddlExecutor.createTableIfNotExists(tdBuilder.build(), null);
        }
    }
    HTable hTable = tableUtil.createHTable(hConf, hbaseTableId);
    hTable.setWriteBufferSize(Constants.Stream.HBASE_WRITE_BUFFER_SIZE);
    hTable.setAutoFlushTo(false);
    return new HBaseStreamConsumerStateStore(streamConfig, hTable);
}
Also used: TableId (co.cask.cdap.data2.util.TableId), HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor), HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), ColumnFamilyDescriptorBuilder (co.cask.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder), TableDescriptorBuilder (co.cask.cdap.data2.util.hbase.TableDescriptorBuilder), NamespaceId (co.cask.cdap.proto.id.NamespaceId), HTable (org.apache.hadoop.hbase.client.HTable)
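
The existence check and the creation above are not atomic, but createTableIfNotExists is, as its name suggests, a no-op when the table already exists, so the pattern tolerates a concurrent creator. A condensed sketch of the same idea, reusing the names from the example:

if (!tableUtil.tableExists(admin, hbaseTableId)) {
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        // Safe even if another process created the table between the check and this call.
        ddlExecutor.createTableIfNotExists(tdBuilder.build(), null);
    }
}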

Example 5 with HBaseDDLExecutor

Use of co.cask.cdap.spi.hbase.HBaseDDLExecutor in project cdap by caskdata.

From the class HBaseTableFactory, method createMetadataTable:

@Override
public MetadataTable createMetadataTable(String tableName) throws IOException {
    TableId tableId = tableUtil.createHTableId(NamespaceId.SYSTEM, tableName);
    HTable hTable = null;
    // If the table descriptor is in the cache, we assume the table exists.
    if (!tableDescriptors.containsKey(tableId)) {
        synchronized (this) {
            if (!tableDescriptors.containsKey(tableId)) {
                try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
                    ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(COLUMN_FAMILY), hConf);
                    TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf).addColumnFamily(cfdBuilder.build());
                    ddlExecutor.createTableIfNotExists(tdBuilder.build(), null);
                    hTable = tableUtil.createHTable(hConf, tableId);
                    tableDescriptors.put(tableId, hTable.getTableDescriptor());
                }
            }
        }
    }
    if (hTable == null) {
        hTable = tableUtil.createHTable(hConf, tableId);
    }
    return new HBaseMetadataTable(tableUtil, hTable, COLUMN_FAMILY, cConf.getInt(Constants.MessagingSystem.HBASE_SCAN_CACHE_ROWS), createExceptionHandler(tableId));
}
Also used: TableId (co.cask.cdap.data2.util.TableId), HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor), ColumnFamilyDescriptorBuilder (co.cask.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder), HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder), TableDescriptorBuilder (co.cask.cdap.data2.util.hbase.TableDescriptorBuilder), HTable (org.apache.hadoop.hbase.client.HTable)
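
The double-checked locking above is only sound if tableDescriptors is itself thread-safe. A plausible declaration, inferred from the usage rather than quoted from HBaseTableFactory:

// Cache of descriptors for tables known to exist; must support concurrent access.
private final Map<TableId, HTableDescriptor> tableDescriptors = new ConcurrentHashMap<>();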

Aggregations

HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor): 17 usages
TableId (co.cask.cdap.data2.util.TableId): 8
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 7
IOException (java.io.IOException): 6
ColumnFamilyDescriptorBuilder (co.cask.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder): 5
HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder): 5
TableDescriptorBuilder (co.cask.cdap.data2.util.hbase.TableDescriptorBuilder): 5
HTable (org.apache.hadoop.hbase.client.HTable): 5
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 4
NamespaceId (co.cask.cdap.proto.id.NamespaceId): 3
Map (java.util.Map): 3
ProjectInfo (co.cask.cdap.common.utils.ProjectInfo): 2
ExploreException (co.cask.cdap.explore.service.ExploreException): 2
RowKeyDistributorByHashPrefix (co.cask.cdap.hbase.wd.RowKeyDistributorByHashPrefix): 2
SQLException (java.sql.SQLException): 2
HashMap (java.util.HashMap): 2
ExecutionException (java.util.concurrent.ExecutionException): 2
Future (java.util.concurrent.Future): 2
SimpleNamespaceQueryAdmin (co.cask.cdap.common.namespace.SimpleNamespaceQueryAdmin): 1
QueueConstants (co.cask.cdap.data2.transaction.queue.QueueConstants): 1