Example 36 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

The class AbstractHBaseTableUtilTest, method enable.

private void enable(String namespace, String tableName) throws IOException {
    HBaseTableUtil tableUtil = getTableUtil();
    TableId hTableId = tableUtil.createHTableId(new NamespaceId(namespace), tableName);
    TableName name = HTableNameConverter.toTableName(getPrefix(), hTableId);
    ddlExecutor.enableTableIfDisabled(name.getNamespaceAsString(), name.getQualifierAsString());
}
Also used: TableId (co.cask.cdap.data2.util.TableId), TableName (org.apache.hadoop.hbase.TableName), NamespaceId (co.cask.cdap.proto.id.NamespaceId)
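The pattern here is the standard CDAP-to-HBase name translation: a logical NamespaceId and table name become a TableId via tableUtil.createHTableId, and HTableNameConverter.toTableName maps that, together with the configured table prefix, to a physical org.apache.hadoop.hbase.TableName. A minimal sketch of that chain, assuming a tableUtil obtained elsewhere (e.g., from HBaseTableUtilFactory); the "cdap" prefix is illustrative, and HTableNameConverter's package is assumed to match the other co.cask.cdap.data2.util.hbase classes:

import org.apache.hadoop.hbase.TableName;

import co.cask.cdap.data2.util.TableId;
import co.cask.cdap.data2.util.hbase.HBaseTableUtil;
import co.cask.cdap.data2.util.hbase.HTableNameConverter;
import co.cask.cdap.proto.id.NamespaceId;

class NameTranslationSketch {
    // Resolve a CDAP (namespace, table) pair to the physical HBase TableName.
    // The "cdap" prefix is illustrative; CDAP reads the real prefix from
    // configuration (Constants.Dataset.TABLE_PREFIX), as Example 40 shows.
    static TableName resolve(HBaseTableUtil tableUtil, String namespace, String table) {
        TableId hTableId = tableUtil.createHTableId(new NamespaceId(namespace), table);
        return HTableNameConverter.toTableName("cdap", hTableId);
    }
}

The resolved name's getNamespaceAsString() and getQualifierAsString() then feed the DDL executor, exactly as enable() does above.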

Example 37 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

The class HBaseMetricsTable, method put.

@Override
public void put(SortedMap<byte[], ? extends SortedMap<byte[], Long>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], ? extends SortedMap<byte[], Long>> row : updates.entrySet()) {
        byte[] distributedKey = createDistributedRowKey(row.getKey());
        PutBuilder put = tableUtil.buildPut(distributedKey);
        for (Map.Entry<byte[], Long> column : row.getValue().entrySet()) {
            put.add(columnFamily, column.getKey(), Bytes.toBytes(column.getValue()));
        }
        puts.add(put.build());
    }
    try {
        hTable.put(puts);
        hTable.flushCommits();
    } catch (IOException e) {
        throw new DataSetException("Put failed on table " + tableId, e);
    }
}
Also used: PutBuilder (co.cask.cdap.data2.util.hbase.PutBuilder), DataSetException (co.cask.cdap.api.dataset.DataSetException), IOException (java.io.IOException), Map (java.util.Map), NavigableMap (java.util.NavigableMap), SortedMap (java.util.SortedMap), Put (org.apache.hadoop.hbase.client.Put)
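To call put, a client assembles the nested row -> (column -> value) map; since both rows and columns are byte[], which has no natural ordering, the maps need an explicit comparator. A minimal sketch, assuming an already-constructed HBaseMetricsTable instance (its import and construction are omitted; the row and column names are made up):

import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.util.Bytes;

class MetricsPutSketch {
    static void recordCounts(HBaseMetricsTable metricsTable) {
        // byte[] keys need HBase's comparator at both map levels.
        SortedMap<byte[], SortedMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        SortedMap<byte[], Long> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        columns.put(Bytes.toBytes("requests"), 42L);          // column -> value
        columns.put(Bytes.toBytes("errors"), 1L);
        updates.put(Bytes.toBytes("app1.service1"), columns); // row -> columns
        metricsTable.put(updates); // one Put per row, written as a single batch
    }
}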

Example 38 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

The class HBaseMetricsTable, method putBytes.

@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
        byte[] distributedKey = createDistributedRowKey(row.getKey());
        PutBuilder put = tableUtil.buildPut(distributedKey);
        for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
            put.add(columnFamily, column.getKey(), column.getValue());
        }
        puts.add(put.build());
    }
    try {
        hTable.put(puts);
        hTable.flushCommits();
    } catch (IOException e) {
        throw new DataSetException("Put failed on table " + tableId, e);
    }
}
Also used: PutBuilder (co.cask.cdap.data2.util.hbase.PutBuilder), DataSetException (co.cask.cdap.api.dataset.DataSetException), IOException (java.io.IOException), Map (java.util.Map), NavigableMap (java.util.NavigableMap), SortedMap (java.util.SortedMap), Put (org.apache.hadoop.hbase.client.Put)
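putBytes is the raw-valued twin of put: values are written verbatim rather than Long-encoded, so the caller chooses the encoding. Extending the sketch above (same assumed metricsTable, same imports):

    static void recordRaw(HBaseMetricsTable metricsTable) {
        SortedMap<byte[], SortedMap<byte[], byte[]>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        SortedMap<byte[], byte[]> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        columns.put(Bytes.toBytes("status"), Bytes.toBytes("ok")); // string payload
        columns.put(Bytes.toBytes("count"), Bytes.toBytes(42L));   // long payload
        updates.put(Bytes.toBytes("host-17"), columns);
        metricsTable.putBytes(updates); // values stored exactly as given
    }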

Example 39 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

The class HBaseTableAdmin, method create.

@Override
public void create() throws IOException {
    String columnFamily = Bytes.toString(TableProperties.getColumnFamilyBytes(spec.getProperties()));
    ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(columnFamily, hConf);
    if (TableProperties.getReadlessIncrementSupport(spec.getProperties())) {
        cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
    } else if (DatasetsUtil.isTransactional(spec.getProperties())) {
        // NOTE: we cannot limit the number of versions, as there is no hard limit on the number of transactions excluded from reads
        cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
    } else {
        cfdBuilder.setMaxVersions(1);
    }
    cfdBuilder.setBloomType(ColumnFamilyDescriptor.BloomType.ROW);
    Long ttl = TableProperties.getTTL(spec.getProperties());
    if (ttl != null) {
        // convert TTL from seconds to milliseconds
        ttl = TimeUnit.SECONDS.toMillis(ttl);
        cfdBuilder.addProperty(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
    }
    final TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf);
    // if the dataset is configured for read-less increments, then set the table property to support upgrades
    boolean supportsReadlessIncrements = TableProperties.getReadlessIncrementSupport(spec.getProperties());
    if (supportsReadlessIncrements) {
        tdBuilder.addProperty(Table.PROPERTY_READLESS_INCREMENT, "true");
    }
    // if the dataset is configured to be non-transactional, then set the table property to support upgrades
    if (!DatasetsUtil.isTransactional(spec.getProperties())) {
        tdBuilder.addProperty(Constants.Dataset.TABLE_TX_DISABLED, "true");
        if (supportsReadlessIncrements) {
            // read-less increment coprocessors assume by default that the table is transactional
            cfdBuilder.addProperty("dataset.table.readless.increment.transactional", "false");
        }
    }
    tdBuilder.addColumnFamily(cfdBuilder.build());
    CoprocessorJar coprocessorJar = createCoprocessorJar();
    for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
        tdBuilder.addCoprocessor(coprocessorManager.getCoprocessorDescriptor(coprocessor, coprocessorJar.getPriority(coprocessor)));
    }
    byte[][] splits = null;
    String splitsProperty = spec.getProperty(PROPERTY_SPLITS);
    if (splitsProperty != null) {
        splits = GSON.fromJson(splitsProperty, byte[][].class);
    }
    // Set the split policy if one is configured (commonly used to disable automatic region splits)
    String splitsPolicy = spec.getProperty(SPLIT_POLICY);
    if (!Strings.isNullOrEmpty(splitsPolicy)) {
        tdBuilder.addProperty(HTableDescriptor.SPLIT_POLICY, splitsPolicy);
    }
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        ddlExecutor.createTableIfNotExists(tdBuilder.build(), splits);
        try {
            Map<String, String> permissions = TableProperties.getTablePermissions(spec.getProperties());
            if (permissions != null && !permissions.isEmpty()) {
                tableUtil.grantPermissions(ddlExecutor, tableId, permissions);
            }
        } catch (IOException | RuntimeException e) {
            try {
                drop();
            } catch (Throwable t) {
                e.addSuppressed(t);
            }
            throw e;
        }
    }
}
Also used: HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor), ColumnFamilyDescriptorBuilder (co.cask.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder), HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder), TableDescriptorBuilder (co.cask.cdap.data2.util.hbase.TableDescriptorBuilder), IOException (java.io.IOException)
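One non-obvious detail in create() is the splits handling: the pre-split keys arrive in the dataset spec as a JSON-encoded byte[][], which Gson parses directly. A small self-contained round-trip sketch of that encoding (the split keys are illustrative):

import com.google.gson.Gson;

import org.apache.hadoop.hbase.util.Bytes;

class SplitsPropertySketch {
    public static void main(String[] args) {
        Gson gson = new Gson();
        // What a spec writer would store under the splits property:
        byte[][] splits = { Bytes.toBytes("m"), Bytes.toBytes("t") };
        String json = gson.toJson(splits); // byte arrays become JSON number arrays
        // What create() above does when reading the property back:
        byte[][] parsed = gson.fromJson(json, byte[][].class);
        System.out.println(parsed.length + " split keys, first = " + Bytes.toString(parsed[0]));
    }
}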

Example 40 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

The class AbstractHBaseDataSetAdmin, method updateTable.

/**
 * Performs an update on a given HBase table. The table is updated if either its spec has
 * changed since the HBase table was created or last updated, or if the CDAP version recorded
 * in the HTable descriptor is older than the current CDAP version.
 *
 * @param force forces the update regardless of whether the table needs it.
 * @throws IOException if the update fails.
 */
public void updateTable(boolean force) throws IOException {
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        HTableDescriptor tableDescriptor;
        HTableDescriptorBuilder newDescriptor;
        try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
            tableDescriptor = tableUtil.getHTableDescriptor(admin, tableId);
            // create a new descriptor for the table update
            newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);
        }
        // update any table properties if necessary
        boolean needUpdate = needsUpdate(tableDescriptor, newDescriptor) || force;
        // Get the cdap version from the table
        ProjectInfo.Version version = HBaseTableUtil.getVersion(tableDescriptor);
        String hbaseVersion = HBaseTableUtil.getHBaseVersion(tableDescriptor);
        if (!needUpdate && hbaseVersion != null && hbaseVersion.equals(HBaseVersion.getVersionString()) && version.compareTo(ProjectInfo.getVersion()) >= 0) {
            // If neither the table spec nor the CDAP version has changed, there is no need to update
            LOG.info("Table '{}' has not changed and its version '{}' is the same as or newer " + "than the current CDAP version '{}'. The underlying HBase version {} has also not changed.", tableId, version, ProjectInfo.getVersion(), hbaseVersion);
            return;
        }
        // Generate the coprocessor jar
        CoprocessorJar coprocessorJar = createCoprocessorJar();
        Location jarLocation = coprocessorJar.getJarLocation();
        // Check if coprocessor upgrade is needed
        Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil.getCoprocessorInfo(tableDescriptor);
        // For all required coprocessors, check whether they need to be upgraded.
        for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
            HBaseTableUtil.CoprocessorInfo info = coprocessorInfo.get(coprocessor.getName());
            if (info != null) {
                // The same coprocessor has been configured, check by the file name to see if they are the same.
                if (!jarLocation.getName().equals(info.getPath().getName())) {
                    // Remove old one and add the new one.
                    newDescriptor.removeCoprocessor(info.getClassName());
                    addCoprocessor(newDescriptor, coprocessor, coprocessorJar.getPriority(coprocessor));
                }
            } else {
                // The coprocessor is missing from the table, add it.
                addCoprocessor(newDescriptor, coprocessor, coprocessorJar.getPriority(coprocessor));
            }
        }
        // Remove any coprocessors that are no longer required
        Set<String> coprocessorNames = ImmutableSet.copyOf(Iterables.transform(coprocessorJar.coprocessors, CLASS_TO_NAME));
        for (String remove : Sets.difference(coprocessorInfo.keySet(), coprocessorNames)) {
            newDescriptor.removeCoprocessor(remove);
        }
        HBaseTableUtil.setVersion(newDescriptor);
        HBaseTableUtil.setHBaseVersion(newDescriptor);
        HBaseTableUtil.setTablePrefix(newDescriptor, cConf);
        LOG.info("Updating table '{}'...", tableId);
        TableName tableName = HTableNameConverter.toTableName(cConf.get(Constants.Dataset.TABLE_PREFIX), tableId);
        boolean enableTable = false;
        try {
            ddlExecutor.disableTableIfEnabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
            enableTable = true;
        } catch (TableNotEnabledException e) {
            // If the table is in the cdap_system namespace, enable it regardless so that it can be used later. See CDAP-7324
            if (isSystemTable()) {
                enableTable = true;
            } else {
                LOG.debug("Table '{}' was not enabled before update and will not be enabled after update.", tableId);
            }
        }
        tableUtil.modifyTable(ddlExecutor, newDescriptor.build());
        if (enableTable) {
            LOG.debug("Enabling table '{}'...", tableId);
            ddlExecutor.enableTableIfDisabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
        }
    }
    LOG.info("Table '{}' update completed.", tableId);
}
Also used: HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor), HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder), HBaseTableUtil (co.cask.cdap.data2.util.hbase.HBaseTableUtil), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), TableName (org.apache.hadoop.hbase.TableName), ProjectInfo (co.cask.cdap.common.utils.ProjectInfo), Location (org.apache.twill.filesystem.Location), TableNotEnabledException (org.apache.hadoop.hbase.TableNotEnabledException)
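The early return in updateTable() compresses three checks into one condition: no descriptor changes are needed, the recorded HBase version matches the running one, and the recorded CDAP version is not older than the current one. A standalone restatement of that guard, with method and parameter names of our choosing (only ProjectInfo and its compareTo come from the snippet above):

import co.cask.cdap.common.utils.ProjectInfo;

class UpdateGuardSketch {
    // Returns true when the table can be left untouched; any single failing
    // check forces the coprocessor refresh, version re-stamping, and
    // modifyTable() path shown above.
    static boolean canSkipUpdate(boolean needUpdate, String recordedHBaseVersion,
                                 String currentHBaseVersion, ProjectInfo.Version recordedCdapVersion) {
        return !needUpdate
            && recordedHBaseVersion != null
            && recordedHBaseVersion.equals(currentHBaseVersion)
            && recordedCdapVersion.compareTo(ProjectInfo.getVersion()) >= 0;
    }
}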

Aggregations

TableId (co.cask.cdap.data2.util.TableId): 102
Test (org.junit.Test): 49
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 41
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 39
Put (org.apache.hadoop.hbase.client.Put): 34
NamespaceId (co.cask.cdap.proto.id.NamespaceId): 26
Cell (org.apache.hadoop.hbase.Cell): 24
Scan (org.apache.hadoop.hbase.client.Scan): 23
HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder): 20
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 18
HBaseTableUtil (co.cask.cdap.data2.util.hbase.HBaseTableUtil): 16
HBaseTableUtilFactory (co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory): 15
IOException (java.io.IOException): 14
HTable (org.apache.hadoop.hbase.client.HTable): 12
HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor): 11
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 10
Delete (org.apache.hadoop.hbase.client.Delete): 9
Result (org.apache.hadoop.hbase.client.Result): 9
Path (org.apache.hadoop.fs.Path): 8
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 8