Example 1 with Update

use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

the class FileStreamAdmin method updateProperties.

private StreamProperties updateProperties(StreamId streamId, StreamProperties properties) throws Exception {
    StreamConfig config = getConfig(streamId);
    StreamConfig.Builder builder = StreamConfig.builder(config);
    if (properties.getTTL() != null) {
        builder.setTTL(properties.getTTL());
    }
    if (properties.getFormat() != null) {
        builder.setFormatSpec(properties.getFormat());
    }
    if (properties.getNotificationThresholdMB() != null) {
        builder.setNotificationThreshold(properties.getNotificationThresholdMB());
    }
    // update stream description
    String description = properties.getDescription();
    if (description != null) {
        streamMetaStore.addStream(streamId, description);
    }
    final StreamConfig newConfig = builder.build();
    impersonator.doAs(streamId, new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            writeConfig(newConfig);
            return null;
        }
    });
    // Update system metadata for stream
    SystemMetadataWriter systemMetadataWriter = new StreamSystemMetadataWriter(metadataStore, streamId, newConfig, description);
    systemMetadataWriter.write();
    // return the updated properties rather than the pre-update config
    return new StreamProperties(newConfig.getTTL(), newConfig.getFormat(), newConfig.getNotificationThresholdMB());
}
Also used : StreamSystemMetadataWriter(co.cask.cdap.data2.metadata.system.StreamSystemMetadataWriter) SystemMetadataWriter(co.cask.cdap.data2.metadata.system.SystemMetadataWriter) StreamSystemMetadataWriter(co.cask.cdap.data2.metadata.system.StreamSystemMetadataWriter) StreamProperties(co.cask.cdap.proto.StreamProperties) CoordinatorStreamProperties(co.cask.cdap.data.stream.CoordinatorStreamProperties) NotificationFeedException(co.cask.cdap.notifications.feeds.NotificationFeedException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) NotFoundException(co.cask.cdap.common.NotFoundException) StreamNotFoundException(co.cask.cdap.common.StreamNotFoundException)
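
A note on the pattern: updateProperties copies the existing StreamConfig into a builder and overrides only the fields that were explicitly supplied, with null meaning "leave unchanged". Below is a minimal, self-contained sketch of that copy-builder idiom; the Props and Builder types are hypothetical stand-ins, not CDAP's StreamConfig or StreamProperties.

// Hypothetical Props/Builder pair illustrating the copy-builder idiom above.
final class Props {
    final Long ttl;                 // null means "not set"
    final Integer thresholdMb;

    Props(Long ttl, Integer thresholdMb) {
        this.ttl = ttl;
        this.thresholdMb = thresholdMb;
    }

    // Seed a builder with the current values so untouched fields carry over.
    static Builder builder(Props existing) {
        return new Builder(existing.ttl, existing.thresholdMb);
    }

    static final class Builder {
        private Long ttl;
        private Integer thresholdMb;

        Builder(Long ttl, Integer thresholdMb) {
            this.ttl = ttl;
            this.thresholdMb = thresholdMb;
        }

        Builder setTtl(long ttl) { this.ttl = ttl; return this; }
        Builder setThresholdMb(int mb) { this.thresholdMb = mb; return this; }
        Props build() { return new Props(ttl, thresholdMb); }
    }
}

public class CopyBuilderSketch {
    // Null parameters mean "leave unchanged", mirroring the null checks above.
    static Props update(Props current, Long newTtl, Integer newThresholdMb) {
        Props.Builder builder = Props.builder(current);
        if (newTtl != null) {
            builder.setTtl(newTtl);
        }
        if (newThresholdMb != null) {
            builder.setThresholdMb(newThresholdMb);
        }
        return builder.build();
    }

    public static void main(String[] args) {
        Props current = new Props(86400L, 1024);
        Props updated = update(current, null, 2048); // only the threshold changes
        System.out.println(updated.ttl + " " + updated.thresholdMb); // 86400 2048
    }
}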

Example 2 with Update

use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

the class HBaseTableFactory method upgradeCoProcessor.

private void upgradeCoProcessor(TableId tableId, Class<? extends Coprocessor> coprocessor) throws IOException {
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        HTableDescriptor tableDescriptor;
        try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
            // If the table doesn't exist, skip upgrading the coprocessor
            if (!tableUtil.tableExists(admin, tableId)) {
                LOG.debug("TMS Table {} was not found. Skip upgrading coprocessor.", tableId);
                return;
            }
            tableDescriptor = tableUtil.getHTableDescriptor(admin, tableId);
        }
        // Get cdap version from the table
        ProjectInfo.Version version = HBaseTableUtil.getVersion(tableDescriptor);
        String hbaseVersion = HBaseTableUtil.getHBaseVersion(tableDescriptor);
        if (hbaseVersion != null && hbaseVersion.equals(HBaseVersion.getVersionString()) && version.compareTo(ProjectInfo.getVersion()) >= 0) {
            // If the CDAP version recorded on the table is the same or newer and the HBase version
            // is unchanged, no update is needed. Just enable the table, in case it has been
            // disabled by the upgrade tool, and return
            LOG.info("Table '{}' has not changed and its version '{}' is same or greater than current CDAP version '{}'." + " The underlying HBase version {} has also not changed.", tableId, version, ProjectInfo.getVersion(), hbaseVersion);
            enableTable(ddlExecutor, tableId);
            return;
        }
        // create a new descriptor for the table update
        HTableDescriptorBuilder newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);
        // Remove old coprocessor
        Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil.getCoprocessorInfo(tableDescriptor);
        for (Map.Entry<String, HBaseTableUtil.CoprocessorInfo> coprocessorEntry : coprocessorInfo.entrySet()) {
            newDescriptor.removeCoprocessor(coprocessorEntry.getValue().getClassName());
        }
        // Add new coprocessor
        CoprocessorDescriptor coprocessorDescriptor = coprocessorManager.getCoprocessorDescriptor(coprocessor, Coprocessor.PRIORITY_USER);
        Path path = coprocessorDescriptor.getPath() == null ? null : new Path(coprocessorDescriptor.getPath());
        newDescriptor.addCoprocessor(coprocessorDescriptor.getClassName(), path, coprocessorDescriptor.getPriority(), coprocessorDescriptor.getProperties());
        // Update CDAP version, table prefix
        HBaseTableUtil.setVersion(newDescriptor);
        HBaseTableUtil.setHBaseVersion(newDescriptor);
        HBaseTableUtil.setTablePrefix(newDescriptor, cConf);
        // Disable auto-splitting
        newDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, cConf.get(Constants.MessagingSystem.TABLE_HBASE_SPLIT_POLICY));
        // Disable the table before modifying it
        disableTable(ddlExecutor, tableId);
        tableUtil.modifyTable(ddlExecutor, newDescriptor.build());
        LOG.debug("Enabling table '{}'...", tableId);
        enableTable(ddlExecutor, tableId);
    }
    LOG.info("Table '{}' update completed.", tableId);
}
Also used : HBaseDDLExecutor(co.cask.cdap.spi.hbase.HBaseDDLExecutor) Path(org.apache.hadoop.fs.Path) HTableDescriptorBuilder(co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) ProjectInfo(co.cask.cdap.common.utils.ProjectInfo) CoprocessorDescriptor(co.cask.cdap.spi.hbase.CoprocessorDescriptor) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
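
The heart of upgradeCoProcessor is the disable-modify-enable cycle on the table descriptor. Below is a minimal sketch of that cycle written against the plain HBase 1.x client API rather than CDAP's ddlExecutor and tableUtil wrappers; the table name and the property being set are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DescriptorUpdateSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("demo:messages"); // placeholder name
            if (!admin.tableExists(table)) {
                return; // mirror the "table missing, skip upgrade" guard above
            }
            // Copy the current descriptor and apply changes to the copy.
            HTableDescriptor updated = new HTableDescriptor(admin.getTableDescriptor(table));
            updated.setValue(HTableDescriptor.SPLIT_POLICY,
                "org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy");
            // modifyTable is applied with the table offline, as in the CDAP code.
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            admin.modifyTable(table, updated);
            admin.enableTable(table);
        }
    }
}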

Example 3 with Update

use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

the class HBaseTable method persist.

@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> updates) throws Exception {
    if (updates.isEmpty()) {
        return;
    }
    byte[] txId = tx == null ? null : Bytes.toBytes(tx.getTransactionId());
    byte[] txWritePointer = tx == null ? null : Bytes.toBytes(tx.getWritePointer());
    List<Mutation> mutations = new ArrayList<>();
    for (Map.Entry<byte[], NavigableMap<byte[], Update>> row : updates.entrySet()) {
        // create these only when they are needed
        PutBuilder put = null;
        PutBuilder incrementPut = null;
        IncrementBuilder increment = null;
        for (Map.Entry<byte[], Update> column : row.getValue().entrySet()) {
            // we want to support both tx and non-tx modes
            if (tx != null) {
                // TODO: hijacking timestamp... bad
                Update val = column.getValue();
                if (val instanceof IncrementValue) {
                    if (safeReadlessIncrements) {
                        increment = getIncrement(increment, row.getKey(), txId, txWritePointer);
                        increment.add(columnFamily, column.getKey(), tx.getWritePointer(), ((IncrementValue) val).getValue());
                    } else {
                        incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
                        incrementPut.add(columnFamily, column.getKey(), tx.getWritePointer(), Bytes.toBytes(((IncrementValue) val).getValue()));
                    }
                } else if (val instanceof PutValue) {
                    put = getPut(put, row.getKey(), txId);
                    put.add(columnFamily, column.getKey(), tx.getWritePointer(), wrapDeleteIfNeeded(((PutValue) val).getValue()));
                }
            } else {
                Update val = column.getValue();
                if (val instanceof IncrementValue) {
                    incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
                    incrementPut.add(columnFamily, column.getKey(), Bytes.toBytes(((IncrementValue) val).getValue()));
                } else if (val instanceof PutValue) {
                    put = getPut(put, row.getKey(), txId);
                    put.add(columnFamily, column.getKey(), ((PutValue) val).getValue());
                }
            }
        }
        if (incrementPut != null) {
            mutations.add(incrementPut.build());
        }
        if (increment != null) {
            mutations.add(increment.build());
        }
        if (put != null) {
            mutations.add(put.build());
        }
    }
    if (!hbaseFlush(mutations)) {
        LOG.info("No writes to persist!");
    }
}
Also used : NavigableMap(java.util.NavigableMap) ArrayList(java.util.ArrayList) Update(co.cask.cdap.data2.dataset2.lib.table.Update) IncrementValue(co.cask.cdap.data2.dataset2.lib.table.IncrementValue) PutValue(co.cask.cdap.data2.dataset2.lib.table.PutValue) PutBuilder(co.cask.cdap.data2.util.hbase.PutBuilder) IncrementBuilder(co.cask.cdap.data2.util.hbase.IncrementBuilder) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) NavigableMap(java.util.NavigableMap)
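
persist routes each Update by its concrete type: an IncrementValue becomes a counter mutation, a PutValue becomes an ordinary cell write. The classes below are simplified stand-ins for CDAP's Update, PutValue, and IncrementValue, just to show the instanceof-based routing in isolation.

import java.util.LinkedHashMap;
import java.util.Map;

// Simplified stand-ins for co.cask.cdap.data2.dataset2.lib.table's
// Update/PutValue/IncrementValue, modelling only what the dispatch needs.
interface Update<T> { T getValue(); }

final class PutValue implements Update<byte[]> {
    private final byte[] value;
    PutValue(byte[] value) { this.value = value; }
    public byte[] getValue() { return value; }
}

final class IncrementValue implements Update<Long> {
    private final long delta;
    IncrementValue(long delta) { this.delta = delta; }
    public Long getValue() { return delta; }
}

public class DispatchSketch {
    public static void main(String[] args) {
        Map<String, Update<?>> columns = new LinkedHashMap<>();
        columns.put("count", new IncrementValue(1L));
        columns.put("name", new PutValue("alice".getBytes()));
        for (Map.Entry<String, Update<?>> column : columns.entrySet()) {
            Update<?> val = column.getValue();
            if (val instanceof IncrementValue) {
                // increments are folded into a counter mutation
                System.out.println(column.getKey() + " += " + ((IncrementValue) val).getValue());
            } else if (val instanceof PutValue) {
                // puts overwrite the stored cell value
                System.out.println(column.getKey() + " = " + new String(((PutValue) val).getValue()));
            }
        }
    }
}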

Example 4 with Update

use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

the class AbstractHBaseDataSetAdmin method updateTable.

/**
 * Performs an update on the given HBase table. The table is updated if either its spec
 * has changed since the HBase table was created or last updated, or if the CDAP version
 * recorded in the HTable descriptor is less than the current CDAP version.
 *
 * @param force forces the update regardless of whether the table needs it.
 * @throws IOException if the update failed.
 */
public void updateTable(boolean force) throws IOException {
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        HTableDescriptor tableDescriptor;
        HTableDescriptorBuilder newDescriptor;
        try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
            tableDescriptor = tableUtil.getHTableDescriptor(admin, tableId);
            // create a new descriptor for the table update
            newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);
        }
        // update any table properties if necessary
        boolean needUpdate = needsUpdate(tableDescriptor, newDescriptor) || force;
        // Get the cdap version from the table
        ProjectInfo.Version version = HBaseTableUtil.getVersion(tableDescriptor);
        String hbaseVersion = HBaseTableUtil.getHBaseVersion(tableDescriptor);
        if (!needUpdate && hbaseVersion != null && hbaseVersion.equals(HBaseVersion.getVersionString()) && version.compareTo(ProjectInfo.getVersion()) >= 0) {
            // If the table spec, the CDAP version, and the HBase version are all unchanged, no need to update
            LOG.info("Table '{}' has not changed and its version '{}' is same or greater " + "than current CDAP version '{}'. The underlying HBase version {} has also not changed.", tableId, version, ProjectInfo.getVersion(), hbaseVersion);
            return;
        }
        // Generate the coprocessor jar
        CoprocessorJar coprocessorJar = createCoprocessorJar();
        Location jarLocation = coprocessorJar.getJarLocation();
        // Check if coprocessor upgrade is needed
        Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil.getCoprocessorInfo(tableDescriptor);
        // For all required coprocessors, check whether they need to be upgraded.
        for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
            HBaseTableUtil.CoprocessorInfo info = coprocessorInfo.get(coprocessor.getName());
            if (info != null) {
                // The same coprocessor has been configured, check by the file name to see if they are the same.
                if (!jarLocation.getName().equals(info.getPath().getName())) {
                    // Remove old one and add the new one.
                    newDescriptor.removeCoprocessor(info.getClassName());
                    addCoprocessor(newDescriptor, coprocessor, coprocessorJar.getPriority(coprocessor));
                }
            } else {
                // The coprocessor is missing from the table, add it.
                addCoprocessor(newDescriptor, coprocessor, coprocessorJar.getPriority(coprocessor));
            }
        }
        // Remove any old coprocessors that are no longer required
        Set<String> coprocessorNames = ImmutableSet.copyOf(Iterables.transform(coprocessorJar.coprocessors, CLASS_TO_NAME));
        for (String remove : Sets.difference(coprocessorInfo.keySet(), coprocessorNames)) {
            newDescriptor.removeCoprocessor(remove);
        }
        HBaseTableUtil.setVersion(newDescriptor);
        HBaseTableUtil.setHBaseVersion(newDescriptor);
        HBaseTableUtil.setTablePrefix(newDescriptor, cConf);
        LOG.info("Updating table '{}'...", tableId);
        TableName tableName = HTableNameConverter.toTableName(cConf.get(Constants.Dataset.TABLE_PREFIX), tableId);
        boolean enableTable = false;
        try {
            ddlExecutor.disableTableIfEnabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
            enableTable = true;
        } catch (TableNotEnabledException e) {
            // If the table is in the cdap_system namespace, enable it regardless so that it can be used later. See CDAP-7324
            if (isSystemTable()) {
                enableTable = true;
            } else {
                LOG.debug("Table '{}' was not enabled before update and will not be enabled after update.", tableId);
            }
        }
        tableUtil.modifyTable(ddlExecutor, newDescriptor.build());
        if (enableTable) {
            LOG.debug("Enabling table '{}'...", tableId);
            ddlExecutor.enableTableIfDisabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
        }
    }
    LOG.info("Table '{}' update completed.", tableId);
}
Also used : HBaseDDLExecutor(co.cask.cdap.spi.hbase.HBaseDDLExecutor) HTableDescriptorBuilder(co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) TableName(org.apache.hadoop.hbase.TableName) ProjectInfo(co.cask.cdap.common.utils.ProjectInfo) Location(org.apache.twill.filesystem.Location) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException)
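
One step worth calling out: stale coprocessors are found by taking the set difference between what the table descriptor currently declares and what the freshly built coprocessor jar requires. A small standalone sketch of that step using Guava follows; the class names are hypothetical.

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class CoprocessorDiffSketch {
    public static void main(String[] args) {
        // What the table descriptor currently declares (hypothetical class names).
        Set<String> configuredOnTable = ImmutableSet.of(
            "co.example.MessageTableRegionObserver",
            "co.example.ObsoleteObserver");
        // What the freshly built coprocessor jar requires.
        Set<String> required = ImmutableSet.of(
            "co.example.MessageTableRegionObserver");
        // Sets.difference returns a live view; everything in it is stale.
        for (String stale : Sets.difference(configuredOnTable, required)) {
            System.out.println("would remove coprocessor: " + stale);
        }
    }
}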

Example 5 with Update

use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

the class LevelDBTable method persist.

@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> changes) throws Exception {
    persistedVersion = tx == null ? System.currentTimeMillis() : tx.getWritePointer();
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> puts = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    NavigableMap<byte[], NavigableMap<byte[], Long>> increments = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], NavigableMap<byte[], Update>> rowEntry : changes.entrySet()) {
        for (Map.Entry<byte[], Update> colEntry : rowEntry.getValue().entrySet()) {
            Update val = colEntry.getValue();
            if (val instanceof IncrementValue) {
                NavigableMap<byte[], Long> incrCols = increments.get(rowEntry.getKey());
                if (incrCols == null) {
                    incrCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
                    increments.put(rowEntry.getKey(), incrCols);
                }
                incrCols.put(colEntry.getKey(), ((IncrementValue) val).getValue());
            } else if (val instanceof PutValue) {
                NavigableMap<byte[], byte[]> putCols = puts.get(rowEntry.getKey());
                if (putCols == null) {
                    putCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
                    puts.put(rowEntry.getKey(), putCols);
                }
                putCols.put(colEntry.getKey(), ((PutValue) val).getValue());
            }
        }
    }
    if (!increments.isEmpty() || !puts.isEmpty()) {
        persist(increments, puts);
    }
}
Also used : IncrementValue(co.cask.cdap.data2.dataset2.lib.table.IncrementValue) PutValue(co.cask.cdap.data2.dataset2.lib.table.PutValue) NavigableMap(java.util.NavigableMap) Update(co.cask.cdap.data2.dataset2.lib.table.Update) NavigableMap(java.util.NavigableMap) Map(java.util.Map)
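
The get/null-check/put sequence used to build the nested maps above predates Java 8; the same grouping can be expressed with computeIfAbsent and merge. A standalone sketch, assuming Java 9's Arrays.compareUnsigned as a stand-in for HBase's unsigned Bytes.BYTES_COMPARATOR:

import java.util.Arrays;
import java.util.Comparator;
import java.util.NavigableMap;
import java.util.TreeMap;

public class GroupingSketch {
    // Unsigned lexicographic ordering, matching HBase's Bytes.BYTES_COMPARATOR.
    private static final Comparator<byte[]> BYTES = Arrays::compareUnsigned;

    public static void main(String[] args) {
        NavigableMap<byte[], NavigableMap<byte[], Long>> increments = new TreeMap<>(BYTES);
        byte[] row = "row1".getBytes();
        byte[] col = "count".getBytes();
        // computeIfAbsent replaces the get/null-check/put sequence above;
        // merge accumulates repeated deltas for the same cell.
        increments.computeIfAbsent(row, r -> new TreeMap<>(BYTES)).merge(col, 5L, Long::sum);
        increments.computeIfAbsent(row, r -> new TreeMap<>(BYTES)).merge(col, 2L, Long::sum);
        System.out.println(increments.get(row).get(col)); // 7
    }
}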

Aggregations

Update (co.cask.cdap.data2.dataset2.lib.table.Update)7 Map (java.util.Map)7 NavigableMap (java.util.NavigableMap)7 IOException (java.io.IOException)6 Test (org.junit.Test)6 DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException)4 DatasetSpecification (co.cask.cdap.api.dataset.DatasetSpecification)4 PutValue (co.cask.cdap.data2.dataset2.lib.table.PutValue)4 NotFoundException (co.cask.cdap.common.NotFoundException)3 IncrementValue (co.cask.cdap.data2.dataset2.lib.table.IncrementValue)3 TransactionExecutor (org.apache.tephra.TransactionExecutor)3 DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin)2 IncompatibleUpdateException (co.cask.cdap.api.dataset.IncompatibleUpdateException)2 Updatable (co.cask.cdap.api.dataset.Updatable)2 Table (co.cask.cdap.api.dataset.table.Table)2 BadRequestException (co.cask.cdap.common.BadRequestException)2 ConflictException (co.cask.cdap.common.ConflictException)2 ProjectInfo (co.cask.cdap.common.utils.ProjectInfo)2 DynamicTransactionExecutorFactory (co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory)2 DatasetType (co.cask.cdap.data2.datafabric.dataset.DatasetType)2