Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class FileStreamAdmin, method updateProperties: applies the TTL, format, notification threshold, and description from the given StreamProperties to an existing stream configuration.
private StreamProperties updateProperties(StreamId streamId, StreamProperties properties) throws Exception {
  StreamConfig config = getConfig(streamId);
  StreamConfig.Builder builder = StreamConfig.builder(config);
  if (properties.getTTL() != null) {
    builder.setTTL(properties.getTTL());
  }
  if (properties.getFormat() != null) {
    builder.setFormatSpec(properties.getFormat());
  }
  if (properties.getNotificationThresholdMB() != null) {
    builder.setNotificationThreshold(properties.getNotificationThresholdMB());
  }
  // update stream description
  String description = properties.getDescription();
  if (description != null) {
    streamMetaStore.addStream(streamId, description);
  }
  final StreamConfig newConfig = builder.build();
  impersonator.doAs(streamId, new Callable<Void>() {

    @Override
    public Void call() throws Exception {
      writeConfig(newConfig);
      return null;
    }
  });
  // Update system metadata for stream
  SystemMetadataWriter systemMetadataWriter = new StreamSystemMetadataWriter(metadataStore, streamId, newConfig, description);
  systemMetadataWriter.write();
  return new StreamProperties(config.getTTL(), config.getFormat(), config.getNotificationThresholdMB());
}
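Since every null field in the incoming StreamProperties means "leave this setting unchanged", callers can update a single property in isolation. A minimal sketch of building such a partial update, assuming the same three-argument StreamProperties constructor that appears in the return statement above (the stream id and the public entry point that delegates to updateProperties are hypothetical here):

// Only the notification threshold is set; TTL and format stay null, so the
// builder above keeps the existing values for them.
StreamId streamId = new StreamId("default", "purchases"); // hypothetical stream id
StreamProperties thresholdOnly = new StreamProperties(null, null, 5);
// thresholdOnly is then handed to the admin's public update path, which
// delegates to updateProperties(streamId, thresholdOnly).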
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class HBaseTableFactory, method upgradeCoProcessor: replaces the coprocessors on an existing messaging (TMS) table and refreshes the CDAP and HBase versions recorded in its descriptor.
private void upgradeCoProcessor(TableId tableId, Class<? extends Coprocessor> coprocessor) throws IOException {
  try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
    HTableDescriptor tableDescriptor;
    try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
      // If table doesn't exist, then skip upgrading coprocessor
      if (!tableUtil.tableExists(admin, tableId)) {
        LOG.debug("TMS Table {} was not found. Skip upgrading coprocessor.", tableId);
        return;
      }
      tableDescriptor = tableUtil.getHTableDescriptor(admin, tableId);
    }
    // Get cdap version from the table
    ProjectInfo.Version version = HBaseTableUtil.getVersion(tableDescriptor);
    String hbaseVersion = HBaseTableUtil.getHBaseVersion(tableDescriptor);
    if (hbaseVersion != null && hbaseVersion.equals(HBaseVersion.getVersionString()) && version.compareTo(ProjectInfo.getVersion()) >= 0) {
      // If the CDAP version has not changed or is greater, no need to update. Just enable the table, in case
      // it has been disabled by the upgrade tool, and return
      LOG.info("Table '{}' has not changed and its version '{}' is same or greater than current CDAP version '{}'." + " The underlying HBase version {} has also not changed.", tableId, version, ProjectInfo.getVersion(), hbaseVersion);
      enableTable(ddlExecutor, tableId);
      return;
    }
    // create a new descriptor for the table update
    HTableDescriptorBuilder newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);
    // Remove old coprocessors
    Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil.getCoprocessorInfo(tableDescriptor);
    for (Map.Entry<String, HBaseTableUtil.CoprocessorInfo> coprocessorEntry : coprocessorInfo.entrySet()) {
      newDescriptor.removeCoprocessor(coprocessorEntry.getValue().getClassName());
    }
    // Add new coprocessor
    CoprocessorDescriptor coprocessorDescriptor = coprocessorManager.getCoprocessorDescriptor(coprocessor, Coprocessor.PRIORITY_USER);
    Path path = coprocessorDescriptor.getPath() == null ? null : new Path(coprocessorDescriptor.getPath());
    newDescriptor.addCoprocessor(coprocessorDescriptor.getClassName(), path, coprocessorDescriptor.getPriority(), coprocessorDescriptor.getProperties());
    // Update CDAP version, table prefix
    HBaseTableUtil.setVersion(newDescriptor);
    HBaseTableUtil.setHBaseVersion(newDescriptor);
    HBaseTableUtil.setTablePrefix(newDescriptor, cConf);
    // Disable auto-splitting
    newDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, cConf.get(Constants.MessagingSystem.TABLE_HBASE_SPLIT_POLICY));
    // Disable the table, modify it, and re-enable it
    disableTable(ddlExecutor, tableId);
    tableUtil.modifyTable(ddlExecutor, newDescriptor.build());
    LOG.debug("Enabling table '{}'...", tableId);
    enableTable(ddlExecutor, tableId);
  }
  LOG.info("Table '{}' update completed.", tableId);
}
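The disableTable and enableTable helpers are not part of this excerpt. A plausible sketch of what they wrap, borrowing the HBaseDDLExecutor calls and the HTableNameConverter/table-prefix usage shown in the AbstractHBaseDataSetAdmin.updateTable example further below (not the actual HBaseTableFactory source; the prefix constant may differ for messaging tables):

// Sketch only: mirrors the ddlExecutor usage in updateTable below.
private void disableTable(HBaseDDLExecutor ddlExecutor, TableId tableId) throws IOException {
  TableName tableName = HTableNameConverter.toTableName(cConf.get(Constants.Dataset.TABLE_PREFIX), tableId);
  ddlExecutor.disableTableIfEnabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
}

private void enableTable(HBaseDDLExecutor ddlExecutor, TableId tableId) throws IOException {
  TableName tableName = HTableNameConverter.toTableName(cConf.get(Constants.Dataset.TABLE_PREFIX), tableId);
  ddlExecutor.enableTableIfDisabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
}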
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class HBaseTable, method persist: converts the buffered Update values (puts and increments) into HBase mutations and flushes them.
@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> updates) throws Exception {
  if (updates.isEmpty()) {
    return;
  }
  byte[] txId = tx == null ? null : Bytes.toBytes(tx.getTransactionId());
  byte[] txWritePointer = tx == null ? null : Bytes.toBytes(tx.getWritePointer());
  List<Mutation> mutations = new ArrayList<>();
  for (Map.Entry<byte[], NavigableMap<byte[], Update>> row : updates.entrySet()) {
    // create these only when they are needed
    PutBuilder put = null;
    PutBuilder incrementPut = null;
    IncrementBuilder increment = null;
    for (Map.Entry<byte[], Update> column : row.getValue().entrySet()) {
      // we want to support tx and non-tx modes
      if (tx != null) {
        // TODO: hijacking timestamp... bad
        Update val = column.getValue();
        if (val instanceof IncrementValue) {
          if (safeReadlessIncrements) {
            increment = getIncrement(increment, row.getKey(), txId, txWritePointer);
            increment.add(columnFamily, column.getKey(), tx.getWritePointer(), ((IncrementValue) val).getValue());
          } else {
            incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
            incrementPut.add(columnFamily, column.getKey(), tx.getWritePointer(), Bytes.toBytes(((IncrementValue) val).getValue()));
          }
        } else if (val instanceof PutValue) {
          put = getPut(put, row.getKey(), txId);
          put.add(columnFamily, column.getKey(), tx.getWritePointer(), wrapDeleteIfNeeded(((PutValue) val).getValue()));
        }
      } else {
        Update val = column.getValue();
        if (val instanceof IncrementValue) {
          incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
          incrementPut.add(columnFamily, column.getKey(), Bytes.toBytes(((IncrementValue) val).getValue()));
        } else if (val instanceof PutValue) {
          put = getPut(put, row.getKey(), txId);
          put.add(columnFamily, column.getKey(), ((PutValue) val).getValue());
        }
      }
    }
    if (incrementPut != null) {
      mutations.add(incrementPut.build());
    }
    if (increment != null) {
      mutations.add(increment.build());
    }
    if (put != null) {
      mutations.add(put.build());
    }
  }
  if (!hbaseFlush(mutations)) {
    LOG.info("No writes to persist!");
  }
}
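Both persist implementations in this section dispatch on the two concrete Update flavors. The hierarchy can be pictured roughly as follows; this is a hand-written approximation inferred from the getValue() calls above, not the actual CDAP source, and the real classes may carry additional methods:

// Approximate shape of co.cask.cdap.data2.dataset2.lib.table.Update and its two
// implementations, inferred from how persist() uses them.
interface Update {
}

final class PutValue implements Update {

  private final byte[] value;

  PutValue(byte[] value) {
    this.value = value;
  }

  byte[] getValue() {
    // Written as-is: HBaseTable wraps deletes if needed, LevelDBTable stores it directly.
    return value;
  }
}

final class IncrementValue implements Update {

  private final Long value;

  IncrementValue(Long value) {
    this.value = value;
  }

  Long getValue() {
    // Applied as a numeric delta: an HBase Increment, an increment put, or a LevelDB increment map entry.
    return value;
  }
}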
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class AbstractHBaseDataSetAdmin, method updateTable: brings an existing HBase table's descriptor, coprocessors, and recorded versions up to date.
/**
 * Performs an update on a given HBase table. It will be updated if either its spec has
 * changed since the HBase table was created or updated, or if the CDAP version recorded
 * in the HTable descriptor is less than the current CDAP version.
 *
 * @param force forces the update regardless of whether the table needs it.
 * @throws IOException If the update failed.
 */
public void updateTable(boolean force) throws IOException {
  try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
    HTableDescriptor tableDescriptor;
    HTableDescriptorBuilder newDescriptor;
    try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
      tableDescriptor = tableUtil.getHTableDescriptor(admin, tableId);
      // create a new descriptor for the table update
      newDescriptor = tableUtil.buildHTableDescriptor(tableDescriptor);
    }
    // update any table properties if necessary
    boolean needUpdate = needsUpdate(tableDescriptor, newDescriptor) || force;
    // Get the cdap version from the table
    ProjectInfo.Version version = HBaseTableUtil.getVersion(tableDescriptor);
    String hbaseVersion = HBaseTableUtil.getHBaseVersion(tableDescriptor);
    if (!needUpdate && hbaseVersion != null && hbaseVersion.equals(HBaseVersion.getVersionString()) && version.compareTo(ProjectInfo.getVersion()) >= 0) {
      // If neither the table spec nor the cdap version have changed, no need to update
      LOG.info("Table '{}' has not changed and its version '{}' is same or greater " + "than current CDAP version '{}'. The underlying HBase version {} has also not changed.", tableId, version, ProjectInfo.getVersion(), hbaseVersion);
      return;
    }
    // Generate the coprocessor jar
    CoprocessorJar coprocessorJar = createCoprocessorJar();
    Location jarLocation = coprocessorJar.getJarLocation();
    // Check if a coprocessor upgrade is needed
    Map<String, HBaseTableUtil.CoprocessorInfo> coprocessorInfo = HBaseTableUtil.getCoprocessorInfo(tableDescriptor);
    // For all required coprocessors, check if they need to be upgraded.
    for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
      HBaseTableUtil.CoprocessorInfo info = coprocessorInfo.get(coprocessor.getName());
      if (info != null) {
        // The same coprocessor has been configured; check the file name to see if they are the same.
        if (!jarLocation.getName().equals(info.getPath().getName())) {
          // Remove the old one and add the new one.
          newDescriptor.removeCoprocessor(info.getClassName());
          addCoprocessor(newDescriptor, coprocessor, coprocessorJar.getPriority(coprocessor));
        }
      } else {
        // The coprocessor is missing from the table, add it.
        addCoprocessor(newDescriptor, coprocessor, coprocessorJar.getPriority(coprocessor));
      }
    }
    // Remove all old coprocessors
    Set<String> coprocessorNames = ImmutableSet.copyOf(Iterables.transform(coprocessorJar.coprocessors, CLASS_TO_NAME));
    for (String remove : Sets.difference(coprocessorInfo.keySet(), coprocessorNames)) {
      newDescriptor.removeCoprocessor(remove);
    }
    HBaseTableUtil.setVersion(newDescriptor);
    HBaseTableUtil.setHBaseVersion(newDescriptor);
    HBaseTableUtil.setTablePrefix(newDescriptor, cConf);
    LOG.info("Updating table '{}'...", tableId);
    TableName tableName = HTableNameConverter.toTableName(cConf.get(Constants.Dataset.TABLE_PREFIX), tableId);
    boolean enableTable = false;
    try {
      ddlExecutor.disableTableIfEnabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
      enableTable = true;
    } catch (TableNotEnabledException e) {
      // If the table is in the cdap_system namespace, enable it regardless so that it can be used later. See CDAP-7324
      if (isSystemTable()) {
        enableTable = true;
      } else {
        LOG.debug("Table '{}' was not enabled before update and will not be enabled after update.", tableId);
      }
    }
    tableUtil.modifyTable(ddlExecutor, newDescriptor.build());
    if (enableTable) {
      LOG.debug("Enabling table '{}'...", tableId);
      ddlExecutor.enableTableIfDisabled(tableName.getNamespaceAsString(), tableName.getQualifierAsString());
    }
  }
  LOG.info("Table '{}' update completed.", tableId);
}
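The CLASS_TO_NAME function used when pruning old coprocessors is not shown in this excerpt; it is presumably a small Guava Function mapping a coprocessor class to its fully qualified name, roughly like this sketch (the exact field declaration and the element type of coprocessorJar.coprocessors are assumptions):

// Sketch of the CLASS_TO_NAME helper referenced above.
private static final Function<Class<?>, String> CLASS_TO_NAME = new Function<Class<?>, String>() {

  @Override
  public String apply(Class<?> input) {
    return input.getName();
  }
};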
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class LevelDBTable, method persist: splits the buffered Update values into separate put and increment maps and persists them.
@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> changes) throws Exception {
  persistedVersion = tx == null ? System.currentTimeMillis() : tx.getWritePointer();
  NavigableMap<byte[], NavigableMap<byte[], byte[]>> puts = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  NavigableMap<byte[], NavigableMap<byte[], Long>> increments = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  for (Map.Entry<byte[], NavigableMap<byte[], Update>> rowEntry : changes.entrySet()) {
    for (Map.Entry<byte[], Update> colEntry : rowEntry.getValue().entrySet()) {
      Update val = colEntry.getValue();
      if (val instanceof IncrementValue) {
        NavigableMap<byte[], Long> incrCols = increments.get(rowEntry.getKey());
        if (incrCols == null) {
          incrCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
          increments.put(rowEntry.getKey(), incrCols);
        }
        incrCols.put(colEntry.getKey(), ((IncrementValue) val).getValue());
      } else if (val instanceof PutValue) {
        NavigableMap<byte[], byte[]> putCols = puts.get(rowEntry.getKey());
        if (putCols == null) {
          putCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
          puts.put(rowEntry.getKey(), putCols);
        }
        putCols.put(colEntry.getKey(), ((PutValue) val).getValue());
      }
    }
  }
  if (!increments.isEmpty() || !puts.isEmpty()) {
    persist(increments, puts);
  }
}
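For a sense of the input shape, here is a sketch of how a changes map mixing a regular write and an increment on one row might be assembled in a test. Bytes and Maps are the same utilities used above; the PutValue and IncrementValue constructors follow the rough hierarchy sketched earlier and their real signatures may differ:

// One row ("row1") carrying a put on column "name" and an increment of 5 on
// column "count"; persist() routes them into the separate puts and increments maps.
NavigableMap<byte[], NavigableMap<byte[], Update>> changes = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
NavigableMap<byte[], Update> columns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("name"), new PutValue(Bytes.toBytes("alice")));
columns.put(Bytes.toBytes("count"), new IncrementValue(5L));
changes.put(Bytes.toBytes("row1"), columns);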