Use of co.cask.cdap.data2.util.hbase.PutBuilder in project cdap by caskdata.
Class HBaseMetadataTable, method createTopic.
@Override
public void createTopic(TopicMetadata topicMetadata) throws TopicAlreadyExistsException, IOException {
  TopicId topicId = topicMetadata.getTopicId();
  byte[] rowKey = MessagingUtils.toMetadataRowKey(topicId);
  PutBuilder putBuilder = tableUtil.buildPut(rowKey);
  Get get = tableUtil.buildGet(rowKey).addFamily(columnFamily).build();
  try {
    boolean completed = false;
    while (!completed) {
      Result result = hTable.get(get);
      byte[] value = result.getValue(columnFamily, COL);
      if (value == null) {
        // No metadata row yet: create the topic with the default generation.
        TreeMap<String, String> properties = new TreeMap<>(topicMetadata.getProperties());
        properties.put(TopicMetadata.GENERATION_KEY, MessagingUtils.Constants.DEFAULT_GENERATION);
        putBuilder.add(columnFamily, COL, Bytes.toBytes(GSON.toJson(properties, MAP_TYPE)));
        // checkAndPut with a null expected value succeeds only if the cell is still absent.
        completed = hTable.checkAndPut(rowKey, columnFamily, COL, null, putBuilder.build());
      } else {
        Map<String, String> properties = GSON.fromJson(Bytes.toString(value), MAP_TYPE);
        TopicMetadata metadata = new TopicMetadata(topicId, properties);
        if (metadata.exists()) {
          throw new TopicAlreadyExistsException(topicId.getNamespace(), topicId.getTopic());
        }
        // The topic was deleted: its generation is stored negated, so flip the sign
        // and increment to get the next live generation.
        int newGenerationId = (metadata.getGeneration() * -1) + 1;
        TreeMap<String, String> newProperties = new TreeMap<>(properties);
        newProperties.put(TopicMetadata.GENERATION_KEY, Integer.toString(newGenerationId));
        putBuilder.add(columnFamily, COL, Bytes.toBytes(GSON.toJson(newProperties, MAP_TYPE)));
        // Compare-and-swap against the value we read; a lost race retries the loop.
        completed = hTable.checkAndPut(rowKey, columnFamily, COL, value, putBuilder.build());
      }
    }
  } catch (IOException e) {
    throw exceptionHandler.handle(e);
  }
}
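The create path relies on HBase's checkAndPut to resolve concurrent creators without locks. Below is a minimal standalone sketch of that compare-and-set loop, written against the plain HBase 1.x client API rather than CDAP's builders; table, FAMILY, COL, and the value arrays are placeholders, not CDAP's actual constants.

// Minimal create-or-update loop, assuming an org.apache.hadoop.hbase.client.Table
// named "table" and placeholder FAMILY/COL byte[] constants.
boolean done = false;
while (!done) {
  Result result = table.get(new Get(rowKey).addFamily(FAMILY));
  byte[] current = result.getValue(FAMILY, COL);
  if (current == null) {
    // Succeeds only if the cell is still absent when the put is applied.
    Put put = new Put(rowKey).addColumn(FAMILY, COL, createValue);
    done = table.checkAndPut(rowKey, FAMILY, COL, null, put);
  } else {
    // Compare-and-swap: apply only if nobody changed the cell since our read.
    Put put = new Put(rowKey).addColumn(FAMILY, COL, updatedValue);
    done = table.checkAndPut(rowKey, FAMILY, COL, current, put);
  }
}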
Use of co.cask.cdap.data2.util.hbase.PutBuilder in project cdap by caskdata.
Class HBaseMessageTable, method rollback.
@Override
public void rollback(byte[] startKey, byte[] stopKey, byte[] txWritePtr) throws IOException {
  Scan scan = tableUtil.buildScan().setStartRow(startKey).setStopRow(stopKey).setCaching(scanCacheRows).build();
  List<Put> batchPuts = new ArrayList<>();
  try (ResultScanner scanner = DistributedScanner.create(hTable, scan, rowKeyDistributor, scanExecutor)) {
    for (Result result : scanner) {
      // No need to turn the key back into the original row key: the put must use
      // the actual (distributed) row key as stored.
      PutBuilder putBuilder = tableUtil.buildPut(result.getRow());
      putBuilder.add(columnFamily, TX_COL, txWritePtr);
      batchPuts.add(putBuilder.build());
    }
  }
  try {
    if (!batchPuts.isEmpty()) {
      hTable.put(batchPuts);
      if (!hTable.isAutoFlush()) {
        hTable.flushCommits();
      }
    }
  } catch (IOException e) {
    throw exceptionHandler.handle(e);
  }
}
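The method scans a key range, rewrites a single marker column for every row it finds, and sends all the puts in one batch. A standalone sketch of that scan-then-batch-overwrite pattern against the plain HBase 1.x client API, without CDAP's key distribution, follows; table, FAMILY, and TX_COL are placeholders.

// Overwrite one marker column for every row in [startKey, stopKey).
List<Put> batch = new ArrayList<>();
Scan scan = new Scan(startKey, stopKey);
scan.setCaching(1000);  // fetch rows in chunks to bound client memory
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    Put put = new Put(result.getRow());
    put.addColumn(FAMILY, TX_COL, txWritePtr);
    batch.add(put);
  }
}
if (!batch.isEmpty()) {
  table.put(batch);  // one batched round trip instead of per-row puts
}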
Use of co.cask.cdap.data2.util.hbase.PutBuilder in project cdap by caskdata.
Class HBaseMetricsTable, method put.
@Override
public void put(SortedMap<byte[], ? extends SortedMap<byte[], Long>> updates) {
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], Long>> row : updates.entrySet()) {
    byte[] distributedKey = createDistributedRowKey(row.getKey());
    PutBuilder put = tableUtil.buildPut(distributedKey);
    for (Map.Entry<byte[], Long> column : row.getValue().entrySet()) {
      // Long values are stored as their 8-byte encoding.
      put.add(columnFamily, column.getKey(), Bytes.toBytes(column.getValue()));
    }
    puts.add(put.build());
  }
  try {
    hTable.put(puts);
    hTable.flushCommits();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
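A hypothetical usage sketch for this method: because the row and column keys are raw byte arrays, the maps must be ordered with Bytes.BYTES_COMPARATOR. The metric names and the metricsTable reference are placeholders.

// Build a two-column update for one row and write it; the put(...) above
// encodes each Long value as 8 bytes.
SortedMap<byte[], SortedMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
SortedMap<byte[], Long> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("requests"), 42L);
columns.put(Bytes.toBytes("errors"), 1L);
updates.put(Bytes.toBytes("app.service.handler"), columns);
metricsTable.put(updates);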
Use of co.cask.cdap.data2.util.hbase.PutBuilder in project cdap by caskdata.
Class HBaseMetricsTable, method putBytes.
@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
    byte[] distributedKey = createDistributedRowKey(row.getKey());
    PutBuilder put = tableUtil.buildPut(distributedKey);
    for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
      put.add(columnFamily, column.getKey(), column.getValue());
    }
    puts.add(put.build());
  }
  try {
    hTable.put(puts);
    hTable.flushCommits();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
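putBytes mirrors put but skips the long encoding: the caller supplies values that are already serialized. A short usage sketch with placeholder names; serializedSnapshot stands for bytes produced elsewhere.

SortedMap<byte[], SortedMap<byte[], byte[]>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
SortedMap<byte[], byte[]> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("snapshot"), serializedSnapshot);  // raw bytes, stored as-is
updates.put(Bytes.toBytes("app.service.handler"), columns);
metricsTable.putBytes(updates);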
Use of co.cask.cdap.data2.util.hbase.PutBuilder in project cdap by caskdata.
Class HBaseTable, method persist.
@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> updates) throws Exception {
  if (updates.isEmpty()) {
    return;
  }
  byte[] txId = tx == null ? null : Bytes.toBytes(tx.getTransactionId());
  byte[] txWritePointer = tx == null ? null : Bytes.toBytes(tx.getWritePointer());
  List<Mutation> mutations = new ArrayList<>();
  for (Map.Entry<byte[], NavigableMap<byte[], Update>> row : updates.entrySet()) {
    // Create these builders only when they are needed.
    PutBuilder put = null;
    PutBuilder incrementPut = null;
    IncrementBuilder increment = null;
    for (Map.Entry<byte[], Update> column : row.getValue().entrySet()) {
      // We want to support both tx and non-tx modes.
      if (tx != null) {
        // TODO: hijacking timestamp... bad
        Update val = column.getValue();
        if (val instanceof IncrementValue) {
          if (safeReadlessIncrements) {
            increment = getIncrement(increment, row.getKey(), txId, txWritePointer);
            increment.add(columnFamily, column.getKey(), tx.getWritePointer(), ((IncrementValue) val).getValue());
          } else {
            incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
            incrementPut.add(columnFamily, column.getKey(), tx.getWritePointer(),
                             Bytes.toBytes(((IncrementValue) val).getValue()));
          }
        } else if (val instanceof PutValue) {
          put = getPut(put, row.getKey(), txId);
          put.add(columnFamily, column.getKey(), tx.getWritePointer(), wrapDeleteIfNeeded(((PutValue) val).getValue()));
        }
      } else {
        Update val = column.getValue();
        if (val instanceof IncrementValue) {
          incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
          incrementPut.add(columnFamily, column.getKey(), Bytes.toBytes(((IncrementValue) val).getValue()));
        } else if (val instanceof PutValue) {
          put = getPut(put, row.getKey(), txId);
          put.add(columnFamily, column.getKey(), ((PutValue) val).getValue());
        }
      }
    }
    if (incrementPut != null) {
      mutations.add(incrementPut.build());
    }
    if (increment != null) {
      mutations.add(increment.build());
    }
    if (put != null) {
      mutations.add(put.build());
    }
  }
  if (!hbaseFlush(mutations)) {
    LOG.info("No writes to persist!");
  }
}
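The getPut, getPutForIncrement, and getIncrement helpers are not shown here; the pattern they implement is lazy creation, so a row that produces no mutation of a given kind never allocates that builder. A hypothetical sketch of one such helper under that assumption, using the plain HBase Put; the attribute name "cdap.tx.id" is a placeholder, not CDAP's actual constant.

// Hypothetical lazy-create helper: reuse the builder if it exists, otherwise
// create it and tag it with the transaction id so server-side code can
// associate the write with its transaction.
private Put getPut(Put put, byte[] row, byte[] txId) {
  if (put == null) {
    put = new Put(row);
    if (txId != null) {
      put.setAttribute("cdap.tx.id", txId);  // placeholder attribute key
    }
  }
  return put;
}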