use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class Indexer method postBatchMutateIndispensably.
@Override
public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp, final boolean success) throws IOException {
    if (this.disabled) {
        super.postBatchMutateIndispensably(c, miniBatchOp, success);
        return;
    }
    this.builder.batchCompleted(miniBatchOp);
    if (success) {
        // if miniBatchOp was successfully written, write index updates
        // for each batch operation, only the first one will have anything useful, so we can just grab that
        Mutation mutation = miniBatchOp.getOperation(0);
        WALEdit edit = miniBatchOp.getWalEdit(0);
        doPost(edit, mutation, mutation.getDurability());
    }
}
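The hook forwards the Durability of the first client mutation to doPost, so the index writes honor whatever the client requested. A minimal client-side sketch of where that value originates; the table handle, row, and column names here are illustrative assumptions, not part of the Phoenix source:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class DurabilityExample {
    // Writes one cell with an explicit Durability. Inside the Indexer hook above,
    // mutation.getDurability() reports this same setting, which doPost() receives
    // when the index updates for this batch are written.
    static void writeWithDurability(Table table, byte[] row) throws IOException {
        Put put = new Put(row);
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        put.setDurability(Durability.SKIP_WAL); // e.g. skip the WAL for this write
        table.put(put);
    }
}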
use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class ProtobufUtil method getMutations.
/**
 * Deserializes a list of mutations. Each ByteString entry is a byte array
 * serialized from a MutationProto instance.
 * @param mutations the serialized MutationProto entries
 * @return the deserialized mutations
 * @throws IOException if a MutationProto cannot be parsed
 */
private static List<Mutation> getMutations(List<ByteString> mutations) throws IOException {
    List<Mutation> result = new ArrayList<Mutation>();
    for (ByteString mutation : mutations) {
        MutationProto mProto = MutationProto.parseFrom(mutation);
        result.add(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto));
    }
    return result;
}
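For the producer side, the serialization that feeds this method can be sketched as follows; this assumes the non-shaded HBase 1.x ProtobufUtil.toMutation(MutationType, Mutation) overload and protobuf's ByteString, and is not taken from the Phoenix source:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;

public final class MutationSerializationExample {
    // Serializes Puts into the ByteString form that getMutations() above consumes.
    static List<ByteString> toByteStrings(List<Put> puts) throws IOException {
        List<ByteString> out = new ArrayList<>();
        for (Put put : puts) {
            MutationProto proto = ProtobufUtil.toMutation(MutationType.PUT, put);
            out.add(proto.toByteString());
        }
        return out;
    }
}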
use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
the class TestNonTxIndexBuilder method assertContains.
// Assert that the given collection of indexUpdates contains the given cell
private void assertContains(Collection<Pair<Mutation, byte[]>> indexUpdates, final long mutationTs, final byte[] row, final Type cellType, final byte[] fam, final byte[] qual, final long cellTs) {
    Predicate<Pair<Mutation, byte[]>> hasCellPredicate = new Predicate<Pair<Mutation, byte[]>>() {
        @Override
        public boolean apply(Pair<Mutation, byte[]> input) {
            assertEquals(TEST_TABLE_INDEX_STRING, Bytes.toString(input.getSecond()));
            Mutation mutation = input.getFirst();
            if (mutationTs == mutation.getTimeStamp()) {
                NavigableMap<byte[], List<Cell>> familyCellMap = mutation.getFamilyCellMap();
                Cell updateCell = familyCellMap.get(fam).get(0);
                if (cellType == KeyValue.Type.codeToType(updateCell.getTypeByte())
                        && Bytes.compareTo(fam, CellUtil.cloneFamily(updateCell)) == 0
                        && Bytes.compareTo(qual, CellUtil.cloneQualifier(updateCell)) == 0
                        && cellTs == updateCell.getTimestamp()) {
                    return true;
                }
            }
            return false;
        }
    };
    Optional<Pair<Mutation, byte[]>> tryFind = Iterables.tryFind(indexUpdates, hasCellPredicate);
    assertTrue(tryFind.isPresent());
}
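The collection under test is a list of (index mutation, index table name) pairs. A hedged sketch of a fixture with that shape; the row, family, qualifier, value, and table name are illustrative, not taken from TestNonTxIndexBuilder:

import java.util.ArrayList;
import java.util.Collection;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public final class IndexUpdateFixture {
    // Builds a single-entry indexUpdates collection of the shape assertContains() inspects:
    // a Put against an index row, paired with the target index table name.
    static Collection<Pair<Mutation, byte[]>> singleUpdate(long ts) {
        // Put(row, ts) sets the mutation timestamp, which the cell written below inherits.
        Put indexPut = new Put(Bytes.toBytes("indexRow"), ts);
        indexPut.addColumn(Bytes.toBytes("0"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        Collection<Pair<Mutation, byte[]>> updates = new ArrayList<>();
        updates.add(new Pair<Mutation, byte[]>(indexPut, Bytes.toBytes("TEST_INDEX_TABLE")));
        return updates;
    }
}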
use of org.apache.hadoop.hbase.client.Mutation in project cdap by caskdata.
the class HBaseTable method persist.
@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> updates) throws Exception {
    if (updates.isEmpty()) {
        return;
    }
    List<Mutation> mutations = new ArrayList<>();
    for (Map.Entry<byte[], NavigableMap<byte[], Update>> row : updates.entrySet()) {
        // create these only when they are needed
        PutBuilder put = null;
        PutBuilder incrementPut = null;
        IncrementBuilder increment = null;
        for (Map.Entry<byte[], Update> column : row.getValue().entrySet()) {
            // we want to support tx and non-tx modes
            if (tx != null) {
                // TODO: hijacking timestamp... bad
                Update val = column.getValue();
                if (val instanceof IncrementValue) {
                    if (safeReadlessIncrements) {
                        increment = getIncrement(increment, row.getKey(), true);
                        increment.add(columnFamily, column.getKey(), tx.getWritePointer(), ((IncrementValue) val).getValue());
                    } else {
                        incrementPut = getPutForIncrement(incrementPut, row.getKey());
                        incrementPut.add(columnFamily, column.getKey(), tx.getWritePointer(), Bytes.toBytes(((IncrementValue) val).getValue()));
                    }
                } else if (val instanceof PutValue) {
                    put = getPut(put, row.getKey());
                    put.add(columnFamily, column.getKey(), tx.getWritePointer(), wrapDeleteIfNeeded(((PutValue) val).getValue()));
                }
            } else {
                Update val = column.getValue();
                if (val instanceof IncrementValue) {
                    incrementPut = getPutForIncrement(incrementPut, row.getKey());
                    incrementPut.add(columnFamily, column.getKey(), Bytes.toBytes(((IncrementValue) val).getValue()));
                } else if (val instanceof PutValue) {
                    put = getPut(put, row.getKey());
                    put.add(columnFamily, column.getKey(), ((PutValue) val).getValue());
                }
            }
        }
        if (incrementPut != null) {
            mutations.add(incrementPut.build());
        }
        if (increment != null) {
            mutations.add(increment.build());
        }
        if (put != null) {
            mutations.add(put.build());
        }
    }
    if (!hbaseFlush(mutations)) {
        LOG.info("No writes to persist!");
    }
}
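hbaseFlush is a CDAP helper that is not shown here. As a rough sketch of what flushing a List<Mutation> amounts to with the plain HBase client API, assuming an open Table handle (the method name and return convention below are assumptions, not CDAP's implementation):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;

public final class BatchFlushExample {
    // Submits the accumulated mutations in one round trip and reports whether anything was written.
    static boolean flush(Table table, List<Mutation> mutations) throws IOException, InterruptedException {
        if (mutations.isEmpty()) {
            return false;
        }
        Object[] results = new Object[mutations.size()];
        table.batch(mutations, results); // Puts and Increments can be mixed in one batch
        return true;
    }
}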
use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.
the class DefaultVisibilityLabelServiceImpl method addLabels.
@Override
public OperationStatus[] addLabels(List<byte[]> labels) throws IOException {
    assert labelsRegion != null;
    OperationStatus[] finalOpStatus = new OperationStatus[labels.size()];
    List<Mutation> puts = new ArrayList<>(labels.size());
    int i = 0;
    for (byte[] label : labels) {
        String labelStr = Bytes.toString(label);
        if (this.labelsCache.getLabelOrdinal(labelStr) > 0) {
            finalOpStatus[i] = new OperationStatus(OperationStatusCode.FAILURE, new LabelAlreadyExistsException("Label '" + labelStr + "' already exists"));
        } else {
            Put p = new Put(Bytes.toBytes(ordinalCounter.get()));
            p.addImmutable(LABELS_TABLE_FAMILY, LABEL_QUALIFIER, label, LABELS_TABLE_TAGS);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Adding the label " + labelStr);
            }
            puts.add(p);
            ordinalCounter.incrementAndGet();
        }
        i++;
    }
    if (mutateLabelsRegion(puts, finalOpStatus)) {
        updateZk(true);
    }
    return finalOpStatus;
}
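mutateLabelsRegion (not shown) applies the collected puts to the labels region and fills in finalOpStatus from the per-mutation results. A hedged sketch of the region-level call it wraps; which batchMutate overload is available depends on the HBase version, and the single-argument form is assumed here:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.hadoop.hbase.regionserver.Region;

public final class LabelsRegionExample {
    // Applies label Puts directly to a region; batchMutate returns one
    // OperationStatus per mutation, which the caller can merge into finalOpStatus.
    static OperationStatus[] applyToRegion(Region labelsRegion, List<Mutation> puts) throws IOException {
        return labelsRegion.batchMutate(puts.toArray(new Mutation[puts.size()]));
    }
}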