Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache: the class RSRpcServices, method mutateRows.
/**
 * Mutate a list of rows atomically.
 *
 * @param region the region to apply the mutations to
 * @param actions the put/delete actions to apply as a single atomic unit
 * @param cellScanner if non-null, the mutation data -- the Cell content.
 * @param builder the RegionActionResult builder to which one empty result per action is added
 * @throws IOException
 */
private void mutateRows(final Region region, final List<ClientProtos.Action> actions,
    final CellScanner cellScanner, RegionActionResult.Builder builder) throws IOException {
  if (!region.getRegionInfo().isMetaTable()) {
    regionServer.cacheFlusher.reclaimMemStoreMemory();
  }
  RowMutations rm = null;
  int i = 0;
  ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder =
      ClientProtos.ResultOrException.newBuilder();
  for (ClientProtos.Action action : actions) {
    if (action.hasGet()) {
      throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
    }
    MutationType type = action.getMutation().getMutateType();
    if (rm == null) {
      rm = new RowMutations(action.getMutation().getRow().toByteArray(), actions.size());
    }
    switch (type) {
      case PUT:
        rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
        break;
      case DELETE:
        rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
        break;
      default:
        throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
    }
    // To unify the response format with doNonAtomicRegionMutation and read through client's
    // AsyncProcess we have to add an empty result instance per operation
    resultOrExceptionOrBuilder.clear();
    resultOrExceptionOrBuilder.setIndex(i++);
    builder.addResultOrException(resultOrExceptionOrBuilder.build());
  }
  region.mutateRow(rm);
}
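For context, the server-side method above is what a client reaches through Table.mutateRow. A minimal client-side sketch, assuming an open Connection named connection, a table "t1" with a column family "cf" (all hypothetical), the usual org.apache.hadoop.hbase.client imports, and an enclosing method that declares throws IOException:

byte[] row = Bytes.toBytes("row1");
RowMutations rm = new RowMutations(row);
// Both mutations target the same row; RowMutations rejects a mismatched row key.
rm.add(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("1")));
rm.add(new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("b")));
try (Table table = connection.getTable(TableName.valueOf("t1"))) {
  // Applied atomically under the row lock by the region server.
  table.mutateRow(rm);
}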
Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache: the class ThriftUtilities, method rowMutationsFromThrift.
/**
* Creates a {@link RowMutations} (HBase) from a {@link TRowMutations} (Thrift)
*
* @param in the <code>TRowMutations</code> to convert
*
* @return converted <code>RowMutations</code>
*/
public static RowMutations rowMutationsFromThrift(TRowMutations in) throws IOException {
  List<TMutation> mutations = in.getMutations();
  RowMutations out = new RowMutations(in.getRow(), mutations.size());
  for (TMutation mutation : mutations) {
    if (mutation.isSetPut()) {
      out.add(putFromThrift(mutation.getPut()));
    }
    if (mutation.isSetDeleteSingle()) {
      out.add(deleteFromThrift(mutation.getDeleteSingle()));
    }
  }
  return out;
}
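A minimal sketch of driving this converter, assuming the Thrift2 generated classes from hbase-thrift (TPut, TDelete, TColumnValue, TMutation, TRowMutations); the row, family, and qualifier values are hypothetical:

ByteBuffer row = ByteBuffer.wrap(Bytes.toBytes("row1"));
TPut tPut = new TPut(row, Collections.singletonList(
    new TColumnValue(ByteBuffer.wrap(Bytes.toBytes("cf")),
        ByteBuffer.wrap(Bytes.toBytes("a")),
        ByteBuffer.wrap(Bytes.toBytes("1")))));
TDelete tDelete = new TDelete(row);
// TMutation is a Thrift union: exactly one of put / deleteSingle is set per entry.
TMutation putMutation = new TMutation();
putMutation.setPut(tPut);
TMutation deleteMutation = new TMutation();
deleteMutation.setDeleteSingle(tDelete);
TRowMutations tRowMutations = new TRowMutations(row, Arrays.asList(putMutation, deleteMutation));
RowMutations rm = ThriftUtilities.rowMutationsFromThrift(tRowMutations);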
Use of org.apache.hadoop.hbase.client.RowMutations in project phoenix by apache: the class UpgradeIT, method removeBaseColumnCountKV.
private static void removeBaseColumnCountKV(String tenantId, String schemaName, String tableName)
    throws Exception {
  byte[] rowKey = SchemaUtil.getTableKey(
      tenantId == null ? new byte[0] : Bytes.toBytes(tenantId),
      schemaName == null ? new byte[0] : Bytes.toBytes(schemaName),
      Bytes.toBytes(tableName));
  Put viewColumnDefinitionPut = new Put(rowKey, HConstants.LATEST_TIMESTAMP);
  viewColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
      PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, HConstants.LATEST_TIMESTAMP, null);
  try (PhoenixConnection conn = (DriverManager.getConnection(getUrl())).unwrap(PhoenixConnection.class)) {
    try (HTableInterface htable =
        conn.getQueryServices().getTable(Bytes.toBytes(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME))) {
      RowMutations mutations = new RowMutations(rowKey);
      mutations.add(viewColumnDefinitionPut);
      htable.mutateRow(mutations);
    }
  }
}
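For comparison, a hedged sketch of the same RowMutations/mutateRow pattern with a Delete instead of the empty-value Put, which would physically remove the BASE_COLUMN_COUNT cell; it reuses rowKey and htable from the method above:

Delete delete = new Delete(rowKey);
delete.deleteColumns(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
    PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES);
RowMutations rm = new RowMutations(rowKey);
rm.add(delete);
// mutateRow applies the batch atomically; with a single Delete this is
// equivalent to htable.delete(delete), but the pattern extends to mixed batches.
htable.mutateRow(rm);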