Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.
Class RequestConverter, method buildRegionAction.
/**
 * Create a protocol buffer multi request for a list of actions. Propagates each Action's
 * original index.
 *
 * @param regionName the region the actions apply to
 * @param actions the actions to encode into the request
 * @param regionActionBuilder builder reused for the RegionAction
 * @param actionBuilder builder reused for each protobuf Action
 * @param mutationBuilder builder reused for each MutationProto
 * @return a multi request
 * @throws IOException if an action cannot be converted to protobuf
 */
public static RegionAction.Builder buildRegionAction(final byte[] regionName,
    final List<Action> actions, final RegionAction.Builder regionActionBuilder,
    final ClientProtos.Action.Builder actionBuilder,
    final MutationProto.Builder mutationBuilder) throws IOException {
  ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
  for (Action action : actions) {
    Row row = action.getAction();
    actionBuilder.clear();
    actionBuilder.setIndex(action.getOriginalIndex());
    mutationBuilder.clear();
    if (row instanceof Get) {
      Get g = (Get) row;
      regionActionBuilder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
    } else if (row instanceof Put) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
        ProtobufUtil.toMutation(MutationType.PUT, (Put) row, mutationBuilder)));
    } else if (row instanceof Delete) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
        ProtobufUtil.toMutation(MutationType.DELETE, (Delete) row, mutationBuilder)));
    } else if (row instanceof Append) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
        ProtobufUtil.toMutation(MutationType.APPEND, (Append) row, mutationBuilder,
          action.getNonce())));
    } else if (row instanceof Increment) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
        ProtobufUtil.toMutation((Increment) row, mutationBuilder, action.getNonce())));
    } else if (row instanceof RegionCoprocessorServiceExec) {
      RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
      // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
      org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value =
        org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations
          .unsafeWrap(exec.getRequest().toByteArray());
      if (cpBuilder == null) {
        cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
      } else {
        cpBuilder.clear();
      }
      regionActionBuilder.addAction(actionBuilder.setServiceCall(cpBuilder
        .setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
        .setServiceName(exec.getMethod().getService().getFullName())
        .setMethodName(exec.getMethod().getName())
        .setRequest(value)));
    } else if (row instanceof RowMutations) {
      throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
    } else {
      throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
    }
  }
  return regionActionBuilder;
}
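
As the RowMutations branch above shows, this code path rejects RowMutations and directs callers to mutateRow instead. For reference, a minimal client-side sketch of that path; the table name, column family, and qualifiers are hypothetical, only the RowMutations/Table calls themselves are standard HBase client API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateRowExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical table and column names, used only for illustration.
    byte[] row = Bytes.toBytes("row1");
    byte[] family = Bytes.toBytes("cf");
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("example_table"))) {
      RowMutations mutations = new RowMutations(row);
      // Both mutations target the same row, so mutateRow applies them atomically.
      mutations.add(new Put(row).addColumn(family, Bytes.toBytes("a"), Bytes.toBytes("1")));
      mutations.add(new Delete(row).addColumn(family, Bytes.toBytes("b")));
      table.mutateRow(mutations);
    }
  }
}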
Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.
Class TableBasedReplicationQueuesImpl, method safeQueueUpdate.
/**
* See safeQueueUpdate(RowMutations mutate)
*
* @param put Row mutation to perform on the queue
*/
private void safeQueueUpdate(Put put) throws ReplicationException, IOException {
  RowMutations mutations = new RowMutations(put.getRow());
  mutations.add(put);
  safeQueueUpdate(mutations);
}
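
The RowMutations overload this delegates to is not shown here. Purely as an illustration of what a guarded ("safe") queue update can look like with the HBase 2.x CheckAndMutateBuilder API, the sketch below applies a RowMutations only while an owner marker is still present on the queue row; the class, fields, and column names are hypothetical, and this is not the actual TableBasedReplicationQueuesImpl logic:

import java.io.IOException;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.util.Bytes;

// A sketch only: hypothetical names, not the real safeQueueUpdate(RowMutations) implementation.
class GuardedQueueUpdateSketch {
  private static final byte[] CF_QUEUE = Bytes.toBytes("q");      // hypothetical column family
  private static final byte[] COL_OWNER = Bytes.toBytes("owner"); // hypothetical qualifier
  private final byte[] serverNameBytes = Bytes.toBytes("rs1,16020,1"); // hypothetical owner value

  void guardedQueueUpdate(Table replicationTable, RowMutations mutations)
      throws IOException, ReplicationException {
    // Apply the mutations atomically, but only while this server still owns the queue row.
    boolean updated = replicationTable
      .checkAndMutate(mutations.getRow(), CF_QUEUE)
      .qualifier(COL_OWNER)
      .ifEquals(serverNameBytes)
      .thenMutate(mutations);
    if (!updated) {
      throw new ReplicationException("Queue update failed; queue row ownership was lost");
    }
  }
}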
Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.
Class HBaseFsck, method resetSplitParent.
/**
 * Reset the split parent region info in the meta table.
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
  mutations.add(p);
  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META");
}
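
A quick way to sanity-check the effect of such a meta edit is to read the parent's row back and confirm that the SPLITA/SPLITB qualifiers are gone. A minimal sketch, assuming an open Connection; the helper class and method names are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class SplitParentCheck {
  // Returns true once neither split pointer remains on the parent region's meta row.
  static boolean splitQualifiersCleared(Connection conn, byte[] regionName) throws IOException {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Result r = meta.get(new Get(regionName)
        .addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER)
        .addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER));
      return !r.containsColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER)
          && !r.containsColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    }
  }
}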
Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.
Class ProtobufUtil, method toCheckAndMutate.
public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition,
    List<Mutation> mutations) throws IOException {
  assert mutations.size() > 0;
  byte[] row = condition.getRow().toByteArray();
  CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row);
  Filter filter = condition.hasFilter() ? ProtobufUtil.toFilter(condition.getFilter()) : null;
  if (filter != null) {
    builder.ifMatches(filter);
  } else {
    builder.ifMatches(condition.getFamily().toByteArray(),
      condition.getQualifier().toByteArray(),
      CompareOperator.valueOf(condition.getCompareType().name()),
      ProtobufUtil.toComparator(condition.getComparator()).getValue());
  }
  TimeRange timeRange = condition.hasTimeRange()
    ? ProtobufUtil.toTimeRange(condition.getTimeRange())
    : TimeRange.allTime();
  builder.timeRange(timeRange);
  try {
    if (mutations.size() == 1) {
      Mutation m = mutations.get(0);
      if (m instanceof Put) {
        return builder.build((Put) m);
      } else if (m instanceof Delete) {
        return builder.build((Delete) m);
      } else if (m instanceof Increment) {
        return builder.build((Increment) m);
      } else if (m instanceof Append) {
        return builder.build((Append) m);
      } else {
        throw new DoNotRetryIOException(
          "Unsupported mutate type: " + m.getClass().getSimpleName().toUpperCase());
      }
    } else {
      return builder.build(new RowMutations(mutations.get(0).getRow()).add(mutations));
    }
  } catch (IllegalArgumentException e) {
    throw new DoNotRetryIOException(e.getMessage());
  }
}
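
Going the other direction, a client builds the same kind of CheckAndMutate with a RowMutations payload through CheckAndMutate.newBuilder and submits it with Table#checkAndMutate (HBase 2.4+ client API). A minimal sketch with hypothetical table and column names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateExample {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("cf");
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("example_table"))) {
      // Several mutations on the same row, applied atomically if the check passes.
      RowMutations rm = new RowMutations(row);
      rm.add(new Put(row).addColumn(cf, Bytes.toBytes("state"), Bytes.toBytes("done")));
      rm.add(new Delete(row).addColumn(cf, Bytes.toBytes("lock")));
      CheckAndMutate cam = CheckAndMutate.newBuilder(row)
        .ifEquals(cf, Bytes.toBytes("lock"), Bytes.toBytes("held"))
        .build(rm);
      CheckAndMutateResult result = table.checkAndMutate(cam);
      System.out.println("applied=" + result.isSuccess());
    }
  }
}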
Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.
Class RequestConverter, method buildNoDataRegionActions.
/**
* Create a protocol buffer multirequest with NO data for a list of actions (data is carried
* otherwise than via protobuf). This means it just notes attributes, whether to write the
* WAL, etc., and the presence in protobuf serves as place holder for the data which is
* coming along otherwise. Note that Get is different. It does not contain 'data' and is always
* carried by protobuf. We return references to the data by adding them to the passed in
* <code>data</code> param.
 * <p> Propagates each Action's original index.
* <p> The passed in multiRequestBuilder will be populated with region actions.
* @param regionName The region name of the actions.
* @param actions The actions that are grouped by the same region name.
* @param cells Place to stuff references to actual data.
* @param multiRequestBuilder The multiRequestBuilder to be populated with region actions.
* @param regionActionBuilder regionActionBuilder to be used to build region action.
* @param actionBuilder actionBuilder to be used to build action.
* @param mutationBuilder mutationBuilder to be used to build mutation.
* @param nonceGroup nonceGroup to be applied.
* @param indexMap Map of created RegionAction to the original index for a
* RowMutations/CheckAndMutate within the original list of actions
* @throws IOException
*/
public static void buildNoDataRegionActions(final byte[] regionName,
    final Iterable<Action> actions, final List<CellScannable> cells,
    final MultiRequest.Builder multiRequestBuilder,
    final RegionAction.Builder regionActionBuilder,
    final ClientProtos.Action.Builder actionBuilder,
    final MutationProto.Builder mutationBuilder, long nonceGroup,
    final Map<Integer, Integer> indexMap) throws IOException {
  regionActionBuilder.clear();
  RegionAction.Builder builder =
    getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
  ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
  boolean hasNonce = false;
  List<Action> rowMutationsList = new ArrayList<>();
  List<Action> checkAndMutates = new ArrayList<>();
  for (Action action : actions) {
    Row row = action.getAction();
    actionBuilder.clear();
    actionBuilder.setIndex(action.getOriginalIndex());
    mutationBuilder.clear();
    if (row instanceof Get) {
      Get g = (Get) row;
      builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
    } else if (row instanceof Put) {
      buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder);
    } else if (row instanceof Delete) {
      buildNoDataRegionAction((Delete) row, cells, builder, actionBuilder, mutationBuilder);
    } else if (row instanceof Append) {
      buildNoDataRegionAction((Append) row, cells, action.getNonce(), builder, actionBuilder,
        mutationBuilder);
      hasNonce = true;
    } else if (row instanceof Increment) {
      buildNoDataRegionAction((Increment) row, cells, action.getNonce(), builder, actionBuilder,
        mutationBuilder);
      hasNonce = true;
    } else if (row instanceof RegionCoprocessorServiceExec) {
      RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
      // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
      org.apache.hbase.thirdparty.com.google.protobuf.ByteString value =
        org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations
          .unsafeWrap(exec.getRequest().toByteArray());
      if (cpBuilder == null) {
        cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
      } else {
        cpBuilder.clear();
      }
      builder.addAction(actionBuilder.setServiceCall(cpBuilder
        .setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
        .setServiceName(exec.getMethod().getService().getFullName())
        .setMethodName(exec.getMethod().getName())
        .setRequest(value)));
    } else if (row instanceof RowMutations) {
      rowMutationsList.add(action);
    } else if (row instanceof CheckAndMutate) {
      checkAndMutates.add(action);
    } else {
      throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
    }
  }
  if (builder.getActionCount() > 0) {
    multiRequestBuilder.addRegionAction(builder.build());
  }
  // We maintain a map to keep track of this RegionAction and the original Action index.
  for (Action action : rowMutationsList) {
    builder.clear();
    getRegionActionBuilderWithRegion(builder, regionName);
    boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) action.getAction(),
      cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
    if (hasIncrementOrAppend) {
      hasNonce = true;
    }
    builder.setAtomic(true);
    multiRequestBuilder.addRegionAction(builder.build());
    // This rowMutations region action is at (multiRequestBuilder.getRegionActionCount() - 1)
    // in the overall multiRequest.
    indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
  }
  // We maintain a map to keep track of this RegionAction and the original Action index.
  for (Action action : checkAndMutates) {
    builder.clear();
    getRegionActionBuilderWithRegion(builder, regionName);
    CheckAndMutate cam = (CheckAndMutate) action.getAction();
    builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(),
      cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(),
      cam.getTimeRange()));
    if (cam.getAction() instanceof Put) {
      actionBuilder.clear();
      mutationBuilder.clear();
      buildNoDataRegionAction((Put) cam.getAction(), cells, builder, actionBuilder,
        mutationBuilder);
    } else if (cam.getAction() instanceof Delete) {
      actionBuilder.clear();
      mutationBuilder.clear();
      buildNoDataRegionAction((Delete) cam.getAction(), cells, builder, actionBuilder,
        mutationBuilder);
    } else if (cam.getAction() instanceof Increment) {
      actionBuilder.clear();
      mutationBuilder.clear();
      buildNoDataRegionAction((Increment) cam.getAction(), cells, action.getNonce(), builder,
        actionBuilder, mutationBuilder);
      hasNonce = true;
    } else if (cam.getAction() instanceof Append) {
      actionBuilder.clear();
      mutationBuilder.clear();
      buildNoDataRegionAction((Append) cam.getAction(), cells, action.getNonce(), builder,
        actionBuilder, mutationBuilder);
      hasNonce = true;
    } else if (cam.getAction() instanceof RowMutations) {
      boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) cam.getAction(),
        cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
      if (hasIncrementOrAppend) {
        hasNonce = true;
      }
      builder.setAtomic(true);
    } else {
      throw new DoNotRetryIOException(
        "CheckAndMutate doesn't support " + cam.getAction().getClass().getName());
    }
    multiRequestBuilder.addRegionAction(builder.build());
    // This CheckAndMutate region action is at (multiRequestBuilder.getRegionActionCount() - 1)
    // in the overall multiRequest.
    indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
  }
  if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
    multiRequestBuilder.setNonceGroup(nonceGroup);
  }
}
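
The mix of action types this method encodes typically originates from a single Table#batch call on the client. A minimal sketch (HBase 2.4+, where CheckAndMutate can be included in a batch; table and column names are hypothetical) that sends a Get, a Put, a RowMutations, and a CheckAndMutate in one round trip:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MixedBatchExample {
  public static void main(String[] args) throws Exception {
    byte[] cf = Bytes.toBytes("cf");
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("example_table"))) {
      List<Row> actions = new ArrayList<>();
      actions.add(new Get(Bytes.toBytes("r1")));
      actions.add(new Put(Bytes.toBytes("r2")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("v")));
      RowMutations rm = new RowMutations(Bytes.toBytes("r3"));
      rm.add(new Put(Bytes.toBytes("r3")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("v")));
      rm.add(new Delete(Bytes.toBytes("r3")).addColumn(cf, Bytes.toBytes("old")));
      actions.add(rm);
      actions.add(CheckAndMutate.newBuilder(Bytes.toBytes("r4"))
        .ifNotExists(cf, Bytes.toBytes("q"))
        .build(new Put(Bytes.toBytes("r4")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("v"))));
      Object[] results = new Object[actions.size()];
      // Each slot in results lines up with the action at the same index, mirroring the
      // original-index bookkeeping done in buildNoDataRegionActions above.
      table.batch(actions, results);
    }
  }
}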