Use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair in project hbase by apache.
The class ProtobufUtil, method toGet.
/**
 * Convert a protocol buffer Mutate to a Get.
 * @param proto the protocol buffer Mutate to convert
 * @param cellScanner the scanner carrying the mutation's cell data, if any
 * @return the converted client Get
 * @throws IOException if the proto is malformed or the cell scanner runs out of cells
 */
public static Get toGet(final MutationProto proto, final CellScanner cellScanner) throws IOException {
  MutationType type = proto.getMutateType();
  assert type == MutationType.INCREMENT || type == MutationType.APPEND : type.name();
  byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
  Get get = null;
  int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
  if (cellCount > 0) {
    // The proto has metadata only and the data is separate to be found in the cellScanner.
    if (cellScanner == null) {
      throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: "
        + TextFormat.shortDebugString(proto));
    }
    for (int i = 0; i < cellCount; i++) {
      if (!cellScanner.advance()) {
        throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i
          + " no cell returned: " + TextFormat.shortDebugString(proto));
      }
      Cell cell = cellScanner.current();
      if (get == null) {
        get = new Get(CellUtil.cloneRow(cell));
      }
      get.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell));
    }
  } else {
    get = new Get(row);
    for (ColumnValue column : proto.getColumnValueList()) {
      byte[] family = column.getFamily().toByteArray();
      for (QualifierValue qv : column.getQualifierValueList()) {
        byte[] qualifier = qv.getQualifier().toByteArray();
        if (!qv.hasValue()) {
          throw new DoNotRetryIOException("Missing required field: qualifier value");
        }
        get.addColumn(family, qualifier);
      }
    }
  }
  if (proto.hasTimeRange()) {
    TimeRange timeRange = toTimeRange(proto.getTimeRange());
    get.setTimeRange(timeRange.getMin(), timeRange.getMax());
  }
  for (NameBytesPair attribute : proto.getAttributeList()) {
    get.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  return get;
}
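
For orientation, here is a minimal, hypothetical sketch of exercising this converter: an INCREMENT MutationProto carrying its column data inline (no associated cells), converted to the Get that would read the current value. The row, family, and qualifier names are illustrative only.

// Hypothetical usage sketch; row/family/qualifier values are illustrative.
MutationProto proto = MutationProto.newBuilder()
  .setMutateType(MutationType.INCREMENT)
  .setRow(ByteString.copyFrom(Bytes.toBytes("row-1")))
  .addColumnValue(ColumnValue.newBuilder()
    .setFamily(ByteString.copyFrom(Bytes.toBytes("cf")))
    .addQualifierValue(QualifierValue.newBuilder()
      .setQualifier(ByteString.copyFrom(Bytes.toBytes("counter")))
      .setValue(ByteString.copyFrom(Bytes.toBytes(1L)))))
  .build();
// Column data is inline and there are no associated cells, so a null
// CellScanner is acceptable here.
Get get = ProtobufUtil.toGet(proto, null);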
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair in project hbase by apache.
The class RSRpcServices, method doNonAtomicRegionMutation.
/**
 * Run through the <code>actions</code> of the RegionAction and, per Mutation, do the work; when
 * done, add an instance of a {@link ResultOrException} that corresponds to each Mutation.
 * @param cellsToReturn Could be null. May be allocated in this method. This is what this
 *   method returns as its 'result'.
 * @param closeCallBack the callback to be used with multigets
 * @param context the current RpcCallContext
 * @return the <code>cellsToReturn</code> list, which may have been allocated in this method
 */
private List<CellScannable> doNonAtomicRegionMutation(final HRegion region,
    final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner,
    final RegionActionResult.Builder builder, List<CellScannable> cellsToReturn, long nonceGroup,
    final RegionScannersCloseCallBack closeCallBack, RpcCallContext context,
    ActivePolicyEnforcement spaceQuotaEnforcement) {
  // Gather up CONTIGUOUS Puts and Deletes in this mutations List. The idea is that rather than
  // do them one at a time, we instead pass them in batch. Be aware that the corresponding
  // ResultOrException instance that matches each Put or Delete is then added down in the
  // doNonAtomicBatchOp call. We stay aligned even though the Puts and Deletes are
  // deferred/batched.
  List<ClientProtos.Action> mutations = null;
  long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
  IOException sizeIOE = null;
  Object lastBlock = null;
  ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = ResultOrException.newBuilder();
  boolean hasResultOrException = false;
  for (ClientProtos.Action action : actions.getActionList()) {
    hasResultOrException = false;
    resultOrExceptionBuilder.clear();
    try {
      Result r = null;
      if (context != null && context.isRetryImmediatelySupported()
          && (context.getResponseCellSize() > maxQuotaResultSize
            || context.getResponseBlockSize() + context.getResponseExceptionSize()
              > maxQuotaResultSize)) {
        // We're storing the exception since the exception and reason string won't
        // change after the response size limit is reached.
        if (sizeIOE == null) {
          // We don't need the stack un-winding so don't throw the exception.
          // Throwing will kill the JVM's JIT.
          //
          // Instead just create the exception and then store it.
          sizeIOE = new MultiActionResultTooLarge("Max size exceeded" + " CellSize: "
            + context.getResponseCellSize() + " BlockSize: " + context.getResponseBlockSize());
          // Only report the exception once since there's only one request that
          // caused the exception. Otherwise this number will dominate the exceptions count.
          rpcServer.getMetrics().exception(sizeIOE);
        }
        // Now that the exception is known to have been created,
        // use it for the response.
        //
        // This will create a copy in the builder.
        NameBytesPair pair = ResponseConverter.buildException(sizeIOE);
        resultOrExceptionBuilder.setException(pair);
        context.incrementResponseExceptionSize(pair.getSerializedSize());
        resultOrExceptionBuilder.setIndex(action.getIndex());
        builder.addResultOrException(resultOrExceptionBuilder.build());
        skipCellsForMutation(action, cellScanner);
        continue;
      }
      if (action.hasGet()) {
        long before = EnvironmentEdgeManager.currentTime();
        ClientProtos.Get pbGet = action.getGet();
        // HBase servers do not tell clients what versions
        // they are; it's a problem for non-native clients like asynchbase. HBASE-20225.
        if (pbGet.hasClosestRowBefore() && pbGet.getClosestRowBefore()) {
          throw new UnknownProtocolException("Is this a pre-hbase-1.0.0 or asynchbase client? "
            + "Client is invoking getClosestRowBefore removed in hbase-2.0.0 replaced by "
            + "reverse Scan.");
        }
        try {
          Get get = ProtobufUtil.toGet(pbGet);
          if (context != null) {
            r = get(get, region, closeCallBack, context);
          } else {
            r = region.get(get);
          }
        } finally {
          final MetricsRegionServer metricsRegionServer = server.getMetrics();
          if (metricsRegionServer != null) {
            metricsRegionServer.updateGet(region.getTableDescriptor().getTableName(),
              EnvironmentEdgeManager.currentTime() - before);
          }
        }
      } else if (action.hasServiceCall()) {
        hasResultOrException = true;
        Message result = execServiceOnRegion(region, action.getServiceCall());
        ClientProtos.CoprocessorServiceResult.Builder serviceResultBuilder =
          ClientProtos.CoprocessorServiceResult.newBuilder();
        resultOrExceptionBuilder.setServiceResult(serviceResultBuilder.setValue(
          serviceResultBuilder.getValueBuilder().setName(result.getClass().getName())
            .setValue(UnsafeByteOperations.unsafeWrap(result.toByteArray()))));
      } else if (action.hasMutation()) {
        MutationType type = action.getMutation().getMutateType();
        if (type != MutationType.PUT && type != MutationType.DELETE && mutations != null
            && !mutations.isEmpty()) {
          // Flush out any Puts or Deletes already collected.
          doNonAtomicBatchOp(builder, region, quota, mutations, cellScanner,
            spaceQuotaEnforcement);
          mutations.clear();
        }
        switch (type) {
          case APPEND:
            r = append(region, quota, action.getMutation(), cellScanner, nonceGroup,
              spaceQuotaEnforcement);
            break;
          case INCREMENT:
            r = increment(region, quota, action.getMutation(), cellScanner, nonceGroup,
              spaceQuotaEnforcement);
            break;
          case PUT:
          case DELETE:
            // Collect the individual mutations and apply in a batch
            if (mutations == null) {
              mutations = new ArrayList<>(actions.getActionCount());
            }
            mutations.add(action);
            break;
          default:
            throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
        }
      } else {
        throw new HBaseIOException("Unexpected Action type");
      }
      if (r != null) {
        ClientProtos.Result pbResult = null;
        if (isClientCellBlockSupport(context)) {
          pbResult = ProtobufUtil.toResultNoData(r);
          // Hard to guess the size here. Just make a rough guess.
          if (cellsToReturn == null) {
            cellsToReturn = new ArrayList<>();
          }
          cellsToReturn.add(r);
        } else {
          pbResult = ProtobufUtil.toResult(r);
        }
        lastBlock = addSize(context, r, lastBlock);
        hasResultOrException = true;
        resultOrExceptionBuilder.setResult(pbResult);
      }
      // Could get to here and there was no result and no exception. Presumes we added
      // a Put or Delete to the collecting Mutations List for adding later. In this
      // case the corresponding ResultOrException instance for the Put or Delete will be added
      // down in the doNonAtomicBatchOp method call rather than up here.
    } catch (IOException ie) {
      rpcServer.getMetrics().exception(ie);
      hasResultOrException = true;
      NameBytesPair pair = ResponseConverter.buildException(ie);
      resultOrExceptionBuilder.setException(pair);
      context.incrementResponseExceptionSize(pair.getSerializedSize());
    }
    if (hasResultOrException) {
      // Propagate index.
      resultOrExceptionBuilder.setIndex(action.getIndex());
      builder.addResultOrException(resultOrExceptionBuilder.build());
    }
  }
  // Finish up any outstanding mutations
  if (!CollectionUtils.isEmpty(mutations)) {
    doNonAtomicBatchOp(builder, region, quota, mutations, cellScanner, spaceQuotaEnforcement);
  }
  return cellsToReturn;
}
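
The exception path above is where NameBytesPair does its work: ResponseConverter.buildException packages a Throwable so the client can identify and rehydrate it. A hedged sketch of that shape follows; the comments describe assumed behavior, not quoted source.

// Sketch: an exception traveling back as a NameBytesPair.
// buildException is the ResponseConverter method used above; exactly what it
// stores in the pair is an assumption here, not quoted source.
IOException ioe = new DoNotRetryIOException("example failure");
NameBytesPair pair = ResponseConverter.buildException(ioe);
String exceptionClass = pair.getName(); // assumed: the exception class name
// pair.getValue() is assumed to carry the serialized detail for the client.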
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair in project hbase by apache.
The class RSRpcServices, method getScanDetailsWithRequest.
public String getScanDetailsWithRequest(ScanRequest request) {
  try {
    if (!request.hasRegion()) {
      return null;
    }
    Region region = getRegion(request.getRegion());
    StringBuilder builder = new StringBuilder();
    builder.append("table: ").append(region.getRegionInfo().getTable().getNameAsString());
    builder.append(" region: ").append(region.getRegionInfo().getRegionNameAsString());
    for (NameBytesPair pair : request.getScan().getAttributeList()) {
      if (OperationWithAttributes.ID_ATRIBUTE.equals(pair.getName())) {
        builder.append(" operation_id: ").append(Bytes.toString(pair.getValue().toByteArray()));
        break;
      }
    }
    return builder.toString();
  } catch (IOException ignored) {
    return null;
  }
}
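
On the client side, the operation id this method reports comes from tagging the Scan: setId stores its argument as a scan attribute under OperationWithAttributes.ID_ATRIBUTE, which the loop above then renders as operation_id. A small hypothetical sketch; the row and id values are illustrative.

// Client-side sketch; row and id values are illustrative.
Scan scan = new Scan().withStartRow(Bytes.toBytes("row-0"));
scan.setId("audit-scan-42"); // stored under OperationWithAttributes.ID_ATRIBUTE
// Once this Scan is shipped in a ScanRequest, getScanDetailsWithRequest would
// report something like: "table: t1 region: ... operation_id: audit-scan-42"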
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair in project hbase by apache.
The class ProtobufUtil, method toScan.
/**
 * Convert a protocol buffer Scan to a client Scan.
 *
 * @param proto the protocol buffer Scan to convert
 * @return the converted client Scan
 * @throws IOException if the filter or other scan fields cannot be deserialized
 */
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
  byte[] startRow = HConstants.EMPTY_START_ROW;
  byte[] stopRow = HConstants.EMPTY_END_ROW;
  boolean includeStartRow = true;
  boolean includeStopRow = false;
  if (proto.hasStartRow()) {
    startRow = proto.getStartRow().toByteArray();
  }
  if (proto.hasStopRow()) {
    stopRow = proto.getStopRow().toByteArray();
  }
  if (proto.hasIncludeStartRow()) {
    includeStartRow = proto.getIncludeStartRow();
  }
  if (proto.hasIncludeStopRow()) {
    includeStopRow = proto.getIncludeStopRow();
  } else {
    // old client without this flag, we should consider start=end as a get.
    if (ClientUtil.areScanStartRowAndStopRowEqual(startRow, stopRow)) {
      includeStopRow = true;
    }
  }
  Scan scan =
    new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
  if (proto.hasCacheBlocks()) {
    scan.setCacheBlocks(proto.getCacheBlocks());
  }
  if (proto.hasMaxVersions()) {
    scan.readVersions(proto.getMaxVersions());
  }
  if (proto.hasStoreLimit()) {
    scan.setMaxResultsPerColumnFamily(proto.getStoreLimit());
  }
  if (proto.hasStoreOffset()) {
    scan.setRowOffsetPerColumnFamily(proto.getStoreOffset());
  }
  if (proto.hasLoadColumnFamiliesOnDemand()) {
    scan.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
  }
  if (proto.getCfTimeRangeCount() > 0) {
    for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
      TimeRange timeRange = toTimeRange(cftr.getTimeRange());
      scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(),
        timeRange.getMax());
    }
  }
  if (proto.hasTimeRange()) {
    TimeRange timeRange = toTimeRange(proto.getTimeRange());
    scan.setTimeRange(timeRange.getMin(), timeRange.getMax());
  }
  if (proto.hasFilter()) {
    FilterProtos.Filter filter = proto.getFilter();
    scan.setFilter(ProtobufUtil.toFilter(filter));
  }
  if (proto.hasBatchSize()) {
    scan.setBatch(proto.getBatchSize());
  }
  if (proto.hasMaxResultSize()) {
    scan.setMaxResultSize(proto.getMaxResultSize());
  }
  if (proto.hasAllowPartialResults()) {
    scan.setAllowPartialResults(proto.getAllowPartialResults());
  }
  for (NameBytesPair attribute : proto.getAttributeList()) {
    scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  if (proto.getColumnCount() > 0) {
    for (Column column : proto.getColumnList()) {
      byte[] family = column.getFamily().toByteArray();
      if (column.getQualifierCount() > 0) {
        for (ByteString qualifier : column.getQualifierList()) {
          scan.addColumn(family, qualifier.toByteArray());
        }
      } else {
        scan.addFamily(family);
      }
    }
  }
  if (proto.hasReversed()) {
    scan.setReversed(proto.getReversed());
  }
  if (proto.hasConsistency()) {
    scan.setConsistency(toConsistency(proto.getConsistency()));
  }
  if (proto.hasCaching()) {
    scan.setCaching(proto.getCaching());
  }
  if (proto.hasMvccReadPoint()) {
    PackagePrivateFieldAccessor.setMvccReadPoint(scan, proto.getMvccReadPoint());
  }
  if (proto.hasReadType()) {
    scan.setReadType(toReadType(proto.getReadType()));
  }
  if (proto.getNeedCursorResult()) {
    scan.setNeedCursorResult(true);
  }
  return scan;
}
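
A quick round trip shows the converter in use; this sketch assumes the inverse ProtobufUtil.toScan(Scan) overload that serializes a client Scan, and the row keys are illustrative.

// Round-trip sketch: client Scan -> ClientProtos.Scan -> client Scan.
Scan original = new Scan()
  .withStartRow(Bytes.toBytes("a"), true)
  .withStopRow(Bytes.toBytes("z"), false)
  .readVersions(3)
  .setCaching(100);
ClientProtos.Scan proto = ProtobufUtil.toScan(original);
Scan restored = ProtobufUtil.toScan(proto);
// restored carries the same row range, version count, and caching settings.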
Use of org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair in project hbase by apache.
The class ProtobufUtil, method toDelta.
private static <T extends Mutation> T toDelta(Function<Bytes, T> supplier,
    ConsumerWithException<T, Cell> consumer, final MutationProto proto,
    final CellScanner cellScanner) throws IOException {
  byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
  T mutation = row == null ? null : supplier.apply(new Bytes(row));
  int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
  if (cellCount > 0) {
    // The proto has metadata only and the data is separate to be found in the cellScanner.
    if (cellScanner == null) {
      throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: "
        + toShortString(proto));
    }
    for (int i = 0; i < cellCount; i++) {
      if (!cellScanner.advance()) {
        throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i
          + " no cell returned: " + toShortString(proto));
      }
      Cell cell = cellScanner.current();
      if (mutation == null) {
        mutation =
          supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
      }
      consumer.accept(mutation, cell);
    }
  } else {
    if (mutation == null) {
      throw new IllegalArgumentException("row cannot be null");
    }
    for (ColumnValue column : proto.getColumnValueList()) {
      byte[] family = column.getFamily().toByteArray();
      for (QualifierValue qv : column.getQualifierValueList()) {
        byte[] qualifier = qv.getQualifier().toByteArray();
        if (!qv.hasValue()) {
          throw new DoNotRetryIOException("Missing required field: qualifier value");
        }
        byte[] value = qv.getValue().toByteArray();
        byte[] tags = null;
        if (qv.hasTags()) {
          tags = qv.getTags().toByteArray();
        }
        consumer.accept(mutation, ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
          .setRow(mutation.getRow())
          .setFamily(family)
          .setQualifier(qualifier)
          .setTimestamp(cellTimestampOrLatest(qv))
          .setType(KeyValue.Type.Put.getCode())
          .setValue(value)
          .setTags(tags)
          .build());
      }
    }
  }
  mutation.setDurability(toDurability(proto.getDurability()));
  for (NameBytesPair attribute : proto.getAttributeList()) {
    mutation.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  return mutation;
}
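
toDelta is private; the public Append/Increment converters delegate to it. A hedged sketch of what such a wrapper could look like follows; the supplier/consumer lambda shapes are an assumption about the wiring, not quoted source.

// Hypothetical wrapper sketch; the supplier/consumer wiring is assumed.
public static Append toAppend(final MutationProto proto, final CellScanner cellScanner)
    throws IOException {
  MutationType type = proto.getMutateType();
  assert type == MutationType.APPEND : type.name();
  // Build the Append from the row bytes; accumulate each decoded Cell into it.
  return toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()),
    Append::add, proto, cellScanner);
}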