Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class TestClientNoCluster, method doMetaGetResponse.
static GetResponse doMetaGetResponse(final SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta,
    final GetRequest request) {
  ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
  ByteString row = request.getGet().getRow();
  Pair<HRegionInfo, ServerName> p = meta.get(row.toByteArray());
  if (p != null) {
    resultBuilder.addCell(getRegionInfo(row, p.getFirst()));
    resultBuilder.addCell(getServer(row, p.getSecond()));
  }
  resultBuilder.addCell(getStartCode(row));
  GetResponse.Builder builder = GetResponse.newBuilder();
  builder.setResult(resultBuilder.build());
  return builder.build();
}
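Note that the lookup succeeds only because the test's meta map is ordered by a raw byte[] comparator; plain byte arrays do not have value-based equality. A minimal sketch of exercising the method, with hypothetical row and region values (GetRequest also requires a RegionSpecifier, so one is supplied even though doMetaGetResponse ignores it):

    SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta =
        new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);  // byte[] keys need an explicit comparator
    ClientProtos.GetRequest request = ClientProtos.GetRequest.newBuilder()
        .setRegion(HBaseProtos.RegionSpecifier.newBuilder()
            .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
            .setValue(ByteString.copyFromUtf8("hbase:meta,,1")))  // hypothetical region name
        .setGet(ClientProtos.Get.newBuilder()
            .setRow(ByteString.copyFromUtf8("test-row")))         // row key as a shaded ByteString
        .build();
    GetResponse response = doMetaGetResponse(meta, request);
    // With an empty meta map the response still carries the start-code cell,
    // since getStartCode(row) is added unconditionally above.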
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class WALKey, method getBuilder.
public WALProtos.WALKey.Builder getBuilder(WALCellCodec.ByteStringCompressor compressor)
    throws IOException {
  WALProtos.WALKey.Builder builder = WALProtos.WALKey.newBuilder();
  if (compressionContext == null) {
    builder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(this.encodedRegionName));
    builder.setTableName(UnsafeByteOperations.unsafeWrap(this.tablename.getName()));
  } else {
    builder.setEncodedRegionName(
        compressor.compress(this.encodedRegionName, compressionContext.regionDict));
    builder.setTableName(compressor.compress(this.tablename.getName(), compressionContext.tableDict));
  }
  builder.setLogSequenceNumber(getSequenceId());
  builder.setWriteTime(writeTime);
  if (this.origLogSeqNum > 0) {
    builder.setOrigSequenceNumber(this.origLogSeqNum);
  }
  if (this.nonce != HConstants.NO_NONCE) {
    builder.setNonce(nonce);
  }
  if (this.nonceGroup != HConstants.NO_NONCE) {
    builder.setNonceGroup(nonceGroup);
  }
  HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder();
  for (UUID clusterId : clusterIds) {
    uuidBuilder.setLeastSigBits(clusterId.getLeastSignificantBits());
    uuidBuilder.setMostSigBits(clusterId.getMostSignificantBits());
    builder.addClusterIds(uuidBuilder.build());
  }
  if (replicationScope != null) {
    for (Map.Entry<byte[], Integer> e : replicationScope.entrySet()) {
      ByteString family = (compressionContext == null)
          ? UnsafeByteOperations.unsafeWrap(e.getKey())
          : compressor.compress(e.getKey(), compressionContext.familyDict);
      builder.addScopes(FamilyScope.newBuilder()
          .setFamily(family)
          .setScopeType(ScopeType.forNumber(e.getValue())));
    }
  }
  return builder;
}
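The uncompressed branch wraps the key's byte arrays with UnsafeByteOperations.unsafeWrap rather than copying them. A small sketch of the difference (the array contents are hypothetical):

    byte[] name = Bytes.toBytes("0123456789abcdef");             // e.g. an encoded region name
    ByteString copied = ByteString.copyFrom(name);               // defensive copy of the array
    ByteString wrapped = UnsafeByteOperations.unsafeWrap(name);  // zero-copy view of the same array
    // unsafeWrap saves an allocation per WAL append, but the caller must never
    // mutate 'name' afterwards; WALKey can rely on this because its fields do
    // not change once the key is constructed.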
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class ProtobufUtil, method toScan.
/**
 * Convert a protocol buffer Scan to a client Scan.
 *
 * @param proto the protocol buffer Scan to convert
 * @return the converted client Scan
 * @throws IOException if a nested element (such as the filter) cannot be converted
 */
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
  byte[] startRow = HConstants.EMPTY_START_ROW;
  byte[] stopRow = HConstants.EMPTY_END_ROW;
  boolean includeStartRow = true;
  boolean includeStopRow = false;
  if (proto.hasStartRow()) {
    startRow = proto.getStartRow().toByteArray();
  }
  if (proto.hasStopRow()) {
    stopRow = proto.getStopRow().toByteArray();
  }
  if (proto.hasIncludeStartRow()) {
    includeStartRow = proto.getIncludeStartRow();
  }
  if (proto.hasIncludeStopRow()) {
    includeStopRow = proto.getIncludeStopRow();
  }
  Scan scan =
      new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
  if (proto.hasCacheBlocks()) {
    scan.setCacheBlocks(proto.getCacheBlocks());
  }
  if (proto.hasMaxVersions()) {
    scan.setMaxVersions(proto.getMaxVersions());
  }
  if (proto.hasStoreLimit()) {
    scan.setMaxResultsPerColumnFamily(proto.getStoreLimit());
  }
  if (proto.hasStoreOffset()) {
    scan.setRowOffsetPerColumnFamily(proto.getStoreOffset());
  }
  if (proto.hasLoadColumnFamiliesOnDemand()) {
    scan.setLoadColumnFamiliesOnDemand(proto.getLoadColumnFamiliesOnDemand());
  }
  if (proto.getCfTimeRangeCount() > 0) {
    for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) {
      TimeRange timeRange = protoToTimeRange(cftr.getTimeRange());
      scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange);
    }
  }
  if (proto.hasTimeRange()) {
    TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
    scan.setTimeRange(timeRange);
  }
  if (proto.hasFilter()) {
    FilterProtos.Filter filter = proto.getFilter();
    scan.setFilter(ProtobufUtil.toFilter(filter));
  }
  if (proto.hasBatchSize()) {
    scan.setBatch(proto.getBatchSize());
  }
  if (proto.hasMaxResultSize()) {
    scan.setMaxResultSize(proto.getMaxResultSize());
  }
  if (proto.hasSmall()) {
    scan.setSmall(proto.getSmall());
  }
  if (proto.hasAllowPartialResults()) {
    scan.setAllowPartialResults(proto.getAllowPartialResults());
  }
  for (NameBytesPair attribute : proto.getAttributeList()) {
    scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
  }
  if (proto.getColumnCount() > 0) {
    for (Column column : proto.getColumnList()) {
      byte[] family = column.getFamily().toByteArray();
      if (column.getQualifierCount() > 0) {
        for (ByteString qualifier : column.getQualifierList()) {
          scan.addColumn(family, qualifier.toByteArray());
        }
      } else {
        scan.addFamily(family);
      }
    }
  }
  if (proto.hasReversed()) {
    scan.setReversed(proto.getReversed());
  }
  if (proto.hasConsistency()) {
    scan.setConsistency(toConsistency(proto.getConsistency()));
  }
  if (proto.hasCaching()) {
    scan.setCaching(proto.getCaching());
  }
  if (proto.hasMvccReadPoint()) {
    PackagePrivateFieldAccessor.setMvccReadPoint(scan, proto.getMvccReadPoint());
  }
  if (scan.isSmall()) {
    scan.setReadType(Scan.ReadType.PREAD);
  } else if (proto.hasReadType()) {
    scan.setReadType(toReadType(proto.getReadType()));
  }
  return scan;
}
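A minimal sketch of driving this converter from the protobuf side; the row keys, family, and qualifier below are hypothetical:

    ClientProtos.Scan proto = ClientProtos.Scan.newBuilder()
        .setStartRow(ByteString.copyFromUtf8("row-aaa"))
        .setStopRow(ByteString.copyFromUtf8("row-zzz"))
        .setIncludeStartRow(true)
        .setCacheBlocks(false)
        .setMaxVersions(3)
        .addColumn(ClientProtos.Column.newBuilder()
            .setFamily(ByteString.copyFromUtf8("cf"))
            .addQualifier(ByteString.copyFromUtf8("q1")))
        .build();
    Scan scan = ProtobufUtil.toScan(proto);
    // scan now selects cf:q1 over [row-aaa, row-zzz), keeps up to 3 versions,
    // and skips the block cache.

Note the pervasive field-presence checks (hasStartRow() and friends): optional fields that were never set fall back to client-side defaults such as HConstants.EMPTY_START_ROW, rather than being overwritten by protobuf zero values.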
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class FirstKeyValueMatchingQualifiersFilter, method parseFrom.
/**
 * @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance
 * @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from <code>pbBytes</code>
 * @throws DeserializationException if <code>pbBytes</code> is not a valid serialized filter
 * @see #toByteArray
 */
public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes)
    throws DeserializationException {
  FilterProtos.FirstKeyValueMatchingQualifiersFilter proto;
  try {
    proto = FilterProtos.FirstKeyValueMatchingQualifiersFilter.parseFrom(pbBytes);
  } catch (InvalidProtocolBufferException e) {
    throw new DeserializationException(e);
  }
  TreeSet<byte[]> qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  for (ByteString qualifier : proto.getQualifiersList()) {
    qualifiers.add(qualifier.toByteArray());
  }
  return new FirstKeyValueMatchingQualifiersFilter(qualifiers);
}
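A short round-trip sketch pairing parseFrom with the toByteArray method referenced in the Javadoc. The qualifier names are hypothetical, and Filter.toByteArray can itself throw IOException:

    TreeSet<byte[]> qualifiers = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    qualifiers.add(Bytes.toBytes("q1"));
    qualifiers.add(Bytes.toBytes("q2"));
    FirstKeyValueMatchingQualifiersFilter filter =
        new FirstKeyValueMatchingQualifiersFilter(qualifiers);
    byte[] pbBytes = filter.toByteArray();                        // serialize to protobuf bytes
    FirstKeyValueMatchingQualifiersFilter copy =
        FirstKeyValueMatchingQualifiersFilter.parseFrom(pbBytes); // ...and back again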
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class RSRpcServices, method replay.
/**
 * Replay the given changes when distributed log replay delivers WAL edits from a failed RS.
 * The guarantee is that the given mutations will be durable on the receiving RS if this method
 * returns without any exception.
 * @param controller the RPC controller
 * @param request the request
 * @throws ServiceException if the replay fails
 */
@Override
@QosPriority(priority = HConstants.REPLAY_QOS)
public ReplicateWALEntryResponse replay(final RpcController controller,
    final ReplicateWALEntryRequest request) throws ServiceException {
  long before = EnvironmentEdgeManager.currentTime();
  CellScanner cells = ((HBaseRpcController) controller).cellScanner();
  try {
    checkOpen();
    List<WALEntry> entries = request.getEntryList();
    if (entries == null || entries.isEmpty()) {
      // empty input
      return ReplicateWALEntryResponse.newBuilder().build();
    }
    ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
    Region region = regionServer.getRegionByEncodedName(regionName.toStringUtf8());
    // Do not invoke coprocessors if this is a secondary region replica.
    RegionCoprocessorHost coprocessorHost =
        ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
            ? region.getCoprocessorHost() : null;
    List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<>();
    // Skip adding the edits to the WAL if this is a secondary region replica.
    boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
    Durability durability = isPrimary ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
    for (WALEntry entry : entries) {
      if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
        throw new NotServingRegionException(
            "Replay request contains entries from multiple regions. First region: "
                + regionName.toStringUtf8() + ", other region: "
                + entry.getKey().getEncodedRegionName());
      }
      if (regionServer.nonceManager != null && isPrimary) {
        long nonceGroup =
            entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
        long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
        regionServer.nonceManager.reportOperationFromWal(nonceGroup, nonce,
            entry.getKey().getWriteTime());
      }
      Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null : new Pair<>();
      List<WALSplitter.MutationReplay> edits =
          WALSplitter.getMutationsFromWALEntry(entry, cells, walEntry, durability);
      if (coprocessorHost != null) {
        // Run the coprocessor replay hooks here, once per WALEdit rather than
        // per replicated KeyValue.
        if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(),
            walEntry.getSecond())) {
          // The coprocessor asked to bypass this log entry; skip it.
          continue;
        }
        walEntries.add(walEntry);
      }
      if (edits != null && !edits.isEmpty()) {
        long replaySeqId = (entry.getKey().hasOrigSequenceNumber())
            ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
        OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId);
        // Check whether the batch was only a partial success.
        for (int i = 0; result != null && i < result.length; i++) {
          if (result[i] != OperationStatus.SUCCESS) {
            throw new IOException(result[i].getExceptionMsg());
          }
        }
      }
    }
    // Sync the WAL at the end because ASYNC_WAL is used above.
    WAL wal = getWAL(region);
    if (wal != null) {
      wal.sync();
    }
    if (coprocessorHost != null) {
      for (Pair<WALKey, WALEdit> entry : walEntries) {
        coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(),
            entry.getSecond());
      }
    }
    return ReplicateWALEntryResponse.newBuilder().build();
  } catch (IOException ie) {
    throw new ServiceException(ie);
  } finally {
    if (regionServer.metricsRegionServer != null) {
      regionServer.metricsRegionServer.updateReplay(EnvironmentEdgeManager.currentTime() - before);
    }
  }
}
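A minimal sketch of the request shape this endpoint expects; every entry must carry the same encoded region name, or the method throws NotServingRegionException as shown above. All identifiers and values below are hypothetical, and the WALKey fields set here mirror the required ones populated by WALKey.getBuilder earlier in this page:

    ByteString encodedRegion = ByteString.copyFromUtf8("d41d8cd98f00b204");  // hypothetical encoded name
    AdminProtos.WALEntry entry = AdminProtos.WALEntry.newBuilder()
        .setKey(WALProtos.WALKey.newBuilder()
            .setEncodedRegionName(encodedRegion)
            .setTableName(ByteString.copyFromUtf8("test-table"))
            .setLogSequenceNumber(42L)
            .setWriteTime(System.currentTimeMillis()))
        .setAssociatedCellCount(0)  // the cells themselves travel out of band via the CellScanner
        .build();
    AdminProtos.ReplicateWALEntryRequest request =
        AdminProtos.ReplicateWALEntryRequest.newBuilder()
            .addEntry(entry)
            .build();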