Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class RSRpcServices, method getRegion.
/**
 * Find the HRegion based on a region specifier
 *
 * @param regionSpecifier the region specifier
 * @return the corresponding region
 * @throws IOException if the specifier is not null,
 *         but failed to find the region
 */
@VisibleForTesting
public Region getRegion(final RegionSpecifier regionSpecifier) throws IOException {
  ByteString value = regionSpecifier.getValue();
  RegionSpecifierType type = regionSpecifier.getType();
  switch (type) {
    case REGION_NAME:
      byte[] regionName = value.toByteArray();
      String encodedRegionName = HRegionInfo.encodeRegionName(regionName);
      return regionServer.getRegionByEncodedName(regionName, encodedRegionName);
    case ENCODED_REGION_NAME:
      return regionServer.getRegionByEncodedName(value.toStringUtf8());
    default:
      throw new DoNotRetryIOException("Unsupported region specifier type: " + type);
  }
}
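For context, callers hand this method a protobuf RegionSpecifier taken from an incoming request. A minimal caller-side sketch using only the generated protobuf builders; the rsRpcServices instance and the encodedName string are assumed placeholders:
// Hypothetical sketch: build an ENCODED_REGION_NAME specifier and resolve it.
RegionSpecifier spec = RegionSpecifier.newBuilder()
    .setType(RegionSpecifierType.ENCODED_REGION_NAME)
    .setValue(ByteString.copyFromUtf8(encodedName)) // encodedName: assumed String
    .build();
Region region = rsRpcServices.getRegion(spec); // throws DoNotRetryIOException for unsupported types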
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class RSRpcServices, method getStoreFile.
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public GetStoreFileResponse getStoreFile(final RpcController controller,
    final GetStoreFileRequest request) throws ServiceException {
  try {
    checkOpen();
    Region region = getRegion(request.getRegion());
    requestCount.increment();
    Set<byte[]> columnFamilies;
    if (request.getFamilyCount() == 0) {
      columnFamilies = region.getTableDesc().getFamiliesKeys();
    } else {
      columnFamilies = new TreeSet<>(Bytes.BYTES_RAWCOMPARATOR);
      for (ByteString cf : request.getFamilyList()) {
        columnFamilies.add(cf.toByteArray());
      }
    }
    int nCF = columnFamilies.size();
    List<String> fileList = region.getStoreFileList(columnFamilies.toArray(new byte[nCF][]));
    GetStoreFileResponse.Builder builder = GetStoreFileResponse.newBuilder();
    builder.addAllStoreFile(fileList);
    return builder.build();
  } catch (IOException ie) {
    throw new ServiceException(ie);
  }
}
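A minimal sketch of the request side, built with the generated protobuf builders whose getters appear above; the regionName byte array, the "cf" family name, and the rsRpcServices instance are assumptions for illustration:
// Hypothetical sketch: request the store files of one column family in one region.
GetStoreFileRequest request = GetStoreFileRequest.newBuilder()
    .setRegion(RegionSpecifier.newBuilder()
        .setType(RegionSpecifierType.REGION_NAME)
        .setValue(UnsafeByteOperations.unsafeWrap(regionName))) // regionName: assumed byte[]
    .addFamily(ByteString.copyFrom(Bytes.toBytes("cf")))        // omit families to list all of them
    .build();
GetStoreFileResponse response = rsRpcServices.getStoreFile(null, request);
List<String> storeFiles = response.getStoreFileList();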
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class TestClientNoCluster, method doMetaScanResponse.
static ScanResponse doMetaScanResponse(final SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta,
    final AtomicLong sequenceids, final ScanRequest request) {
  ScanResponse.Builder builder = ScanResponse.newBuilder();
  int max = request.getNumberOfRows();
  int count = 0;
  Map<byte[], Pair<HRegionInfo, ServerName>> tail =
      request.hasScan() ? meta.tailMap(request.getScan().getStartRow().toByteArray()) : meta;
  ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder();
  for (Map.Entry<byte[], Pair<HRegionInfo, ServerName>> e : tail.entrySet()) {
    // Can be 0 on open of a scanner -- i.e. rpc to setup scannerid only.
    if (max <= 0) break;
    if (++count > max) break;
    HRegionInfo hri = e.getValue().getFirst();
    ByteString row = UnsafeByteOperations.unsafeWrap(hri.getRegionName());
    resultBuilder.clear();
    resultBuilder.addCell(getRegionInfo(row, hri));
    resultBuilder.addCell(getServer(row, e.getValue().getSecond()));
    resultBuilder.addCell(getStartCode(row));
    builder.addResults(resultBuilder.build());
    // Set more to false if we are on the last region in table.
    if (hri.getEndKey().length <= 0) builder.setMoreResults(false);
    else builder.setMoreResults(true);
  }
  // If no scannerid, set one.
  builder.setScannerId(request.hasScannerId() ? request.getScannerId() : sequenceids.incrementAndGet());
  return builder.build();
}
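A hedged sketch of the kind of ScanRequest this fake serves, inferred from the fields it reads; the meta map and sequenceids counter are the test's fixtures, and the row counts are assumed values:
// Hypothetical sketch: first call only opens a scanner (numberOfRows = 0), a later call reuses its id.
ScanRequest openRequest = ScanRequest.newBuilder()
    .setScan(ClientProtos.Scan.newBuilder()
        .setStartRow(UnsafeByteOperations.unsafeWrap(HConstants.EMPTY_START_ROW)))
    .setNumberOfRows(0)
    .build();
ScanResponse openResponse = doMetaScanResponse(meta, sequenceids, openRequest);
ScanRequest nextRequest = ScanRequest.newBuilder()
    .setScannerId(openResponse.getScannerId())
    .setNumberOfRows(100)
    .build();
ScanResponse nextResponse = doMetaScanResponse(meta, sequenceids, nextRequest);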
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class TestPriorityRpc, method testQosFunctionForMeta.
@Test
public void testQosFunctionForMeta() throws IOException {
  priority = regionServer.rpcServices.getPriority();
  RequestHeader.Builder headerBuilder = RequestHeader.newBuilder();
  // Create an rpc request that has references to the hbase:meta region and also
  // uses one of the known argument classes (known argument classes are
  // listed in HRegionServer.QosFunctionImpl.knownArgumentClasses).
  headerBuilder.setMethodName("foo");
  GetRequest.Builder getRequestBuilder = GetRequest.newBuilder();
  RegionSpecifier.Builder regionSpecifierBuilder = RegionSpecifier.newBuilder();
  regionSpecifierBuilder.setType(RegionSpecifierType.REGION_NAME);
  ByteString name = UnsafeByteOperations.unsafeWrap(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
  regionSpecifierBuilder.setValue(name);
  RegionSpecifier regionSpecifier = regionSpecifierBuilder.build();
  getRequestBuilder.setRegion(regionSpecifier);
  Get.Builder getBuilder = Get.newBuilder();
  getBuilder.setRow(UnsafeByteOperations.unsafeWrap("somerow".getBytes()));
  getRequestBuilder.setGet(getBuilder.build());
  GetRequest getRequest = getRequestBuilder.build();
  RequestHeader header = headerBuilder.build();
  HRegion mockRegion = Mockito.mock(HRegion.class);
  HRegionServer mockRS = Mockito.mock(HRegionServer.class);
  RSRpcServices mockRpc = Mockito.mock(RSRpcServices.class);
  Mockito.when(mockRS.getRSRpcServices()).thenReturn(mockRpc);
  HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
  Mockito.when(mockRpc.getRegion((RegionSpecifier) Mockito.any())).thenReturn(mockRegion);
  Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
  Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(true);
  // Presume type.
  ((AnnotationReadingPriorityFunction) priority).setRegionServer(mockRS);
  assertEquals(HConstants.SYSTEMTABLE_QOS,
      priority.getPriority(header, getRequest,
          User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" })));
}
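For contrast, a hypothetical follow-up on the same mocks: with isSystemTable returning false, the same unannotated request should fall back to normal priority (assumed behavior of the priority function, not part of the original test):
// Hypothetical sketch: a user-table region is expected to get NORMAL_QOS.
Mockito.when(mockRegionInfo.isSystemTable()).thenReturn(false);
assertEquals(HConstants.NORMAL_QOS,
    priority.getPriority(header, getRequest,
        User.createUserForTesting(regionServer.conf, "someuser", new String[] { "somegroup" })));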
Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString in project hbase by Apache.
The class ReplicationSerDeHelper, method convert.
public static ReplicationPeerConfig convert(ReplicationProtos.ReplicationPeer peer) {
  ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
  if (peer.hasClusterkey()) {
    peerConfig.setClusterKey(peer.getClusterkey());
  }
  if (peer.hasReplicationEndpointImpl()) {
    peerConfig.setReplicationEndpointImpl(peer.getReplicationEndpointImpl());
  }
  for (HBaseProtos.BytesBytesPair pair : peer.getDataList()) {
    peerConfig.getPeerData().put(pair.getFirst().toByteArray(), pair.getSecond().toByteArray());
  }
  for (HBaseProtos.NameStringPair pair : peer.getConfigurationList()) {
    peerConfig.getConfiguration().put(pair.getName(), pair.getValue());
  }
  Map<TableName, ? extends Collection<String>> tableCFsMap =
      convert2Map(peer.getTableCfsList().toArray(new ReplicationProtos.TableCF[peer.getTableCfsCount()]));
  if (tableCFsMap != null) {
    peerConfig.setTableCFsMap(tableCFsMap);
  }
  List<ByteString> namespacesList = peer.getNamespacesList();
  if (namespacesList != null && namespacesList.size() != 0) {
    Set<String> namespaces = new HashSet<>();
    for (ByteString namespace : namespacesList) {
      namespaces.add(namespace.toStringUtf8());
    }
    peerConfig.setNamespaces(namespaces);
  }
  if (peer.hasBandwidth()) {
    peerConfig.setBandwidth(peer.getBandwidth());
  }
  return peerConfig;
}
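A minimal sketch of assembling the protobuf message this converter consumes, using only builder methods implied by the getters above; the cluster key, namespace, configuration property, and bandwidth values are placeholders:
// Hypothetical sketch: build a ReplicationPeer message and convert it to a ReplicationPeerConfig.
ReplicationProtos.ReplicationPeer peer = ReplicationProtos.ReplicationPeer.newBuilder()
    .setClusterkey("zk1,zk2,zk3:2181:/hbase")              // placeholder cluster key
    .addNamespaces(ByteString.copyFromUtf8("default"))     // replicate this namespace
    .addConfiguration(HBaseProtos.NameStringPair.newBuilder()
        .setName("some.peer.property").setValue("true"))   // placeholder peer property
    .setBandwidth(1048576L)                                 // placeholder bandwidth in bytes/sec
    .build();
ReplicationPeerConfig peerConfig = ReplicationSerDeHelper.convert(peer);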