Use of org.apache.hadoop.hbase.ipc.HBaseRpcController in project hbase by apache.
The send method of the class AsyncBatchRpcRetryingCaller.
private void send(Map<ServerName, ServerRequest> actionsByServer, int tries) {
  long remainingNs;
  if (operationTimeoutNs > 0) {
    // Work out how much of the operation timeout budget is left; fail every
    // pending action if it has already been used up.
    remainingNs = remainingTimeNs();
    if (remainingNs <= 0) {
      failAll(actionsByServer.values().stream()
          .flatMap(m -> m.actionsByRegion.values().stream())
          .flatMap(r -> r.actions.stream()), tries);
      return;
    }
  } else {
    remainingNs = Long.MAX_VALUE;
  }
  actionsByServer.forEach((sn, serverReq) -> {
    ClientService.Interface stub;
    try {
      stub = conn.getRegionServerStub(sn);
    } catch (IOException e) {
      onError(serverReq.actionsByRegion, tries, e, sn);
      return;
    }
    ClientProtos.MultiRequest req;
    List<CellScannable> cells = new ArrayList<>();
    try {
      req = buildReq(serverReq.actionsByRegion, cells);
    } catch (IOException e) {
      onError(serverReq.actionsByRegion, tries, e, sn);
      return;
    }
    // Each per-server call gets its own HBaseRpcController carrying the call
    // timeout and, when present, the cell payload for the request.
    HBaseRpcController controller = conn.rpcControllerFactory.newController();
    resetController(controller, Math.min(rpcTimeoutNs, remainingNs));
    if (!cells.isEmpty()) {
      controller.setCellScanner(createCellScanner(cells));
    }
    stub.multi(controller, req, resp -> {
      if (controller.failed()) {
        onError(serverReq.actionsByRegion, tries, controller.getFailed(), sn);
      } else {
        try {
          onComplete(serverReq.actionsByRegion, tries, sn,
              ResponseConverter.getResults(req, resp, controller.cellScanner()));
        } catch (Exception e) {
          onError(serverReq.actionsByRegion, tries, e, sn);
          return;
        }
      }
    });
  });
}
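The resetController helper referenced above is not part of the snippet. A minimal sketch of what such a helper could look like, assuming (as in the method above) that the remaining budget is tracked in nanoseconds and that java.util.concurrent.TimeUnit is imported; the body is an assumption, not the HBase implementation:

private void resetController(HBaseRpcController controller, long timeoutNs) {
  // Hedged sketch: drop any state left from a previous attempt, then convert
  // the nanosecond budget into the millisecond call timeout the controller expects.
  controller.reset();
  if (timeoutNs >= 0) {
    controller.setCallTimeout(
        (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(timeoutNs)));
  }
}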
Use of org.apache.hadoop.hbase.ipc.HBaseRpcController in project hbase by apache.
The getMasterStub method of the class AsyncConnectionImpl.
CompletableFuture<MasterService.Interface> getMasterStub() {
  MasterService.Interface masterStub = this.masterStub.get();
  if (masterStub == null) {
    // No cached stub yet: the compare-and-set on masterStubMakeFuture ensures only
    // one caller builds the stub while every other caller waits on the same future.
    for (;;) {
      if (this.masterStubMakeFuture.compareAndSet(null, new CompletableFuture<>())) {
        CompletableFuture<MasterService.Interface> future = this.masterStubMakeFuture.get();
        makeMasterStub(future);
      } else {
        CompletableFuture<MasterService.Interface> future = this.masterStubMakeFuture.get();
        if (future != null) {
          return future;
        }
      }
    }
  }
  // A stub is cached: verify that the master is still running before handing it
  // out, and rebuild the stub if the check fails.
  for (;;) {
    if (masterStubMakeFuture.compareAndSet(null, new CompletableFuture<>())) {
      CompletableFuture<MasterService.Interface> future = masterStubMakeFuture.get();
      HBaseRpcController controller = getRpcController();
      masterStub.isMasterRunning(controller, RequestConverter.buildIsMasterRunningRequest(),
          new RpcCallback<IsMasterRunningResponse>() {

            @Override
            public void run(IsMasterRunningResponse resp) {
              if (controller.failed() || resp == null || !resp.getIsMasterRunning()) {
                makeMasterStub(future);
              } else {
                future.complete(masterStub);
              }
            }
          });
    } else {
      CompletableFuture<MasterService.Interface> future = masterStubMakeFuture.get();
      if (future != null) {
        return future;
      }
    }
  }
}
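The getRpcController helper used above is not shown in the snippet. A minimal sketch of what it might look like, assuming the connection keeps an RpcControllerFactory and an RPC timeout in nanoseconds; the field names rpcControllerFactory and rpcTimeoutNs are placeholders for illustration, not confirmed members:

private HBaseRpcController getRpcController() {
  // Hedged sketch: one fresh controller per master liveness check, with the
  // connection's RPC timeout applied in milliseconds.
  HBaseRpcController controller = this.rpcControllerFactory.newController();
  controller.setCallTimeout((int) TimeUnit.NANOSECONDS.toMillis(rpcTimeoutNs));
  return controller;
}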
Use of org.apache.hadoop.hbase.ipc.HBaseRpcController in project hbase by apache.
The verifyRegionLocation method of the class MetaTableLocator.
/**
 * Verify we can connect to <code>hostingServer</code> and that it is carrying
 * <code>regionName</code>.
 * @param hostingServer Interface to the server hosting <code>regionName</code>
 * @param address The server name that goes with the <code>hostingServer</code>
 *          interface. Used for logging.
 * @param regionName The region name we are interested in.
 * @return True if we were able to verify the region located at the other side of
 *         the interface.
 * @throws IOException
 */
// TODO: We should be able to get the ServerName from the AdminProtocol
// rather than have to pass it in. It's made awkward by the fact that the
// HRI is likely a proxy against a remote server, so getServerName needs
// to be fixed to go to a local method or to a cache before we can do this.
private boolean verifyRegionLocation(final ClusterConnection connection,
    AdminService.BlockingInterface hostingServer, final ServerName address,
    final byte[] regionName) throws IOException {
  if (hostingServer == null) {
    LOG.info("Passed hostingServer is null");
    return false;
  }
  Throwable t;
  HBaseRpcController controller = connection.getRpcControllerFactory().newController();
  try {
    // Try and get the region info from the hosting server.
    return ProtobufUtil.getRegionInfo(controller, hostingServer, regionName) != null;
  } catch (ConnectException e) {
    t = e;
  } catch (RetriesExhaustedException e) {
    t = e;
  } catch (RemoteException e) {
    IOException ioe = e.unwrapRemoteException();
    t = ioe;
  } catch (IOException e) {
    Throwable cause = e.getCause();
    if (cause != null && cause instanceof EOFException) {
      t = cause;
    } else if (cause != null && cause.getMessage() != null
        && cause.getMessage().contains("Connection reset")) {
      t = cause;
    } else {
      t = e;
    }
  }
  LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) + " at address="
      + address + ", exception=" + t.getMessage());
  return false;
}
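The essence of the method is the controller-per-call pattern: a fresh HBaseRpcController comes from the connection's RpcControllerFactory, the blocking admin call runs against it, and any failure surfaces as an exception. A minimal sketch of the same pattern in isolation; conn, admin and regionName are placeholders, and the explicit priority and timeout are assumptions rather than something the method above sets:

HBaseRpcController controller = conn.getRpcControllerFactory().newController();
controller.setPriority(HConstants.SYSTEMTABLE_QOS); // assumed: raise QoS for system-table checks
controller.setCallTimeout(10000);                   // assumed: 10 s call timeout, in ms
boolean present = ProtobufUtil.getRegionInfo(controller, admin, regionName) != null;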
Use of org.apache.hadoop.hbase.ipc.HBaseRpcController in project hbase by apache.
The replicateWALEntry method of the class ReplicationProtbufUtil.
/**
 * A helper to replicate an array of WAL entries using the admin protocol.
 * @param admin Admin service
 * @param entries Array of WAL entries to be replicated
 * @param replicationClusterId Id which will uniquely identify the source cluster's FS client
 *          configurations in the replication configuration directory
 * @param sourceBaseNamespaceDir Path to the source cluster base namespace directory
 * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
 * @throws java.io.IOException
 */
public static void replicateWALEntry(final AdminService.BlockingInterface admin,
    final Entry[] entries, String replicationClusterId, Path sourceBaseNamespaceDir,
    Path sourceHFileArchiveDir) throws IOException {
  Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = buildReplicateWALEntryRequest(
      entries, null, replicationClusterId, sourceBaseNamespaceDir, sourceHFileArchiveDir);
  HBaseRpcController controller = new HBaseRpcControllerImpl(p.getSecond());
  try {
    admin.replicateWALEntry(controller, p.getFirst());
  } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException e) {
    throw ProtobufUtil.handleRemoteException(e);
  }
}
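The noteworthy detail is that the cells extracted while building the request ride on the controller rather than inside the protobuf message. A minimal sketch of the same idea going through a configured RpcControllerFactory instead of instantiating HBaseRpcControllerImpl directly; conf, p and admin are placeholders taken from the context above, and the explicit call timeout is an assumption:

HBaseRpcController controller = RpcControllerFactory.instantiate(conf).newController(p.getSecond());
controller.setCallTimeout(60000); // assumed: 60 s replication RPC timeout, in ms
try {
  // The cell payload travels with the controller; the protobuf request only carries metadata.
  admin.replicateWALEntry(controller, p.getFirst());
} catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException e) {
  throw ProtobufUtil.handleRemoteException(e);
}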
Use of org.apache.hadoop.hbase.ipc.HBaseRpcController in project hbase by apache.
The isServerReachable method of the class ServerManager.
/**
* Check if a region server is reachable and has the expected start code
*/
public boolean isServerReachable(ServerName server) {
  if (server == null) {
    throw new NullPointerException("Passed server is null");
  }
  RetryCounter retryCounter = pingRetryCounterFactory.create();
  while (retryCounter.shouldRetry()) {
    try {
      HBaseRpcController controller = newRpcController();
      AdminService.BlockingInterface admin = getRsAdmin(server);
      if (admin != null) {
        ServerInfo info = ProtobufUtil.getServerInfo(controller, admin);
        return info != null && info.hasServerName()
            && server.getStartcode() == info.getServerName().getStartCode();
      }
    } catch (IOException ioe) {
      LOG.debug("Couldn't reach " + server + ", try=" + retryCounter.getAttemptTimes() + " of "
          + retryCounter.getMaxAttempts(), ioe);
      try {
        retryCounter.sleepUntilNextRetry();
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
    }
  }
  return false;
}
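The newRpcController helper is not shown above. A minimal sketch of what it might look like, assuming the ServerManager keeps an RpcControllerFactory created from the cluster Configuration; the field name is a placeholder, not a confirmed member:

private HBaseRpcController newRpcController() {
  // Hedged sketch: a fresh controller per ping, or null when no factory is
  // configured, matching how the helper's result is used above.
  return rpcControllerFactory == null ? null : rpcControllerFactory.newController();
}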