Example 81 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class HRegionServer, method execRegionServerService.

public CoprocessorServiceResponse execRegionServerService(@SuppressWarnings("UnusedParameters") final RpcController controller, final CoprocessorServiceRequest serviceRequest) throws ServiceException {
    try {
        ServerRpcController serviceController = new ServerRpcController();
        CoprocessorServiceCall call = serviceRequest.getCall();
        String serviceName = call.getServiceName();
        com.google.protobuf.Service service = coprocessorServiceHandlers.get(serviceName);
        if (service == null) {
            throw new UnknownProtocolException(null, "No registered coprocessor service found for " + serviceName);
        }
        com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc = service.getDescriptorForType();
        String methodName = call.getMethodName();
        com.google.protobuf.Descriptors.MethodDescriptor methodDesc = serviceDesc.findMethodByName(methodName);
        if (methodDesc == null) {
            throw new UnknownProtocolException(service.getClass(), "Unknown method " + methodName + " called on service " + serviceName);
        }
        com.google.protobuf.Message request = CoprocessorRpcUtils.getRequest(service, methodDesc, call.getRequest());
        final com.google.protobuf.Message.Builder responseBuilder = service.getResponsePrototype(methodDesc).newBuilderForType();
        service.callMethod(methodDesc, serviceController, request, new com.google.protobuf.RpcCallback<com.google.protobuf.Message>() {

            @Override
            public void run(com.google.protobuf.Message message) {
                if (message != null) {
                    responseBuilder.mergeFrom(message);
                }
            }
        });
        IOException exception = CoprocessorRpcUtils.getControllerException(serviceController);
        if (exception != null) {
            throw exception;
        }
        return CoprocessorRpcUtils.getResponse(responseBuilder.build(), HConstants.EMPTY_BYTE_ARRAY);
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
}
Also used : CoprocessorServiceCall(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) UnknownProtocolException(org.apache.hadoop.hbase.exceptions.UnknownProtocolException) TableDescriptors(org.apache.hadoop.hbase.TableDescriptors) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors)
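
For context, the dispatch in execRegionServerService is plain protobuf reflection: look up a registered com.google.protobuf.Service by name, resolve the method descriptor, decode the request against the method's request prototype, and invoke callMethod with a callback that collects the response. Below is a minimal, self-contained sketch of that technique; ServiceDispatcher, its registry map, and the dispatch method are hypothetical names, not HBase or protobuf API.

import java.util.Map;

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;

// Hypothetical helper: routes a (serviceName, methodName, requestBytes) triple to whatever
// com.google.protobuf.Service implementation was registered under that service name.
public final class ServiceDispatcher {

    private final Map<String, Service> registry;

    public ServiceDispatcher(Map<String, Service> registry) {
        this.registry = registry;
    }

    public Message dispatch(String serviceName, String methodName, byte[] requestBytes,
            RpcController controller) throws Exception {
        Service service = registry.get(serviceName);
        if (service == null) {
            throw new IllegalArgumentException("No service registered as " + serviceName);
        }
        Descriptors.MethodDescriptor method =
            service.getDescriptorForType().findMethodByName(methodName);
        if (method == null) {
            throw new IllegalArgumentException("Unknown method " + methodName + " on " + serviceName);
        }
        // Decode the raw bytes against the request prototype declared for this method.
        Message request = service.getRequestPrototype(method).newBuilderForType()
            .mergeFrom(requestBytes).build();
        // callMethod takes an asynchronous callback, but endpoints in this pattern are
        // expected to answer inline, so collecting the response into a builder here is safe.
        final Message.Builder response = service.getResponsePrototype(method).newBuilderForType();
        service.callMethod(method, controller, request, message -> {
            if (message != null) {
                response.mergeFrom(message);
            }
        });
        return response.build();
    }
}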

Example 82 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class HRegionServer, method reportRegionStateTransition.

@Override
public boolean reportRegionStateTransition(final RegionStateTransitionContext context) {
    TransitionCode code = context.getCode();
    long openSeqNum = context.getOpenSeqNum();
    long masterSystemTime = context.getMasterSystemTime();
    HRegionInfo[] hris = context.getHris();
    if (TEST_SKIP_REPORTING_TRANSITION) {
        // This is for testing only, in case there is no master
        // to handle the region transition report at all.
        if (code == TransitionCode.OPENED) {
            Preconditions.checkArgument(hris != null && hris.length == 1);
            if (hris[0].isMetaRegion()) {
                try {
                    MetaTableLocator.setMetaLocation(getZooKeeper(), serverName, hris[0].getReplicaId(), State.OPEN);
                } catch (KeeperException e) {
                    LOG.info("Failed to update meta location", e);
                    return false;
                }
            } else {
                try {
                    MetaTableAccessor.updateRegionLocation(clusterConnection, hris[0], serverName, openSeqNum, masterSystemTime);
                } catch (IOException e) {
                    LOG.info("Failed to update meta", e);
                    return false;
                }
            }
        }
        return true;
    }
    ReportRegionStateTransitionRequest.Builder builder = ReportRegionStateTransitionRequest.newBuilder();
    builder.setServer(ProtobufUtil.toServerName(serverName));
    RegionStateTransition.Builder transition = builder.addTransitionBuilder();
    transition.setTransitionCode(code);
    if (code == TransitionCode.OPENED && openSeqNum >= 0) {
        transition.setOpenSeqNum(openSeqNum);
    }
    for (HRegionInfo hri : hris) {
        transition.addRegionInfo(HRegionInfo.convert(hri));
    }
    ReportRegionStateTransitionRequest request = builder.build();
    while (keepLooping()) {
        RegionServerStatusService.BlockingInterface rss = rssStub;
        try {
            if (rss == null) {
                createRegionServerStatusStub();
                continue;
            }
            ReportRegionStateTransitionResponse response = rss.reportRegionStateTransition(null, request);
            if (response.hasErrorMessage()) {
                LOG.info("Failed to transition " + hris[0] + " to " + code + ": " + response.getErrorMessage());
                return false;
            }
            return true;
        } catch (ServiceException se) {
            IOException ioe = ProtobufUtil.getRemoteException(se);
            LOG.info("Failed to report region transition, will retry", ioe);
            if (rssStub == rss) {
                rssStub = null;
            }
        }
    }
    return false;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) RegionStateTransition(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition) ReportRegionStateTransitionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TransitionCode(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) ReportRegionStateTransitionRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest) RegionServerStatusService(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService) KeeperException(org.apache.zookeeper.KeeperException)
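
The body of reportRegionStateTransition is one instance of a pattern HRegionServer uses for every master RPC: read the cached stub once, rebuild it when it is null, and on ServiceException drop the cached copy only if no other thread has already replaced it, so the next pass reconnects. A stripped-down sketch of just that control flow follows; StubRetryTemplate, connect, shouldKeepLooping and call are stand-in names, not HBase API.

// Hypothetical sketch of the stub-caching retry loop above. S stands in for
// RegionServerStatusService.BlockingInterface, the generic Exception for ServiceException,
// and connect()/shouldKeepLooping() for createRegionServerStatusStub()/keepLooping().
abstract class StubRetryTemplate<S, REQ, RESP> {

    // Cached connection to the remote peer; cleared on failure so it is rebuilt lazily.
    private volatile S cachedStub;

    protected abstract S connect();                  // build a fresh stub
    protected abstract boolean shouldKeepLooping();  // e.g. "the server is not stopping"
    protected abstract RESP call(S stub, REQ request) throws Exception;

    RESP callWithRetry(REQ request) {
        while (shouldKeepLooping()) {
            S stub = cachedStub;                     // read the cached stub once per attempt
            try {
                if (stub == null) {
                    cachedStub = connect();          // (re)connect, then retry the loop
                    continue;
                }
                return call(stub, request);
            } catch (Exception failure) {
                // Drop the cached stub only if it is still the one this attempt used, so a
                // stub refreshed concurrently by another caller is not thrown away.
                if (cachedStub == stub) {
                    cachedStub = null;
                }
            }
        }
        return null;                                 // gave up, like the "return false" above
    }
}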

Example 83 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class HRegionServer, method requestRegionSplit.

@Override
public long requestRegionSplit(final HRegionInfo regionInfo, final byte[] splitRow) {
    NonceGenerator ng = clusterConnection.getNonceGenerator();
    final long nonceGroup = ng.getNonceGroup();
    final long nonce = ng.newNonce();
    long procId = -1;
    SplitTableRegionRequest request = RequestConverter.buildSplitTableRegionRequest(regionInfo, splitRow, nonceGroup, nonce);
    while (keepLooping()) {
        RegionServerStatusService.BlockingInterface rss = rssStub;
        try {
            if (rss == null) {
                createRegionServerStatusStub();
                continue;
            }
            SplitTableRegionResponse response = rss.splitRegion(null, request);
            //TODO: should we limit the retry number before quitting?
            if (response == null || (procId = response.getProcId()) == -1) {
                LOG.warn("Failed to split " + regionInfo + " retrying...");
                continue;
            }
            break;
        } catch (ServiceException se) {
            // TODO: retry or just fail
            IOException ioe = ProtobufUtil.getRemoteException(se);
            LOG.info("Failed to split region, will retry", ioe);
            if (rssStub == rss) {
                rssStub = null;
            }
        }
    }
    return procId;
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) RegionServerStatusService(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService) SplitTableRegionRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest) NonceGenerator(org.apache.hadoop.hbase.client.NonceGenerator) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) SplitTableRegionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse)
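
Both retry loops call ProtobufUtil.getRemoteException to translate the protobuf-layer ServiceException back into an IOException before logging it. As a rough, hedged illustration of what such an unwrapping helper does (the real ProtobufUtil may differ in detail), assuming only that the remote IOException travels as the exception's cause:

import java.io.IOException;

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;

// Hypothetical unwrapping helper; not the actual ProtobufUtil implementation. If the
// ServiceException merely wraps an IOException raised on the remote side, hand that
// IOException back; otherwise wrap the whole ServiceException so nothing is lost.
final class RemoteExceptions {

    private RemoteExceptions() {
    }

    static IOException unwrap(ServiceException se) {
        Throwable cause = se.getCause();
        if (cause instanceof IOException) {
            return (IOException) cause;
        }
        return new IOException(se);
    }
}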

Example 84 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class HRegionServer, method getLastSequenceId.

@Override
public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) {
    try {
        GetLastFlushedSequenceIdRequest req = RequestConverter.buildGetLastFlushedSequenceIdRequest(encodedRegionName);
        RegionServerStatusService.BlockingInterface rss = rssStub;
        if (rss == null) {
            // Try to connect one more time
            createRegionServerStatusStub();
            rss = rssStub;
            if (rss == null) {
                // Still no luck, we tried
                LOG.warn("Unable to connect to the master to check " + "the last flushed sequence id");
                return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM).build();
            }
        }
        GetLastFlushedSequenceIdResponse resp = rss.getLastFlushedSequenceId(null, req);
        return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(resp.getLastFlushedSequenceId()).addAllStoreSequenceId(resp.getStoreLastFlushedSequenceIdList()).build();
    } catch (ServiceException e) {
        LOG.warn("Unable to connect to the master to check the last flushed sequence id", e);
        return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM).build();
    }
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) GetLastFlushedSequenceIdResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) RegionServerStatusService(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService) GetLastFlushedSequenceIdRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)
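
getLastSequenceId degrades gracefully: whether the stub cannot be (re)built or the RPC itself fails, the caller receives a RegionStoreSequenceIds carrying HConstants.NO_SEQNUM instead of an exception. The shape is a generic "call, or fall back to a default" helper; a small hypothetical sketch (Fallbacks, FailableCall and callOrDefault are invented names):

import java.util.function.Supplier;

// Hypothetical helper capturing the "return a safe sentinel instead of propagating the RPC
// failure" shape used by getLastSequenceId above.
final class Fallbacks {

    private Fallbacks() {
    }

    interface FailableCall<T> {
        T call() throws Exception;
    }

    static <T> T callOrDefault(FailableCall<T> call, Supplier<T> fallback) {
        try {
            return call.call();
        } catch (Exception e) {
            // The caller never sees the failure, only the fallback value; real code should
            // also log the exception, as getLastSequenceId does with LOG.warn.
            return fallback.get();
        }
    }
}

In the method above, the call would be the getLastFlushedSequenceId RPC and the fallback the builder that sets HConstants.NO_SEQNUM.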

Example 85 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class TestSimpleRegionNormalizer, method setupMocksForNormalizer.

protected void setupMocksForNormalizer(Map<byte[], Integer> regionSizes, List<HRegionInfo> hris) {
    masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
    masterRpcServices = Mockito.mock(MasterRpcServices.class, RETURNS_DEEP_STUBS);
    // for simplicity all regions are assumed to be on one server; doesn't matter to us
    ServerName sn = ServerName.valueOf("localhost", 0, 1L);
    when(masterServices.getAssignmentManager().getRegionStates().getRegionsOfTable(any(TableName.class))).thenReturn(hris);
    when(masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(any(HRegionInfo.class))).thenReturn(sn);
    for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
        RegionLoad regionLoad = Mockito.mock(RegionLoad.class);
        when(regionLoad.getName()).thenReturn(region.getKey());
        when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue());
        when(masterServices.getServerManager().getLoad(sn).getRegionsLoad().get(region.getKey())).thenReturn(regionLoad);
    }
    try {
        when(masterRpcServices.isSplitOrMergeEnabled(any(RpcController.class), any(IsSplitOrMergeEnabledRequest.class))).thenReturn(IsSplitOrMergeEnabledResponse.newBuilder().setEnabled(true).build());
    } catch (ServiceException se) {
        LOG.debug("error setting isSplitOrMergeEnabled switch", se);
    }
    normalizer.setMasterServices(masterServices);
    normalizer.setMasterRpcServices(masterRpcServices);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) RpcController(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController) TableName(org.apache.hadoop.hbase.TableName) MasterRpcServices(org.apache.hadoop.hbase.master.MasterRpcServices) RegionLoad(org.apache.hadoop.hbase.RegionLoad) IsSplitOrMergeEnabledRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) ServerName(org.apache.hadoop.hbase.ServerName) MasterServices(org.apache.hadoop.hbase.master.MasterServices) HashMap(java.util.HashMap) Map(java.util.Map)
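
The test setup relies on Mockito's RETURNS_DEEP_STUBS so a whole chain such as masterServices.getAssignmentManager().getRegionStates() can be stubbed in a single when(...) call, without hand-mocking each intermediate object. A self-contained sketch with invented stand-in interfaces (Master, Assignment, States), assuming only Mockito on the classpath:

import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Arrays;
import java.util.List;

public class DeepStubSketch {

    // Hypothetical interfaces standing in for MasterServices, AssignmentManager and RegionStates.
    interface Master { Assignment getAssignmentManager(); }
    interface Assignment { States getRegionStates(); }
    interface States { List<String> getRegionsOfTable(String table); }

    public static void main(String[] args) {
        // One deep-stubbed mock covers the whole call chain; Mockito creates the
        // intermediate mocks on demand.
        Master master = mock(Master.class, RETURNS_DEEP_STUBS);
        when(master.getAssignmentManager().getRegionStates().getRegionsOfTable("t1"))
            .thenReturn(Arrays.asList("region-a", "region-b"));

        // Prints [region-a, region-b]
        System.out.println(master.getAssignmentManager().getRegionStates().getRegionsOfTable("t1"));
    }
}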

Aggregations

ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 95 usages
IOException (java.io.IOException): 76 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 65 usages
InterruptedIOException (java.io.InterruptedIOException): 28 usages
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 24 usages
ServerName (org.apache.hadoop.hbase.ServerName): 16 usages
QosPriority (org.apache.hadoop.hbase.ipc.QosPriority): 15 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 14 usages
ArrayList (java.util.ArrayList): 12 usages
HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController): 12 usages
TableName (org.apache.hadoop.hbase.TableName): 10 usages
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString): 10 usages
Test (org.junit.Test): 9 usages
CellScanner (org.apache.hadoop.hbase.CellScanner): 5 usages
UnknownRegionException (org.apache.hadoop.hbase.UnknownRegionException): 4 usages
ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 4 usages
Result (org.apache.hadoop.hbase.client.Result): 4 usages
ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException): 4 usages
RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext): 4 usages
OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota): 4 usages