Search in sources:

Example 61 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class MasterRpcServices, method reportRegionStateTransition.

@Override
public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c, ReportRegionStateTransitionRequest req) throws ServiceException {
    try {
        master.checkServiceStarted();
        RegionStateTransition transition = req.getTransition(0);
        RegionStates states = master.getAssignmentManager().getRegionStates();
        for (RegionInfo info : transition.getRegionInfoList()) {
            TableName table = ProtobufUtil.toTableName(info.getTableName());
            // A meta region that already has state is allowed through before
            // failover cleanup finishes; everything else must wait until the
            // master has rebuilt its view of user regions.
            boolean metaOnline = TableName.META_TABLE_NAME.equals(table)
                && states.getRegionState(HRegionInfo.FIRST_META_REGIONINFO) != null;
            if (!metaOnline && !master.getAssignmentManager().isFailoverCleanupDone()) {
                throw new PleaseHoldException("Master is rebuilding user regions");
            }
        }
        ServerName reportingServer = ProtobufUtil.toServerName(req.getServer());
        String error = master.getAssignmentManager().onRegionTransition(reportingServer, transition);
        ReportRegionStateTransitionResponse.Builder response = ReportRegionStateTransitionResponse.newBuilder();
        if (error != null) {
            response.setErrorMessage(error);
        }
        return response.build();
    } catch (IOException ioe) {
        // RPC contract: surface IO failures to the client as ServiceException.
        throw new ServiceException(ioe);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) PleaseHoldException(org.apache.hadoop.hbase.PleaseHoldException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) ServerName(org.apache.hadoop.hbase.ServerName) RegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) RegionStateTransition(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition) ReportRegionStateTransitionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse)

Example 62 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class MasterRpcServices, method regionServerReport.

@Override
public RegionServerReportResponse regionServerReport(RpcController controller, RegionServerReportRequest request) throws ServiceException {
    try {
        master.checkServiceStarted();
        final ClusterStatusProtos.ServerLoad reportedLoad = request.getLoad();
        final ServerName reportingServer = ProtobufUtil.toServerName(request.getServer());
        // Snapshot the previous load before the report replaces it, so we can
        // compute the request-count delta for metrics below.
        final ServerLoad previousLoad = master.getServerManager().getLoad(reportingServer);
        master.getServerManager().regionServerReport(reportingServer, new ServerLoad(reportedLoad));
        if (reportedLoad != null && master.metricsMaster != null) {
            // Up our metrics by the number of requests since the last report.
            final long delta = reportedLoad.getTotalNumberOfRequests()
                - (previousLoad == null ? 0 : previousLoad.getTotalNumberOfRequests());
            master.metricsMaster.incrementRequests(delta);
        }
    } catch (IOException ioe) {
        // RPC contract: surface IO failures to the client as ServiceException.
        throw new ServiceException(ioe);
    }
    return RegionServerReportResponse.newBuilder().build();
}
Also used : ServerLoad(org.apache.hadoop.hbase.ServerLoad) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) ServerName(org.apache.hadoop.hbase.ServerName) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)

Example 63 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class MasterRpcServices, method createTable.

@Override
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req) throws ServiceException {
    // Decode the requested schema and split points up front; only the actual
    // table creation below is wrapped for IOException translation.
    final HTableDescriptor descriptor = ProtobufUtil.convertToHTableDesc(req.getTableSchema());
    final byte[][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
    try {
        final long procId =
            master.createTable(descriptor, splitKeys, req.getNonceGroup(), req.getNonce());
        return CreateTableResponse.newBuilder().setProcId(procId).build();
    } catch (IOException ioe) {
        // RPC contract: surface IO failures to the client as ServiceException.
        throw new ServiceException(ioe);
    }
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 64 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class HBaseAdmin, method getCompactionStateForRegion.

@Override
public CompactionState getCompactionStateForRegion(final byte[] regionName) throws IOException {
    final Pair<HRegionInfo, ServerName> pair = getRegion(regionName);
    if (pair == null) {
        throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
    }
    if (pair.getSecond() == null) {
        // The region exists but is not currently hosted anywhere.
        throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
    }
    final ServerName hostingServer = pair.getSecond();
    final AdminService.BlockingInterface admin = this.connection.getAdmin(hostingServer);
    // TODO: There is no timeout on this controller. Set one!
    final HBaseRpcController rpcController = rpcControllerFactory.newController();
    final GetRegionInfoRequest infoRequest =
        RequestConverter.buildGetRegionInfoRequest(pair.getFirst().getRegionName(), true);
    final GetRegionInfoResponse infoResponse;
    try {
        infoResponse = admin.getRegionInfo(rpcController, infoRequest);
    } catch (ServiceException e) {
        // Unwrap the remote cause so callers see the original IOException type.
        throw ProtobufUtil.handleRemoteException(e);
    }
    // NOTE(review): protobuf enum getters normally never return null, so this
    // branch presumably always executes — confirm whether hasCompactionState()
    // was intended here.
    if (infoResponse.getCompactionState() != null) {
        return ProtobufUtil.createCompactionState(infoResponse.getCompactionState());
    }
    return null;
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) AdminService(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService) GetRegionInfoRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) GetRegionInfoResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse) ServerName(org.apache.hadoop.hbase.ServerName)

Example 65 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

The class RSRpcServices, method openRegion.

/**
   * Open asynchronously a region or a set of regions on the region server.
   *
   * The opening is coordinated by ZooKeeper, and this method requires the znode to be created
   *  before being called. As a consequence, this method should be called only from the master.
   * <p>
   * Different manages states for the region are:
   * </p><ul>
   *  <li>region not opened: the region opening will start asynchronously.</li>
   *  <li>a close is already in progress: this is considered as an error.</li>
   *  <li>an open is already in progress: this new open request will be ignored. This is important
   *  because the Master can do multiple requests if it crashes.</li>
   *  <li>the region is already opened:  this new open request will be ignored.</li>
   *  </ul>
   * <p>
   * Bulk assign: If there are more than 1 region to open, it will be considered as a bulk assign.
   * For a single region opening, errors are sent through a ServiceException. For bulk assign,
   * errors are put in the response as FAILED_OPENING.
   * </p>
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public OpenRegionResponse openRegion(final RpcController controller, final OpenRegionRequest request) throws ServiceException {
    requestCount.increment();
    if (request.hasServerStartCode()) {
        // check that we are the same server that this RPC is intended for.
        long serverStartCode = request.getServerStartCode();
        if (regionServer.serverName.getStartcode() != serverStartCode) {
            throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " + "different server with startCode: " + serverStartCode + ", this server is: " + regionServer.serverName));
        }
    }
    OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
    final int regionCount = request.getOpenInfoCount();
    // Cache of table descriptors so each table is looked up at most once per request.
    final Map<TableName, HTableDescriptor> htds = new HashMap<>(regionCount);
    // More than one region => bulk assign: per-region failures go into the
    // response as FAILED_OPENING instead of failing the whole RPC.
    final boolean isBulkAssign = regionCount > 1;
    try {
        checkOpen();
    } catch (IOException ie) {
        // Server not fully up yet. If this single-region request is for meta,
        // wait briefly for initialization rather than bouncing the assignment.
        TableName tableName = null;
        if (regionCount == 1) {
            RegionInfo ri = request.getOpenInfo(0).getRegion();
            if (ri != null) {
                tableName = ProtobufUtil.toTableName(ri.getTableName());
            }
        }
        if (!TableName.META_TABLE_NAME.equals(tableName)) {
            throw new ServiceException(ie);
        }
        // We are assigning meta, wait a little for regionserver to finish initialization.
        int timeout = regionServer.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> // Quarter of RPC timeout
        2;
        long endTime = System.currentTimeMillis() + timeout;
        synchronized (regionServer.online) {
            try {
                // Poll until the deadline passes, the server stops, or it comes online.
                while (System.currentTimeMillis() <= endTime && !regionServer.isStopped() && !regionServer.isOnline()) {
                    regionServer.online.wait(regionServer.msgInterval);
                }
                checkOpen();
            } catch (InterruptedException t) {
                // Restore interrupt status before surfacing the failure.
                Thread.currentThread().interrupt();
                throw new ServiceException(t);
            } catch (IOException e) {
                throw new ServiceException(e);
            }
        }
    }
    long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
    for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
        final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
        HTableDescriptor htd;
        try {
            String encodedName = region.getEncodedName();
            byte[] encodedNameBytes = region.getEncodedNameAsBytes();
            final Region onlineRegion = regionServer.getFromOnlineRegions(encodedName);
            if (onlineRegion != null) {
                // The region is already online. This should not happen any more.
                String error = "Received OPEN for the region:" + region.getRegionNameAsString() + ", which is already online";
                regionServer.abort(error);
                throw new IOException(error);
            }
            LOG.info("Open " + region.getRegionNameAsString());
            // Mark the region as in-transition; 'previous' tells us what state, if
            // any, the region was already in (TRUE = opening, FALSE = closing).
            final Boolean previous = regionServer.regionsInTransitionInRS.putIfAbsent(encodedNameBytes, Boolean.TRUE);
            if (Boolean.FALSE.equals(previous)) {
                if (regionServer.getFromOnlineRegions(encodedName) != null) {
                    // There is a close in progress. This should not happen any more.
                    String error = "Received OPEN for the region:" + region.getRegionNameAsString() + ", which we are already trying to CLOSE";
                    regionServer.abort(error);
                    throw new IOException(error);
                }
                // Flip the in-transition marker from closing to opening.
                regionServer.regionsInTransitionInRS.put(encodedNameBytes, Boolean.TRUE);
            }
            if (Boolean.TRUE.equals(previous)) {
                // An open is in progress. This is supported, but let's log this.
                LOG.info("Receiving OPEN for the region:" + region.getRegionNameAsString() + ", which we are already trying to OPEN" + " - ignoring this new request for this region.");
            }
            // We are opening this region. If it moves back and forth for whatever reason, we don't
            // want to keep returning the stale moved record while we are opening/if we close again.
            regionServer.removeFromMovedRegions(region.getEncodedName());
            if (previous == null || !previous.booleanValue()) {
                // check if the region to be opened is marked in recovering state in ZK
                if (ZKSplitLog.isRegionMarkedRecoveringInZK(regionServer.getZooKeeper(), region.getEncodedName())) {
                    // rolling restart/upgrade where we want to Master/RS see same configuration
                    if (!regionOpenInfo.hasOpenForDistributedLogReplay() || regionOpenInfo.getOpenForDistributedLogReplay()) {
                        regionServer.recoveringRegions.put(region.getEncodedName(), null);
                    } else {
                        // Remove stale recovery region from ZK when we open region not for recovering which
                        // could happen when turn distributedLogReplay off from on.
                        List<String> tmpRegions = new ArrayList<>();
                        tmpRegions.add(region.getEncodedName());
                        ZKSplitLog.deleteRecoveringRegionZNodes(regionServer.getZooKeeper(), tmpRegions);
                    }
                }
                // Resolve the table descriptor, consulting the per-request cache first.
                htd = htds.get(region.getTable());
                if (htd == null) {
                    htd = regionServer.tableDescriptors.get(region.getTable());
                    htds.put(region.getTable(), htd);
                }
                if (htd == null) {
                    throw new IOException("Missing table descriptor for " + region.getEncodedName());
                }
                // Need to pass the expected version in the constructor.
                if (region.isMetaRegion()) {
                    regionServer.service.submit(new OpenMetaHandler(regionServer, regionServer, region, htd, masterSystemTime));
                } else {
                    if (regionOpenInfo.getFavoredNodesCount() > 0) {
                        regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(), regionOpenInfo.getFavoredNodesList());
                    }
                    // High-priority and system tables get the priority handler queue.
                    if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
                        regionServer.service.submit(new OpenPriorityRegionHandler(regionServer, regionServer, region, htd, masterSystemTime));
                    } else {
                        regionServer.service.submit(new OpenRegionHandler(regionServer, regionServer, region, htd, masterSystemTime));
                    }
                }
            }
            // The open was submitted (or was already in flight): report OPENED.
            builder.addOpeningState(RegionOpeningState.OPENED);
        } catch (KeeperException zooKeeperEx) {
            LOG.error("Can't retrieve recovering state from zookeeper", zooKeeperEx);
            throw new ServiceException(zooKeeperEx);
        } catch (IOException ie) {
            LOG.warn("Failed opening region " + region.getRegionNameAsString(), ie);
            if (isBulkAssign) {
                builder.addOpeningState(RegionOpeningState.FAILED_OPENING);
            } else {
                throw new ServiceException(ie);
            }
        }
    }
    return builder.build();
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) RegionOpenInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) OpenRegionHandler(org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler) OpenPriorityRegionHandler(org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler) OpenMetaHandler(org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler) OpenRegionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) KeeperException(org.apache.zookeeper.KeeperException) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)

Aggregations

ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException)95 IOException (java.io.IOException)76 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)65 InterruptedIOException (java.io.InterruptedIOException)28 HBaseIOException (org.apache.hadoop.hbase.HBaseIOException)24 ServerName (org.apache.hadoop.hbase.ServerName)16 QosPriority (org.apache.hadoop.hbase.ipc.QosPriority)15 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)14 ArrayList (java.util.ArrayList)12 HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController)12 TableName (org.apache.hadoop.hbase.TableName)10 ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString)10 Test (org.junit.Test)9 CellScanner (org.apache.hadoop.hbase.CellScanner)5 UnknownRegionException (org.apache.hadoop.hbase.UnknownRegionException)4 ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection)4 Result (org.apache.hadoop.hbase.client.Result)4 ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException)4 RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext)4 OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota)4