Search in sources :

Example 1 with Pair

use of org.apache.hadoop.hbase.util.Pair in project hbase by apache.

The method isTableAvailable of the class ConnectionImplementation.

/**
 * Checks whether the given table is fully available: enabled and with every
 * expected region deployed on some region server.
 * <p>
 * When {@code splitKeys} is supplied, additionally verifies that a deployed region
 * exists for each split key plus one region with the empty start key, i.e. exactly
 * {@code splitKeys.length + 1} regions in total.
 *
 * @param tableName table to check
 * @param splitKeys expected split keys, or null to skip the region-count check
 * @return true if the table is enabled and all expected regions are deployed
 * @throws IOException if this connection is already closed or the meta lookup fails
 */
@Override
public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) throws IOException {
    if (this.closed) {
        throw new IOException(toString() + " closed");
    }
    try {
        if (!isTableEnabled(tableName)) {
            LOG.debug("Table " + tableName + " not enabled");
            return false;
        }
        List<Pair<HRegionInfo, ServerName>> locations = MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
        int notDeployed = 0;
        int regionCount = 0;
        for (Pair<HRegionInfo, ServerName> pair : locations) {
            HRegionInfo info = pair.getFirst();
            if (pair.getSecond() == null) {
                // No server recorded in meta for this region: it is not deployed yet.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst().getEncodedName());
                }
                notDeployed++;
            } else if (splitKeys != null && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
                // Count this region only if its start key matches one of the expected split keys.
                for (byte[] splitKey : splitKeys) {
                    // Just check if the splitkey is available
                    if (Bytes.equals(info.getStartKey(), splitKey)) {
                        regionCount++;
                        break;
                    }
                }
            } else {
                // Always empty start row should be counted
                regionCount++;
            }
        }
        if (notDeployed > 0) {
            if (LOG.isDebugEnabled()) {
                // Fixed message: previous text ("has N regions") omitted that the
                // counted regions are the NOT-deployed ones, which was misleading.
                LOG.debug("Table " + tableName + " has " + notDeployed + " regions not deployed");
            }
            return false;
        } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1) + " regions, but only " + regionCount + " available");
            }
            return false;
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Table " + tableName + " should be available");
            }
            return true;
        }
    } catch (TableNotFoundException tnfe) {
        // Fixed message: previous text ("not enabled, it is not exists") was
        // ungrammatical and conflated "disabled" with "missing".
        LOG.warn("Table " + tableName + " does not exist");
        return false;
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) ServerName(org.apache.hadoop.hbase.ServerName) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Pair(org.apache.hadoop.hbase.util.Pair)

Example 2 with Pair

use of org.apache.hadoop.hbase.util.Pair in project hbase by apache.

The method getRegionsFromMergeQualifier of the class MetaTableAccessor.

/**
 * Get regions from the merge qualifier of the specified merged region.
 * @param connection connection used to read the region's meta row
 * @param regionName name of the merged region to inspect
 * @return null if the region row carries no merge qualifier, otherwise the two
 *         parent regions of the merge (either element may be null if only one
 *         qualifier is present)
 * @throws IOException if reading the meta row fails
 */
@Nullable
public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(Connection connection, byte[] regionName) throws IOException {
    final Result regionRow = getRegionResult(connection, regionName);
    final HRegionInfo parentA = getHRegionInfo(regionRow, HConstants.MERGEA_QUALIFIER);
    final HRegionInfo parentB = getHRegionInfo(regionRow, HConstants.MERGEB_QUALIFIER);
    // Neither merge qualifier present -> this region is not the product of a merge.
    return (parentA == null && parentB == null) ? null : new Pair<>(parentA, parentB);
}
Also used : Result(org.apache.hadoop.hbase.client.Result) Pair(org.apache.hadoop.hbase.util.Pair) Nullable(edu.umd.cs.findbugs.annotations.Nullable)

Example 3 with Pair

use of org.apache.hadoop.hbase.util.Pair in project hbase by apache.

The method getRegion of the class AsyncMetaTableAccessor.

/**
 * Asynchronously looks up the region and its hosting server for the given region
 * name by reading hbase:meta. The returned future completes with null when no
 * location can be found, and completes exceptionally if the meta read fails.
 */
public static CompletableFuture<Pair<HRegionInfo, ServerName>> getRegion(RawAsyncTable metaTable, byte[] regionName) {
    CompletableFuture<Pair<HRegionInfo, ServerName>> result = new CompletableFuture<>();
    byte[] metaKey = regionName;
    HRegionInfo parsed = null;
    try {
        parsed = MetaTableAccessor.parseRegionInfoFromRegionName(regionName);
        metaKey = MetaTableAccessor.getMetaKeyForRegion(parsed);
    } catch (Exception ignored) {
        // regionName may be an encoded region name that cannot be parsed;
        // in that case fall back to using it directly as the meta row key.
    }
    final HRegionInfo hri = parsed;
    metaTable.get(new Get(metaKey).addFamily(HConstants.CATALOG_FAMILY)).whenComplete((metaRow, error) -> {
        if (error != null) {
            result.completeExceptionally(error);
            return;
        }
        RegionLocations regionLocations = MetaTableAccessor.getRegionLocations(metaRow);
        // Default to replica 0 when the region name could not be parsed.
        int replicaId = (hri == null) ? 0 : hri.getReplicaId();
        HRegionLocation location = (regionLocations == null) ? null : regionLocations.getRegionLocation(replicaId);
        if (location == null) {
            result.complete(null);
        } else {
            result.complete(new Pair<>(location.getRegionInfo(), location.getServerName()));
        }
    });
    return result;
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) Get(org.apache.hadoop.hbase.client.Get) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) Pair(org.apache.hadoop.hbase.util.Pair)

Example 4 with Pair

use of org.apache.hadoop.hbase.util.Pair in project hbase by apache.

The method assign of the class AssignmentManager.

/**
 * Bulk assign regions to <code>destination</code>.
 * <p>
 * Regions whose state cannot be forced offline (or whose last host is an
 * unprocessed dead server) are excluded from the bulk OPEN RPC and retried
 * individually afterwards via {@code invokeAssign}.
 * @param destination server that should receive the regions
 * @param regions Regions to assign.
 * @return true if successful
 * @throws InterruptedException if interrupted while sleeping between RPC retries
 */
boolean assign(final ServerName destination, final List<HRegionInfo> regions) throws InterruptedException {
    long startTime = EnvironmentEdgeManager.currentTime();
    try {
        int regionCount = regions.size();
        if (regionCount == 0) {
            // Nothing to do; trivially successful.
            return true;
        }
        LOG.info("Assigning " + regionCount + " region(s) to " + destination.toString());
        Set<String> encodedNames = new HashSet<>(regionCount);
        for (HRegionInfo region : regions) {
            encodedNames.add(region.getEncodedName());
        }
        List<HRegionInfo> failedToOpenRegions = new ArrayList<>();
        // Lock every region up front so no concurrent assignment races with this bulk pass.
        Map<String, Lock> locks = locker.acquireLocks(encodedNames);
        try {
            // Phase 1: force each region offline and build the per-region plans.
            Map<String, RegionPlan> plans = new HashMap<>(regionCount);
            List<RegionState> states = new ArrayList<>(regionCount);
            for (HRegionInfo region : regions) {
                String encodedName = region.getEncodedName();
                if (!isDisabledorDisablingRegionInRIT(region)) {
                    RegionState state = forceRegionStateToOffline(region, false);
                    boolean onDeadServer = false;
                    if (state != null) {
                        if (regionStates.wasRegionOnDeadServer(encodedName)) {
                            LOG.info("Skip assigning " + region.getRegionNameAsString() + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName) + " is dead but not processed yet");
                            onDeadServer = true;
                        } else {
                            // Region is offline and eligible: include it in the bulk plan
                            // and keep its lock held until the RPC completes.
                            RegionPlan plan = new RegionPlan(region, state.getServerName(), destination);
                            plans.put(encodedName, plan);
                            states.add(state);
                            continue;
                        }
                    }
                    // Reassign if the region wasn't on a dead server
                    if (!onDeadServer) {
                        LOG.info("failed to force region state to offline, " + "will reassign later: " + region);
                        // assign individually later
                        failedToOpenRegions.add(region);
                    }
                }
                // Release the lock, this region is excluded from bulk assign because
                // we can't update its state, or set its znode to offline.
                Lock lock = locks.remove(encodedName);
                lock.unlock();
            }
            if (server.isStopped()) {
                return false;
            }
            // Add region plans, so we can updateTimers when one region is opened so
            // that unnecessary timeout on RIT is reduced.
            this.addPlans(plans);
            // Phase 2: mark planned regions PENDING_OPEN and build the OPEN RPC payload.
            List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos = new ArrayList<>(states.size());
            for (RegionState state : states) {
                HRegionInfo region = state.getRegion();
                regionStates.updateRegionState(region, State.PENDING_OPEN, destination);
                List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
                if (shouldAssignFavoredNodes(region)) {
                    favoredNodes = server.getFavoredNodesManager().getFavoredNodesWithDNPort(region);
                }
                regionOpenInfos.add(new Pair<>(region, favoredNodes));
            }
            // Move on to open regions.
            try {
                // Send OPEN RPC. If it fails on a IOE or RemoteException,
                // regions will be assigned individually.
                Configuration conf = server.getConfiguration();
                long maxWaitTime = System.currentTimeMillis() + conf.getLong("hbase.regionserver.rpc.startup.waittime", 60000);
                for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) {
                    try {
                        List<RegionOpeningState> regionOpeningStateList = serverManager.sendRegionOpen(destination, regionOpenInfos);
                        for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) {
                            RegionOpeningState openingState = regionOpeningStateList.get(k);
                            if (openingState != RegionOpeningState.OPENED) {
                                HRegionInfo region = regionOpenInfos.get(k).getFirst();
                                LOG.info("Got opening state " + openingState + ", will reassign later: " + region);
                                // Failed opening this region, reassign it later
                                forceRegionStateToOffline(region, true);
                                failedToOpenRegions.add(region);
                            }
                        }
                        break;
                    } catch (IOException e) {
                        if (e instanceof RemoteException) {
                            e = ((RemoteException) e).unwrapRemoteException();
                        }
                        if (e instanceof RegionServerStoppedException) {
                            LOG.warn("The region server was shut down, ", e);
                            // No need to retry, the region server is a goner.
                            return false;
                        } else if (e instanceof ServerNotRunningYetException) {
                            long now = System.currentTimeMillis();
                            if (now < maxWaitTime) {
                                if (LOG.isDebugEnabled()) {
                                    LOG.debug("Server is not yet up; waiting up to " + (maxWaitTime - now) + "ms", e);
                                }
                                Thread.sleep(100);
                                // reset the try count
                                i--;
                                continue;
                            }
                        } else if (e instanceof java.net.SocketTimeoutException && this.serverManager.isServerOnline(destination)) {
                            // open the region on the same server.
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Bulk assigner openRegion() to " + destination + " has timed out, but the regions might" + " already be opened on it.", e);
                            }
                            // wait and reset the re-try count, server might be just busy.
                            Thread.sleep(100);
                            i--;
                            continue;
                        } else if (e instanceof FailedServerException && i < maximumAttempts) {
                            // In case the server is in the failed server list, no point to
                            // retry too soon. Retry after the failed_server_expiry time
                            long sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
                            if (LOG.isDebugEnabled()) {
                                LOG.debug(destination + " is on failed server list; waiting " + sleepTime + "ms", e);
                            }
                            Thread.sleep(sleepTime);
                            continue;
                        }
                        throw e;
                    }
                }
            } catch (IOException e) {
                // Can be a socket timeout, EOF, NoRouteToHost, etc
                LOG.info("Unable to communicate with " + destination + " in order to assign regions, ", e);
                // Roll back: force every planned region offline again so it can be retried.
                for (RegionState state : states) {
                    HRegionInfo region = state.getRegion();
                    forceRegionStateToOffline(region, true);
                }
                return false;
            }
        } finally {
            // Release any locks still held by the bulk path.
            for (Lock lock : locks.values()) {
                lock.unlock();
            }
        }
        // Phase 3: retry excluded/failed regions one at a time.
        if (!failedToOpenRegions.isEmpty()) {
            for (HRegionInfo region : failedToOpenRegions) {
                if (!regionStates.isRegionOnline(region)) {
                    invokeAssign(region);
                }
            }
        }
        // wait for assignment completion
        ArrayList<HRegionInfo> userRegionSet = new ArrayList<>(regions.size());
        for (HRegionInfo region : regions) {
            if (!region.getTable().isSystemTable()) {
                userRegionSet.add(region);
            }
        }
        if (!waitForAssignment(userRegionSet, true, userRegionSet.size(), System.currentTimeMillis())) {
            LOG.debug("some user regions are still in transition: " + userRegionSet);
        }
        LOG.debug("Bulk assigning done for " + destination);
        return true;
    } finally {
        // Record the bulk-assign latency regardless of outcome.
        metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTime() - startTime);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) FailedServerException(org.apache.hadoop.hbase.ipc.FailedServerException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) RegionServerStoppedException(org.apache.hadoop.hbase.regionserver.RegionServerStoppedException) HashSet(java.util.HashSet) Pair(org.apache.hadoop.hbase.util.Pair) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) IOException(java.io.IOException) ReentrantLock(java.util.concurrent.locks.ReentrantLock) Lock(java.util.concurrent.locks.Lock) ServerName(org.apache.hadoop.hbase.ServerName) RegionOpeningState(org.apache.hadoop.hbase.regionserver.RegionOpeningState) RemoteException(org.apache.hadoop.ipc.RemoteException)

Example 5 with Pair

use of org.apache.hadoop.hbase.util.Pair in project hbase by apache.

The method updateAssignmentPlanToRegionServers of the class RegionPlacementMaintainer.

/**
 * Update the assignment plan to all the region servers.
 * <p>
 * Pushes each server's slice of the favored-nodes plan via an UpdateFavoredNodes
 * RPC. Failures for individual servers are collected and logged at the end rather
 * than aborting the whole update.
 * @param plan the favored-nodes plan to distribute
 * @throws IOException if the current region assignment snapshot cannot be fetched
 */
private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws IOException {
    LOG.info("Start to update the region servers with the new assignment plan");
    // Get the region to region server map
    Map<ServerName, List<HRegionInfo>> currentAssignment = this.getRegionAssignmentSnapshot().getRegionServerToRegionMap();
    // track of the failed and succeeded updates
    int succeededNum = 0;
    Map<ServerName, Exception> failedUpdateMap = new HashMap<>();
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : currentAssignment.entrySet()) {
        List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
        try {
            // Keep track of the favored updates for the current region server
            FavoredNodesPlan singleServerPlan = null;
            // Find out all the updates for the current region server
            for (HRegionInfo region : entry.getValue()) {
                List<ServerName> favoredServerList = plan.getFavoredNodes(region);
                if (favoredServerList != null && favoredServerList.size() == FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
                    // Create the single server plan if necessary
                    if (singleServerPlan == null) {
                        singleServerPlan = new FavoredNodesPlan();
                    }
                    // Update the single server update
                    singleServerPlan.updateFavoredNodesMap(region, favoredServerList);
                    regionUpdateInfos.add(new Pair<>(region, favoredServerList));
                }
            }
            if (singleServerPlan != null) {
                // Update the current region server with its updated favored nodes
                BlockingInterface currentRegionServer = ((ClusterConnection) this.connection).getAdmin(entry.getKey());
                UpdateFavoredNodesRequest request = RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
                UpdateFavoredNodesResponse updateFavoredNodesResponse = currentRegionServer.updateFavoredNodes(null, request);
                LOG.info("Region server " + ProtobufUtil.getServerInfo(null, currentRegionServer).getServerName() + " has updated " + updateFavoredNodesResponse.getResponse() + " / " + singleServerPlan.getAssignmentMap().size() + " regions with the assignment plan");
                succeededNum++;
            }
        } catch (Exception e) {
            // Remember the failure and keep pushing the plan to the remaining servers.
            failedUpdateMap.put(entry.getKey(), e);
        }
    }
    // log the succeeded updates
    LOG.info("Updated " + succeededNum + " region servers with " + "the new assignment plan");
    // log the failed updates
    int failedNum = failedUpdateMap.size();
    if (failedNum != 0) {
        // Fixed message: removed a stray "+ " that was embedded inside the string
        // literal ("the following + N"), and corrected "its" -> "their".
        LOG.error("Failed to update the following " + failedNum + " region servers with their corresponding favored nodes");
        for (Map.Entry<ServerName, Exception> entry : failedUpdateMap.entrySet()) {
            // Pass the exception itself so the stack trace is preserved in the log,
            // instead of flattening it to getMessage() only.
            LOG.error("Failed to update " + entry.getKey().getHostAndPort() + " because of " + entry.getValue().getMessage(), entry.getValue());
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) FavoredNodesPlan(org.apache.hadoop.hbase.favored.FavoredNodesPlan) IOException(java.io.IOException) ParseException(org.apache.commons.cli.ParseException) UpdateFavoredNodesResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) UpdateFavoredNodesRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) BlockingInterface(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap) Pair(org.apache.hadoop.hbase.util.Pair)

Aggregations

Pair (org.apache.hadoop.hbase.util.Pair)186 ArrayList (java.util.ArrayList)85 Test (org.junit.Test)48 IOException (java.io.IOException)44 List (java.util.List)41 Put (org.apache.hadoop.hbase.client.Put)31 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)30 HashMap (java.util.HashMap)28 ServerName (org.apache.hadoop.hbase.ServerName)24 Path (org.apache.hadoop.fs.Path)23 KeyValue (org.apache.hadoop.hbase.KeyValue)23 Mutation (org.apache.hadoop.hbase.client.Mutation)23 Connection (java.sql.Connection)17 Map (java.util.Map)17 Scan (org.apache.hadoop.hbase.client.Scan)16 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)15 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)15 TableName (org.apache.hadoop.hbase.TableName)14 InterruptedIOException (java.io.InterruptedIOException)13 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)12