Search in sources:

Example 16 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

In class AssignmentManager, the method stop().

public void stop() {
    // CAS the running flag from true to false; only the caller that wins the
    // race performs the shutdown, every other caller returns immediately.
    if (!running.compareAndSet(true, false)) {
        return;
    }
    LOG.info("Stopping assignment manager");
    // The AM is started before the procedure executor, but the actual work is
    // loaded/submitted only once the executor exists — so it may still be null here.
    final boolean procExecAvailable = master.getMasterProcedureExecutor() != null;
    if (procExecAvailable) {
        // Deregister the RIT chore, plus the dead-server metric chore if one was set up.
        master.getMasterProcedureExecutor().removeChore(this.ritChore);
        if (this.deadMetricChore != null) {
            master.getMasterProcedureExecutor().removeChore(this.deadMetricChore);
        }
    }
    // Shut down the background assignment thread.
    stopAssignmentThread();
    // Drop all in-memory region state tracked by the RegionStateStore.
    regionStates.clear();
    if (procExecAvailable) {
        // Reset meta events (for testing): park the meta-load event and mark
        // every meta region as unassigned.
        metaLoadEvent.suspend();
        for (RegionInfo hri : getMetaRegionSet()) {
            setMetaAssigned(hri, false);
        }
    }
}
Also used : RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)

Example 17 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

In class MergeTableRegionsProcedure, the method checkRegionsToMerge().

/**
 * Validate the candidate regions before a merge is attempted: no duplicates, at
 * least two regions, all from the same table, default replicas only, every region
 * online, and (unless {@code force}) each consecutive pair adjacent or overlapping.
 * @throws MergeRegionException If unable to merge regions for whatever reasons.
 */
private static void checkRegionsToMerge(MasterProcedureEnv env, final RegionInfo[] regions, final boolean force) throws MergeRegionException {
    // A region cannot be merged with itself: every entry must be distinct.
    final long uniqueCount = Arrays.stream(regions).distinct().count();
    if (uniqueCount != regions.length) {
        throw new MergeRegionException("Duplicate regions specified; cannot merge a region to itself. Passed in " + regions.length + " but only " + uniqueCount + " unique.");
    }
    if (uniqueCount < 2) {
        throw new MergeRegionException("Need two Regions at least to run a Merge");
    }
    RegionInfo prior = null;
    for (RegionInfo ri : regions) {
        if (prior != null) {
            // Every region must belong to the same table as the one before it.
            if (!prior.getTable().equals(ri.getTable())) {
                String msg = "Can't merge regions from different tables: " + prior + ", " + ri;
                LOG.warn(msg);
                throw new MergeRegionException(msg);
            }
            // Unless forced, consecutive regions must touch or overlap in key space.
            if (!force) {
                boolean touches = ri.isAdjacent(prior) || ri.isOverlap(prior);
                if (!touches) {
                    String msg = "Unable to merge non-adjacent or non-overlapping regions '" + prior.getShortNameToLog() + "', '" + ri.getShortNameToLog() + "' when force=false";
                    LOG.warn(msg);
                    throw new MergeRegionException(msg);
                }
            }
        }
        // Only default replicas may participate in a merge.
        if (ri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
            throw new MergeRegionException("Can't merge non-default replicas; " + ri);
        }
        // Surface offline regions as a merge failure rather than a retryable error.
        try {
            checkOnline(env, ri);
        } catch (DoNotRetryRegionException dnrre) {
            throw new MergeRegionException(dnrre);
        }
        prior = ri;
    }
}
Also used : RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) MergeRegionException(org.apache.hadoop.hbase.exceptions.MergeRegionException) DoNotRetryRegionException(org.apache.hadoop.hbase.client.DoNotRetryRegionException)

Example 18 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

In class MergeTableRegionsProcedure, the method prepareMergeRegion().

/**
 * Prepare merge and do some check.
 * <p>
 * Runs the pre-merge validation: refuses to proceed while the table is being
 * snapshotted, while the cluster merge switch or the table's merge flag is off,
 * if any parent region still carries merge qualifiers from an earlier merge, or
 * if any region is unknown, not open, or otherwise not mergeable. On success the
 * regions are moved into MERGING state.
 * @return true if the merge can proceed; false if a failure was recorded on this
 *         procedure via setFailure (switch off, merge disabled, or not mergeable).
 * @throws IOException on snapshot/table-descriptor lookup problems, or wrapped in
 *         MergeRegionException/UnknownRegionException for validation failures.
 */
private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOException {
    // Fail if we are taking snapshot for the given table
    TableName tn = regionsToMerge[0].getTable();
    if (env.getMasterServices().getSnapshotManager().isTakingSnapshot(tn)) {
        throw new MergeRegionException("Skip merging regions " + RegionInfo.getShortNameToLog(regionsToMerge) + ", because we are snapshotting " + tn);
    }
    // the switch was set to false after submit.
    if (!env.getMasterServices().isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
        String regionsStr = Arrays.deepToString(this.regionsToMerge);
        LOG.warn("Merge switch is off! skip merge of " + regionsStr);
        setFailure(getClass().getSimpleName(), new IOException("Merge of " + regionsStr + " failed because merge switch is off"));
        return false;
    }
    // Merge can also be disabled per-table in the table descriptor.
    if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isMergeEnabled()) {
        String regionsStr = Arrays.deepToString(regionsToMerge);
        LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionsStr);
        setFailure(getClass().getSimpleName(), new IOException("Merge of " + regionsStr + " failed as region merge is disabled for the table"));
        return false;
    }
    RegionStates regionStates = env.getAssignmentManager().getRegionStates();
    RegionStateStore regionStateStore = env.getAssignmentManager().getRegionStateStore();
    // Per-region checks: no outstanding merge qualifiers, known state, open, mergeable.
    for (RegionInfo ri : this.regionsToMerge) {
        if (regionStateStore.hasMergeRegions(ri)) {
            String msg = "Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge) + ", because a parent, " + RegionInfo.getShortNameToLog(ri) + ", has a merge qualifier " + "(if a 'merge column' in parent, it was recently merged but still has outstanding " + "references to its parents that must be cleared before it can participate in merge -- " + "major compact it to hurry clearing of its references)";
            LOG.warn(msg);
            throw new MergeRegionException(msg);
        }
        RegionState state = regionStates.getRegionState(ri.getEncodedName());
        if (state == null) {
            throw new UnknownRegionException(RegionInfo.getShortNameToLog(ri) + " UNKNOWN (Has it been garbage collected?)");
        }
        if (!state.isOpened()) {
            throw new MergeRegionException("Unable to merge regions that are NOT online: " + ri);
        }
        // along with the failure, so we can see why regions are not mergeable at this time.
        try {
            if (!isMergeable(env, state)) {
                setFailure(getClass().getSimpleName(), new MergeRegionException("Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge) + ", because a parent, " + RegionInfo.getShortNameToLog(ri) + ", is not mergeable"));
                return false;
            }
        } catch (IOException e) {
            // Wrap so the region name travels with the failure cause.
            IOException ioe = new IOException(RegionInfo.getShortNameToLog(ri) + " NOT mergeable", e);
            setFailure(getClass().getSimpleName(), ioe);
            return false;
        }
    }
    // Update region states to Merging
    setRegionStateToMerging(env);
    return true;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionState(org.apache.hadoop.hbase.master.RegionState) UnknownRegionException(org.apache.hadoop.hbase.UnknownRegionException) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) MergeRegionException(org.apache.hadoop.hbase.exceptions.MergeRegionException) IOException(java.io.IOException)

Example 19 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

In class MergeTableRegionsProcedure, the method createMergedRegionInfo().

/**
 * Create merged region info by looking at passed in <code>regionsToMerge</code>
 * to figure what extremes for start and end keys to use; merged region needs
 * to have an extent sufficient to cover all regions-to-merge.
 * @param regionsToMerge the (non-empty) regions being merged; all same table.
 * @return a new RegionInfo spanning the union of the inputs' key ranges.
 */
private static RegionInfo createMergedRegionInfo(final RegionInfo[] regionsToMerge) {
    byte[] lowestStartKey = null;
    byte[] highestEndKey = null;
    // Region Id is a timestamp. Merged region's id can't be less than that of
    // merging regions else will insert at wrong location in hbase:meta (See HBASE-710).
    long highestRegionId = -1;
    for (RegionInfo ri : regionsToMerge) {
        // An empty start key sorts lowest in a raw byte compare, so a plain
        // Bytes.compareTo picks the minimum start key correctly.
        if (lowestStartKey == null || Bytes.compareTo(ri.getStartKey(), lowestStartKey) < 0) {
            lowestStartKey = ri.getStartKey();
        }
        // The last region's end key is empty, i.e. open-ended maximum (isLast()),
        // but a raw byte compare treats empty as the SMALLEST value. Once we hold
        // the empty end key, never let a later non-empty end key overwrite it --
        // the previous unconditional compare could lose the open-ended extent when
        // the last region was not the final array element.
        if (highestEndKey == null) {
            highestEndKey = ri.getEndKey();
        } else if (highestEndKey.length > 0 && (ri.isLast() || Bytes.compareTo(ri.getEndKey(), highestEndKey) > 0)) {
            highestEndKey = ri.getEndKey();
        }
        highestRegionId = Math.max(highestRegionId, ri.getRegionId());
    }
    // Merged region is sorted between two merging regions in META
    return RegionInfoBuilder.newBuilder(regionsToMerge[0].getTable()).setStartKey(lowestStartKey).setEndKey(highestEndKey).setSplit(false).setRegionId(highestRegionId + 1).build();
}
Also used : RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)

Example 20 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

In class MergeTableRegionsProcedure, the method createMergedRegion().

/**
 * Create merged region.
 * The way the merge works is that we make a 'merges' temporary
 * directory in the FIRST parent region to merge (Do not change this without
 * also changing the rollback where we look in this FIRST region for the
 * merge dir). We then collect here references to all the store files in all
 * the parent regions including those of the FIRST parent region into a
 * subdirectory, named for the resultant merged region. We then call
 * commitMergeRegion. It finds this subdirectory of storefile references
 * and moves them under the new merge region (creating the region layout
 * as side effect). After assign of the new merge region, we will run a
 * compaction. This will undo the references but the reference files remain
 * in place until the archiver runs (which it does on a period as a chore
 * in the RegionServer that hosts the merge region -- see
 * CompactedHFilesDischarger). Once the archiver has moved aside the
 * no-longer used references, the merge region no longer has references.
 * The catalog janitor will notice when it runs next and it will remove
 * the old parent regions.
 */
private void createMergedRegion(final MasterProcedureEnv env) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), regionsToMerge[0].getTable());
    final FileSystem fs = mfs.getFileSystem();
    List<Path> mergedFiles = new ArrayList<>();
    HRegionFileSystem mergeRegionFs = HRegionFileSystem.createRegionOnFileSystem(env.getMasterConfiguration(), fs, tableDir, mergedRegion);
    // Check before first use: the original placed this assert AFTER the loop had
    // already dereferenced mergeRegionFs, so it could never fire when enabled.
    assert mergeRegionFs != null;
    // Collect references to every parent's store files under the merged region's
    // temporary subdirectory.
    for (RegionInfo ri : this.regionsToMerge) {
        HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(env.getMasterConfiguration(), fs, tableDir, ri, false);
        mergedFiles.addAll(mergeStoreFiles(env, regionFs, mergeRegionFs, mergedRegion));
    }
    mergeRegionFs.commitMergedRegion(mergedFiles, env);
    // Prepare to create merged regions
    env.getAssignmentManager().getRegionStates().getOrCreateRegionStateNode(mergedRegion).setState(State.MERGING_NEW);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)

Aggregations

RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)824 Test (org.junit.Test)416 TableName (org.apache.hadoop.hbase.TableName)311 ServerName (org.apache.hadoop.hbase.ServerName)191 ArrayList (java.util.ArrayList)175 IOException (java.io.IOException)174 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)174 Path (org.apache.hadoop.fs.Path)141 List (java.util.List)118 HashMap (java.util.HashMap)90 Table (org.apache.hadoop.hbase.client.Table)90 Map (java.util.Map)81 Put (org.apache.hadoop.hbase.client.Put)81 Configuration (org.apache.hadoop.conf.Configuration)80 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)67 TreeMap (java.util.TreeMap)66 Result (org.apache.hadoop.hbase.client.Result)59 FileSystem (org.apache.hadoop.fs.FileSystem)58 Cell (org.apache.hadoop.hbase.Cell)50 Scan (org.apache.hadoop.hbase.client.Scan)46