Example 31 with MasterProcedureEnv

Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Class TestAccessController, method testListProcedures.

@Test
public void testListProcedures() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
    Procedure proc = new TestTableDDLProcedure(procExec.getEnvironment(), tableName);
    proc.setOwner(USER_OWNER);
    final long procId = procExec.submitProcedure(proc);
    final List<ProcedureInfo> procInfoList = procExec.listProcedures();
    AccessTestAction listProceduresAction = new AccessTestAction() {

        @Override
        public Object run() throws Exception {
            List<ProcedureInfo> procInfoListClone = new ArrayList<>(procInfoList.size());
            for (ProcedureInfo pi : procInfoList) {
                procInfoListClone.add(pi.clone());
            }
            ACCESS_CONTROLLER.postListProcedures(ObserverContext.createAndPrepare(CP_ENV, null), procInfoListClone);
            return null;
        }
    };
    verifyAllowed(listProceduresAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
    verifyAllowed(listProceduresAction, USER_OWNER);
    verifyIfNull(listProceduresAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ProcedureInfo(org.apache.hadoop.hbase.ProcedureInfo) ArrayList(java.util.ArrayList) LockProcedure(org.apache.hadoop.hbase.master.locking.LockProcedure) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Test(org.junit.Test)
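
The test above only submits the procedure and then drives the postListProcedures hook by hand; it never waits for the procedure to finish. If an assertion depends on completion, a minimal wait loop can be added. This is a sketch, not part of the test: it assumes ProcedureExecutor#isFinished(long), and the class and method names are illustrative (test code would normally use ProcedureTestingUtility.waitProcedure instead).

import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

final class ProcedureWaitSketch {

    // Poll until the submitted procedure (identified by the id returned from
    // submitProcedure) has finished executing.
    static void waitForProcedure(ProcedureExecutor<MasterProcedureEnv> procExec, long procId)
            throws InterruptedException {
        while (!procExec.isFinished(procId)) {
            Thread.sleep(100);
        }
    }
}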

Example 32 with MasterProcedureEnv

Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Class HMaster, method startProcedureExecutor.

private void startProcedureExecutor() throws IOException {
    final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
    final Path walDir = new Path(FSUtils.getWALRootDir(this.conf), MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
    procedureStore = new WALProcedureStore(conf, walDir.getFileSystem(conf), walDir, new MasterProcedureEnv.WALStoreLeaseRecovery(this));
    procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
    procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore, procEnv.getProcedureScheduler());
    configurationManager.registerObserver(procEnv);
    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, Math.max(Runtime.getRuntime().availableProcessors(), MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
    final boolean abortOnCorruption = conf.getBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
    procedureStore.start(numThreads);
    procedureExecutor.start(numThreads, abortOnCorruption);
}
Also used : Path(org.apache.hadoop.fs.Path) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) WALProcedureStore(org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore)
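
Both tunables read by this method come from the master Configuration. Below is a minimal sketch of overriding them programmatically via the same MasterProcedureConstants keys; the sketch class and the values are illustrative, and in a real deployment you would normally set the corresponding properties in hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;

final class ProcedureExecutorConfigSketch {

    // Build a Configuration overriding the two settings startProcedureExecutor reads.
    static Configuration tunedMasterConf() {
        Configuration conf = HBaseConfiguration.create();
        // Use 16 worker threads instead of max(availableProcessors, DEFAULT_MIN_MASTER_PROCEDURE_THREADS).
        conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 16);
        // Abort at load time if corrupted procedures are found in the procedure WAL.
        conf.setBoolean(MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, true);
        return conf;
    }
}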

Example 33 with MasterProcedureEnv

Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Class AssignmentManagerUtil, method removeNonDefaultReplicas.

static void removeNonDefaultReplicas(MasterProcedureEnv env, Stream<RegionInfo> regions, int regionReplication) {
    // Remove from in-memory states
    regions.flatMap(hri -> IntStream.range(1, regionReplication).mapToObj(i -> RegionReplicaUtil.getRegionInfoForReplica(hri, i))).forEach(hri -> {
        env.getAssignmentManager().getRegionStates().deleteRegion(hri);
        env.getMasterServices().getServerManager().removeRegion(hri);
        FavoredNodesManager fnm = env.getMasterServices().getFavoredNodesManager();
        if (fnm != null) {
            fnm.deleteFavoredNodesForRegions(Collections.singletonList(hri));
        }
    });
}
Also used : IntStream(java.util.stream.IntStream) DEFAULT_HBASE_ENABLE_SEPARATE_CHILD_REGIONS(org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_ENABLE_SEPARATE_CHILD_REGIONS) ListIterator(java.util.ListIterator) GetRegionInfoResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse) FavoredNodesManager(org.apache.hadoop.hbase.favored.FavoredNodesManager) RegionReplicaUtil(org.apache.hadoop.hbase.client.RegionReplicaUtil) WALSplitUtil(org.apache.hadoop.hbase.wal.WALSplitUtil) IOException(java.io.IOException) ArrayUtils(org.apache.commons.lang3.ArrayUtils) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Collectors(java.util.stream.Collectors) RequestConverter(org.apache.hadoop.hbase.shaded.protobuf.RequestConverter) ArrayList(java.util.ArrayList) GetRegionInfoRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest) List(java.util.List) HConstants(org.apache.hadoop.hbase.HConstants) Stream(java.util.stream.Stream) FutureUtils(org.apache.hadoop.hbase.util.FutureUtils) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) AsyncRegionServerAdmin(org.apache.hadoop.hbase.client.AsyncRegionServerAdmin) Collections(java.util.Collections) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ServerName(org.apache.hadoop.hbase.ServerName) FavoredNodesManager(org.apache.hadoop.hbase.favored.FavoredNodesManager)
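
The flatMap above yields one entry per non-default replica (replica ids 1 through regionReplication - 1) for every primary region in the stream. Here is a self-contained sketch of the same expansion pattern with the HBase types stripped out; the region names and "_replica" suffix are purely illustrative.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;

final class ReplicaExpansionSketch {

    // For each primary region name, emit one id per non-default replica
    // (ids 1..regionReplication-1), mirroring the flatMap in removeNonDefaultReplicas.
    static List<String> nonDefaultReplicas(Stream<String> primaryRegions, int regionReplication) {
        return primaryRegions
            .flatMap(name -> IntStream.range(1, regionReplication)
                .mapToObj(i -> name + "_replica" + i))
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // Two primaries with regionReplication = 3 yield four non-default replicas:
        // [r1_replica1, r1_replica2, r2_replica1, r2_replica2]
        System.out.println(nonDefaultReplicas(Stream.of("r1", "r2"), 3));
    }
}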

Example 34 with MasterProcedureEnv

Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Class AssignmentManagerUtil, method createAssignProcedures.

/**
 * Create assign procedures for the given regions, according to the {@code regionReplication}.
 * <p/>
 * For rolling back, we will submit procedures directly to the {@code ProcedureExecutor}, so it is
 * possible that we persist the newly scheduled procedures, and then crash before persisting the
 * rollback state, so when we arrive here the second time, it is possible that some regions have
 * already been associated with a TRSP.
 * @param ignoreIfInTransition if true, will skip creating TRSP for the given region if it is
 *          already in transition, otherwise we will add an assert that it should not be in
 *          transition.
 */
private static TransitRegionStateProcedure[] createAssignProcedures(MasterProcedureEnv env, List<RegionInfo> regions, int regionReplication, ServerName targetServer, boolean ignoreIfInTransition) {
    // create the assign procs only for the primary region using the targetServer
    TransitRegionStateProcedure[] primaryRegionProcs = regions.stream().map(env.getAssignmentManager().getRegionStates()::getOrCreateRegionStateNode).map(regionNode -> {
        TransitRegionStateProcedure proc = TransitRegionStateProcedure.assign(env, regionNode.getRegionInfo(), targetServer);
        regionNode.lock();
        try {
            if (ignoreIfInTransition) {
                if (regionNode.isInTransition()) {
                    return null;
                }
            } else {
                // Should never fail: we hold the exclusive region lock, and the region is newly
                // created or has been cleanly closed, so no other procedure should be transiting it.
                assert !regionNode.isInTransition();
            }
            regionNode.setProcedure(proc);
        } finally {
            regionNode.unlock();
        }
        return proc;
    }).filter(p -> p != null).toArray(TransitRegionStateProcedure[]::new);
    if (regionReplication == DEFAULT_REGION_REPLICA) {
        // this is the default case
        return primaryRegionProcs;
    }
    // collect the replica region infos
    List<RegionInfo> replicaRegionInfos = new ArrayList<RegionInfo>(regions.size() * (regionReplication - 1));
    for (RegionInfo hri : regions) {
        // start the index from 1
        for (int i = 1; i < regionReplication; i++) {
            replicaRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(hri, i));
        }
    }
    // create round robin procs. Note that we exclude the primary region's target server
    TransitRegionStateProcedure[] replicaRegionAssignProcs = env.getAssignmentManager().createRoundRobinAssignProcedures(replicaRegionInfos, Collections.singletonList(targetServer));
    // combine both the procs and return the result
    return ArrayUtils.addAll(primaryRegionProcs, replicaRegionAssignProcs);
}
Also used : IntStream(java.util.stream.IntStream) DEFAULT_HBASE_ENABLE_SEPARATE_CHILD_REGIONS(org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_ENABLE_SEPARATE_CHILD_REGIONS) ListIterator(java.util.ListIterator) GetRegionInfoResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse) FavoredNodesManager(org.apache.hadoop.hbase.favored.FavoredNodesManager) RegionReplicaUtil(org.apache.hadoop.hbase.client.RegionReplicaUtil) WALSplitUtil(org.apache.hadoop.hbase.wal.WALSplitUtil) IOException(java.io.IOException) ArrayUtils(org.apache.commons.lang3.ArrayUtils) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Collectors(java.util.stream.Collectors) RequestConverter(org.apache.hadoop.hbase.shaded.protobuf.RequestConverter) ArrayList(java.util.ArrayList) GetRegionInfoRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest) List(java.util.List) HConstants(org.apache.hadoop.hbase.HConstants) Stream(java.util.stream.Stream) FutureUtils(org.apache.hadoop.hbase.util.FutureUtils) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) AsyncRegionServerAdmin(org.apache.hadoop.hbase.client.AsyncRegionServerAdmin) Collections(java.util.Collections) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
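
When regionReplication is above the default, the returned array combines the primary procedures with one round-robin assign per extra replica, so for N regions it holds N * regionReplication entries, minus any primaries skipped because they were already in transition. A tiny, purely illustrative count check:

final class AssignProcedureCountSketch {

    // One TRSP per primary plus (regionReplication - 1) replica assigns per region,
    // assuming no primary is skipped as already in transition.
    static int expectedProcedureCount(int numRegions, int regionReplication) {
        return numRegions + numRegions * (regionReplication - 1); // == numRegions * regionReplication
    }

    public static void main(String[] args) {
        System.out.println(expectedProcedureCount(5, 3)); // 5 regions, replication 3 -> 15 procedures
    }
}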

Example 35 with MasterProcedureEnv

Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

Class CatalogJanitor, method cleanMergeRegion.

/**
 * If the merged region no longer holds references to the regions it was merged from, archive
 * those merge parents on hdfs and delete the references in hbase:meta.
 * @return true if we deleted the references in hbase:meta for the merged region and archived
 *         the parent files on the file system
 */
private boolean cleanMergeRegion(final RegionInfo mergedRegion, List<RegionInfo> parents) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Cleaning merged region {}", mergedRegion);
    }
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = CommonFSUtils.getTableDir(rootdir, mergedRegion.getTable());
    TableDescriptor htd = getDescriptor(mergedRegion.getTable());
    HRegionFileSystem regionFs = null;
    try {
        regionFs = HRegionFileSystem.openRegionFromFileSystem(this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
    } catch (IOException e) {
        LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
    }
    if (regionFs == null || !regionFs.hasReferences(htd)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleting parents ({}) from fs; merged child {} no longer holds references", parents.stream().map(r -> RegionInfo.getShortNameToLog(r)).collect(Collectors.joining(", ")), mergedRegion);
        }
        ProcedureExecutor<MasterProcedureEnv> pe = this.services.getMasterProcedureExecutor();
        GCMultipleMergedRegionsProcedure mergeRegionProcedure = new GCMultipleMergedRegionsProcedure(pe.getEnvironment(), mergedRegion, parents);
        pe.submitProcedure(mergeRegionProcedure);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Submitted procedure {} for merged region {}", mergeRegionProcedure, mergedRegion);
        }
        return true;
    }
    return false;
}
Also used : Path(org.apache.hadoop.fs.Path) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) GCMultipleMergedRegionsProcedure(org.apache.hadoop.hbase.master.assignment.GCMultipleMergedRegionsProcedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)

Aggregations

MasterProcedureEnv (org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) 65
Test (org.junit.Test) 48
TableName (org.apache.hadoop.hbase.TableName) 42
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 39
IOException (java.io.IOException) 16
ProcedureExecutor (org.apache.hadoop.hbase.procedure2.ProcedureExecutor) 16
ArrayList (java.util.ArrayList) 13
Path (org.apache.hadoop.fs.Path) 13
HBaseClassTestRule (org.apache.hadoop.hbase.HBaseClassTestRule) 13
MasterTests (org.apache.hadoop.hbase.testclassification.MasterTests) 13
Bytes (org.apache.hadoop.hbase.util.Bytes) 13
ClassRule (org.junit.ClassRule) 13
Category (org.junit.experimental.categories.Category) 13
BeforeClass (org.junit.BeforeClass) 12
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil) 11
ServerName (org.apache.hadoop.hbase.ServerName) 10
Procedure (org.apache.hadoop.hbase.procedure2.Procedure) 10
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 10
MediumTests (org.apache.hadoop.hbase.testclassification.MediumTests) 10
AfterClass (org.junit.AfterClass) 10