Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by Apache.
Class TestAccessController, method testListProcedures.
/**
 * Checks that {@code postListProcedures} is gated by access rights: superuser,
 * admins and the procedure owner may list procedures, while read/write-only and
 * unprivileged users may not.
 */
@Test
public void testListProcedures() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> executor =
      TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();

  // Submit one DDL procedure owned by USER_OWNER so there is something to list.
  Procedure proc = new TestTableDDLProcedure(executor.getEnvironment(), tableName);
  proc.setOwner(USER_OWNER);
  final long procId = executor.submitProcedure(proc);
  final List<ProcedureInfo> procedures = executor.listProcedures();

  AccessTestAction listProceduresAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      // Hand the coprocessor clones so the shared list entries stay untouched
      // across the repeated verify* invocations below.
      List<ProcedureInfo> clones = new ArrayList<>(procedures.size());
      for (ProcedureInfo info : procedures) {
        clones.add(info.clone());
      }
      ACCESS_CONTROLLER.postListProcedures(
          ObserverContext.createAndPrepare(CP_ENV, null), clones);
      return null;
    }
  };

  verifyAllowed(listProceduresAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
  verifyAllowed(listProceduresAction, USER_OWNER);
  verifyIfNull(listProceduresAction,
      USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by Apache.
Class HMaster, method startProcedureExecutor.
/**
 * Creates and starts the master procedure executor, backed by a WAL-based
 * procedure store under the master procedure log directory.
 *
 * @throws IOException if resolving the WAL root directory or its filesystem fails
 */
private void startProcedureExecutor() throws IOException {
  final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
  final Path walDir = new Path(FSUtils.getWALRootDir(this.conf),
      MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);

  procedureStore = new WALProcedureStore(conf, walDir.getFileSystem(conf), walDir,
      new MasterProcedureEnv.WALStoreLeaseRecovery(this));
  procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
  // Fix: use the diamond operator instead of the raw ProcedureExecutor type, which
  // silently disabled generic type checking on the environment parameter.
  procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore,
      procEnv.getProcedureScheduler());
  configurationManager.registerObserver(procEnv);

  final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
      Math.max(Runtime.getRuntime().availableProcessors(),
          MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
  final boolean abortOnCorruption = conf.getBoolean(
      MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
      MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
  // Start the store before the executor: the executor replays persisted procedures
  // from the store on startup.
  procedureStore.start(numThreads);
  procedureExecutor.start(numThreads, abortOnCorruption);
}
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by Apache.
Class AssignmentManagerUtil, method removeNonDefaultReplicas.
/**
 * Removes the non-default (replica) regions derived from the given primary regions
 * from the master's in-memory state: region states, per-server region maps, and
 * (when enabled) the favored-nodes mapping.
 *
 * @param env the master procedure environment providing access to master services
 * @param regions stream of primary region infos whose replicas should be removed
 * @param regionReplication configured replica count; replica ids 1..regionReplication-1
 *          are generated for each primary region
 */
static void removeNonDefaultReplicas(MasterProcedureEnv env, Stream<RegionInfo> regions,
    int regionReplication) {
  // Hoisted out of the lambda: the FavoredNodesManager reference is loop-invariant,
  // so fetch it once instead of once per replica region.
  FavoredNodesManager fnm = env.getMasterServices().getFavoredNodesManager();
  // Remove from in-memory states
  regions
      .flatMap(hri -> IntStream.range(1, regionReplication)
          .mapToObj(i -> RegionReplicaUtil.getRegionInfoForReplica(hri, i)))
      .forEach(hri -> {
        env.getAssignmentManager().getRegionStates().deleteRegion(hri);
        env.getMasterServices().getServerManager().removeRegion(hri);
        // fnm is null when favored nodes are disabled — TODO confirm against
        // MasterServices.getFavoredNodesManager contract.
        if (fnm != null) {
          fnm.deleteFavoredNodesForRegions(Collections.singletonList(hri));
        }
      });
}
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by Apache.
Class AssignmentManagerUtil, method createAssignProcedures.
/**
 * Create assign procedures for the give regions, according to the {@code regionReplication}.
 * <p/>
 * For rolling back, we will submit procedures directly to the {@code ProcedureExecutor}, so it is
 * possible that we persist the newly scheduled procedures, and then crash before persisting the
 * rollback state, so when we arrive here the second time, it is possible that some regions have
 * already been associated with a TRSP.
 * @param env the master procedure environment
 * @param regions primary regions to assign
 * @param regionReplication configured replica count; 1 means no replicas
 * @param targetServer server to assign the primary regions to; excluded when
 *          round-robin-assigning the replica regions
 * @param ignoreIfInTransition if true, will skip creating TRSP for the given region if it is
 *          already in transition, otherwise we will add an assert that it should not in
 *          transition.
 */
private static TransitRegionStateProcedure[] createAssignProcedures(MasterProcedureEnv env, List<RegionInfo> regions, int regionReplication, ServerName targetServer, boolean ignoreIfInTransition) {
// create the assign procs only for the primary region using the targetServer
TransitRegionStateProcedure[] primaryRegionProcs = regions.stream().map(env.getAssignmentManager().getRegionStates()::getOrCreateRegionStateNode).map(regionNode -> {
TransitRegionStateProcedure proc = TransitRegionStateProcedure.assign(env, regionNode.getRegionInfo(), targetServer);
// The TRSP is created before taking the node lock; it is only attached to the
// node (setProcedure) while the lock is held, so an in-transition region simply
// discards the unattached proc.
regionNode.lock();
try {
if (ignoreIfInTransition) {
if (regionNode.isInTransition()) {
// Already owned by another TRSP (see the rollback note in the javadoc):
// skip this region; the null is filtered out below.
return null;
}
} else {
// Caller guarantees the region is not in transition here; an existing TRSP
// would indicate a scheduling bug, so fail fast when assertions are enabled.
assert !regionNode.isInTransition();
}
regionNode.setProcedure(proc);
} finally {
regionNode.unlock();
}
return proc;
}).filter(p -> p != null).toArray(TransitRegionStateProcedure[]::new);
if (regionReplication == DEFAULT_REGION_REPLICA) {
// this is the default case
return primaryRegionProcs;
}
// collect the replica region infos
List<RegionInfo> replicaRegionInfos = new ArrayList<RegionInfo>(regions.size() * (regionReplication - 1));
for (RegionInfo hri : regions) {
// start the index from 1
for (int i = 1; i < regionReplication; i++) {
replicaRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(hri, i));
}
}
// create round robin procs. Note that we exclude the primary region's target server
TransitRegionStateProcedure[] replicaRegionAssignProcs = env.getAssignmentManager().createRoundRobinAssignProcedures(replicaRegionInfos, Collections.singletonList(targetServer));
// combine both the procs and return the result
return ArrayUtils.addAll(primaryRegionProcs, replicaRegionAssignProcs);
}
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by Apache.
Class CatalogJanitor, method cleanMergeRegion.
/**
 * If merged region no longer holds reference to the merge regions, archive merge region on hdfs
 * and perform deleting references in hbase:meta
 * @param mergedRegion the merged child region to check for references
 * @param parents the parent regions that were merged into {@code mergedRegion}
 * @return true if we delete references in merged region on hbase:meta and archive the files on
 *         the file system
 * @throws IOException if reading the table descriptor or region references fails
 */
private boolean cleanMergeRegion(final RegionInfo mergedRegion, List<RegionInfo> parents)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Cleaning merged region {}", mergedRegion);
  }
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = CommonFSUtils.getTableDir(rootdir, mergedRegion.getTable());
  TableDescriptor htd = getDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    // Fix: include the exception so the cause of the failed open (missing dir,
    // permissions, transient FS error) is not silently dropped from the log.
    LOG.warn("Merged region does not exist: {}", mergedRegion.getEncodedName(), e);
  }
  // A null regionFs (open failed) is treated the same as "no references": the
  // parents are safe to archive and their meta references deleted.
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Deleting parents ({}) from fs; merged child {} no longer holds references",
          parents.stream().map(r -> RegionInfo.getShortNameToLog(r))
              .collect(Collectors.joining(", ")),
          mergedRegion);
    }
    ProcedureExecutor<MasterProcedureEnv> pe = this.services.getMasterProcedureExecutor();
    GCMultipleMergedRegionsProcedure mergeRegionProcedure =
        new GCMultipleMergedRegionsProcedure(pe.getEnvironment(), mergedRegion, parents);
    pe.submitProcedure(mergeRegionProcedure);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Submitted procedure {} for merged region {}", mergeRegionProcedure,
          mergedRegion);
    }
    return true;
  }
  return false;
}
Aggregations