Search in sources:

Example 26 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

From the class TestSplitTableRegionProcedure, method testSplitTableRegion.

@Test
public void testSplitTableRegion() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
    RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null, columnFamilyName1, columnFamilyName2);
    // Fail fast if we did not get exactly one splittable region, BEFORE spending
    // time inserting data. assertEquals gives an expected-vs-actual failure message.
    assertTrue("not able to find a splittable region", regions != null);
    assertEquals("not able to find a splittable region", 1, regions.length);
    insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2);
    // Split at the midpoint of the inserted row range.
    int splitRowNum = startRowNum + rowCount / 2;
    byte[] splitKey = Bytes.toBytes("" + splitRowNum);
    // collect AM metrics before test
    collectAssignmentManagerMetrics();
    // Split region of the table
    long procId = procExec.submitProcedure(new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
    // Wait the completion
    ProcedureTestingUtility.waitProcedure(procExec, procId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
    verify(tableName, splitRowNum);
    // A successful split submits: 1 split proc, 2 assigns (daughters), 1 unassign (parent).
    assertEquals(splitSubmittedCount + 1, splitProcMetrics.getSubmittedCounter().getCount());
    assertEquals(splitFailedCount, splitProcMetrics.getFailedCounter().getCount());
    assertEquals(assignSubmittedCount + 2, assignProcMetrics.getSubmittedCounter().getCount());
    assertEquals(assignFailedCount, assignProcMetrics.getFailedCounter().getCount());
    assertEquals(unassignSubmittedCount + 1, unassignProcMetrics.getSubmittedCounter().getCount());
    assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount());
}
Also used : TableName(org.apache.hadoop.hbase.TableName) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Test(org.junit.Test)

Example 27 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

From the class TestAccessController, method testGetProcedures.

@Test
public void testGetProcedures() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final ProcedureExecutor<MasterProcedureEnv> procExec = TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
    // Parameterized type instead of the raw Procedure (avoids an unchecked warning).
    Procedure<MasterProcedureEnv> proc = new TestTableDDLProcedure(procExec.getEnvironment(), tableName);
    proc.setOwner(USER_OWNER);
    procExec.submitProcedure(proc);
    // Exercise the listing path; the returned list itself is not asserted on,
    // so we no longer keep it in an unused local.
    procExec.getProcedures();
    AccessTestAction getProceduresAction = new AccessTestAction() {

        @Override
        public Object run() throws Exception {
            ACCESS_CONTROLLER.postGetProcedures(ObserverContextImpl.createAndPrepare(CP_ENV));
            return null;
        }
    };
    // Admin-level users may list procedures; the owner is also allowed;
    // everyone else must get a filtered (null) result.
    verifyAllowed(getProceduresAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
    verifyAllowed(getProceduresAction, USER_OWNER);
    verifyIfNull(getProceduresAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) LockProcedure(org.apache.hadoop.hbase.master.locking.LockProcedure) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Test(org.junit.Test)

Example 28 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

From the class RSGroupInfoManagerImpl, method migrate.

/**
 * Migrates the table-to-group mapping of each non-default rs group into the
 * per-table {@code TableDescriptor} region server group attribute, via
 * {@link MigrateRSGroupProcedure}. Tables that fail to migrate are kept in the
 * group's table set so migration can be retried later.
 */
private void migrate(Collection<RSGroupInfo> groupList) {
    TableDescriptors tds = masterServices.getTableDescriptors();
    ProcedureExecutor<MasterProcedureEnv> procExec = masterServices.getMasterProcedureExecutor();
    for (RSGroupInfo groupInfo : groupList) {
        // The default group is implicit; nothing to migrate.
        if (groupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
            continue;
        }
        SortedSet<TableName> failedTables = new TreeSet<>();
        List<MigrateRSGroupProcedure> procs = new ArrayList<>();
        for (TableName tableName : groupInfo.getTables()) {
            LOG.debug("Migrating {} in group {}", tableName, groupInfo.getName());
            TableDescriptor oldTd;
            try {
                oldTd = tds.get(tableName);
            } catch (IOException e) {
                LOG.warn("Failed to migrate {} in group {}", tableName, groupInfo.getName(), e);
                failedTables.add(tableName);
                continue;
            }
            // Table was dropped between listing and lookup; nothing to do.
            if (oldTd == null) {
                continue;
            }
            if (oldTd.getRegionServerGroup().isPresent()) {
                // either we have already migrated it or that user has set the rs group using the new
                // code which will set the group directly on table descriptor, skip.
                LOG.debug("Skip migrating {} since it is already in group {}", tableName, oldTd.getRegionServerGroup().get());
                continue;
            }
            // This is a bit tricky. Since we know that the region server group config in
            // TableDescriptor will only be used at master side, it is fine to just update the table
            // descriptor on file system and also the cache, without reopening all the regions. This
            // will be much faster than the normal modifyTable. And when upgrading, we will update
            // master first and then region server, so after all the region servers has been reopened,
            // the new TableDescriptor will be loaded.
            MigrateRSGroupProcedure proc = new MigrateRSGroupProcedure(procExec.getEnvironment(), tableName);
            procExec.submitProcedure(proc);
            procs.add(proc);
        }
        // Submit everything first, then wait — procedures run concurrently.
        for (MigrateRSGroupProcedure proc : procs) {
            try {
                ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, 60000);
            } catch (IOException e) {
                // Pass the exception as the last arg so SLF4J logs the stack trace
                // (previously it was silently dropped here).
                LOG.warn("Failed to migrate rs group {} for table {}", groupInfo.getName(), proc.getTableName(), e);
                failedTables.add(proc.getTableName());
            }
        }
        LOG.debug("Done migrating {}, failed tables {}", groupInfo.getName(), failedTables);
        synchronized (RSGroupInfoManagerImpl.this) {
            Map<String, RSGroupInfo> rsGroupMap = holder.groupName2Group;
            RSGroupInfo currentInfo = rsGroupMap.get(groupInfo.getName());
            if (currentInfo != null) {
                // Persist the group with only the tables that still need migration.
                RSGroupInfo newInfo = new RSGroupInfo(currentInfo.getName(), currentInfo.getServers(), failedTables);
                Map<String, RSGroupInfo> newGroupMap = new HashMap<>(rsGroupMap);
                newGroupMap.put(groupInfo.getName(), newInfo);
                try {
                    flushConfig(newGroupMap);
                } catch (IOException e) {
                    LOG.warn("Failed to persist rs group {}", newInfo.getName(), e);
                }
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) TableName(org.apache.hadoop.hbase.TableName) TableDescriptors(org.apache.hadoop.hbase.TableDescriptors) TreeSet(java.util.TreeSet)

Example 29 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

From the class ServerManager, method processDeadServer.

/**
 * Records {@code serverName} as dead and schedules a {@link ServerCrashProcedure}
 * for it. If assignment-manager failover cleanup has not finished yet, the server
 * is queued in {@code requeuedDeadServers} to be processed later instead.
 */
public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
    // Failover cleanup still running: defer handling of this dead server.
    if (!master.getAssignmentManager().isFailoverCleanupDone()) {
        requeuedDeadServers.put(serverName, shouldSplitWal);
        return;
    }
    this.deadservers.add(serverName);
    final ProcedureExecutor<MasterProcedureEnv> executor = this.master.getMasterProcedureExecutor();
    final ServerCrashProcedure crashProc =
        new ServerCrashProcedure(executor.getEnvironment(), serverName, shouldSplitWal, false);
    executor.submitProcedure(crashProc);
}
Also used : ServerCrashProcedure(org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv)

Example 30 with MasterProcedureEnv

use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.

From the class TestHBaseFsckOneRS, method testCleanUpDaughtersNotInMetaAfterFailedSplit.

@Test(timeout = 180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
        createTable(TEST_UTIL, desc, null);
        tbl = connection.getTable(desc.getTableName());
        // Seed five rows. Use Bytes.toBytes instead of String.getBytes() so the
        // encoding is fixed (UTF-8) rather than the platform default charset.
        for (int i = 0; i < 5; i++) {
            Put p1 = new Put(Bytes.toBytes("r" + i));
            p1.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q1"), Bytes.toBytes("v"));
            tbl.put(p1);
        }
        admin.flush(desc.getTableName());
        List<HRegion> regions = cluster.getRegions(desc.getTableName());
        int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
        HRegionServer regionServer = cluster.getRegionServer(serverWith);
        // Create daughters without adding to META table
        MasterProcedureEnv env = cluster.getMaster().getMasterProcedureExecutor().getEnvironment();
        SplitTableRegionProcedure splitR = new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("r3"));
        // Drive the split only part way: daughter region dirs exist on disk
        // but META is never updated — the inconsistency hbck must detect/fix.
        splitR.prepareSplitRegion(env);
        splitR.setRegionStateToSplitting(env);
        splitR.closeParentRegionForSplit(env);
        splitR.createDaughterRegions(env);
        AssignmentManager am = cluster.getMaster().getAssignmentManager();
        for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
            am.regionOffline(state.getRegion());
        }
        // Reassign the parent so the table is serveable again.
        Map<HRegionInfo, ServerName> regionsMap = new HashMap<>();
        regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
        am.assign(regionsMap);
        am.waitForAssignment(regions.get(0).getRegionInfo());
        HBaseFsck hbck = doFsck(conf, false);
        assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
        // holes are separate from overlap groups
        assertEquals(0, hbck.getOverlapGroups(tableName).size());
        // fix hole
        assertErrors(doFsck(conf, false, true, false, false, false, false, false, false, false, false, false, false, null), new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
        // check that hole fixed
        assertNoErrors(doFsck(conf, false));
        assertEquals(5, countRows());
    } finally {
        if (tbl != null) {
            tbl.close();
            tbl = null;
        }
        cleanupTable(tableName);
    }
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HashMap(java.util.HashMap) AssignmentManager(org.apache.hadoop.hbase.master.AssignmentManager) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) SplitTableRegionProcedure(org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionState(org.apache.hadoop.hbase.master.RegionState) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)

Aggregations

MasterProcedureEnv (org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv)65 Test (org.junit.Test)48 TableName (org.apache.hadoop.hbase.TableName)42 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)39 IOException (java.io.IOException)16 ProcedureExecutor (org.apache.hadoop.hbase.procedure2.ProcedureExecutor)16 ArrayList (java.util.ArrayList)13 Path (org.apache.hadoop.fs.Path)13 HBaseClassTestRule (org.apache.hadoop.hbase.HBaseClassTestRule)13 MasterTests (org.apache.hadoop.hbase.testclassification.MasterTests)13 Bytes (org.apache.hadoop.hbase.util.Bytes)13 ClassRule (org.junit.ClassRule)13 Category (org.junit.experimental.categories.Category)13 BeforeClass (org.junit.BeforeClass)12 HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil)11 ServerName (org.apache.hadoop.hbase.ServerName)10 Procedure (org.apache.hadoop.hbase.procedure2.Procedure)10 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)10 MediumTests (org.apache.hadoop.hbase.testclassification.MediumTests)10 AfterClass (org.junit.AfterClass)10