
Example 61 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache, from the class AbstractPeerProcedure, method setLastPushedSequenceId.

protected final void setLastPushedSequenceId(MasterProcedureEnv env, ReplicationPeerConfig peerConfig) throws IOException, ReplicationException {
    Map<String, Long> lastSeqIds = new HashMap<String, Long>();
    for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
        if (!td.hasGlobalReplicationScope()) {
            continue;
        }
        TableName tn = td.getTableName();
        if (!peerConfig.needToReplicate(tn)) {
            continue;
        }
        setLastPushedSequenceIdForTable(env, tn, lastSeqIds);
    }
    if (!lastSeqIds.isEmpty()) {
        // peerId is a field of AbstractPeerProcedure holding the id of the replication peer
        env.getReplicationPeerManager().getQueueStorage().setLastSequenceIds(peerId, lastSeqIds);
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HashMap(java.util.HashMap) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
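
The loop above only considers tables whose descriptors report a global replication scope. As a hedged sketch of how such a descriptor is produced on the client side (the helper class and its arguments are made up for this illustration), a column family can be given HConstants.REPLICATION_SCOPE_GLOBAL through the builder API:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class GlobalScopeDescriptorSketch {

    // Build a descriptor whose single family uses REPLICATION_SCOPE_GLOBAL, so
    // TableDescriptor#hasGlobalReplicationScope() returns true and the table is
    // picked up by loops like the one in setLastPushedSequenceId above.
    public static TableDescriptor build(TableName tableName, byte[] family) {
        return TableDescriptorBuilder.newBuilder(tableName)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
                .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
                .build())
            .build();
    }

    private GlobalScopeDescriptorSketch() {
    }
}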

Example 62 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache, from the class InitMetaProcedure, method executeFromState.

@Override
protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state) throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
    LOG.debug("Execute {}", this);
    try {
        switch(state) {
            case INIT_META_WRITE_FS_LAYOUT:
                Configuration conf = env.getMasterConfiguration();
                Path rootDir = CommonFSUtils.getRootDir(conf);
                TableDescriptor td = writeFsLayout(rootDir, conf);
                env.getMasterServices().getTableDescriptors().update(td, true);
                setNextState(InitMetaState.INIT_META_ASSIGN_META);
                return Flow.HAS_MORE_STATE;
            case INIT_META_ASSIGN_META:
                LOG.info("Going to assign meta");
                addChildProcedure(env.getAssignmentManager().createAssignProcedures(Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO)));
                setNextState(InitMetaState.INIT_META_CREATE_NAMESPACES);
                return Flow.HAS_MORE_STATE;
            case INIT_META_CREATE_NAMESPACES:
                LOG.info("Going to create {} and {} namespaces", DEFAULT_NAMESPACE, SYSTEM_NAMESPACE);
                createDirectory(env, DEFAULT_NAMESPACE);
                createDirectory(env, SYSTEM_NAMESPACE);
                // here the TableNamespaceManager has not been initialized yet, so we have to insert the
                // record directly into meta table, later the TableNamespaceManager will load these two
                // namespaces when starting.
                insertNamespaceToMeta(env.getMasterServices().getConnection(), DEFAULT_NAMESPACE);
                insertNamespaceToMeta(env.getMasterServices().getConnection(), SYSTEM_NAMESPACE);
                return Flow.NO_MORE_STATE;
            default:
                throw new UnsupportedOperationException("unhandled state=" + state);
        }
    } catch (IOException e) {
        if (retryCounter == null) {
            retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
        }
        long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
        LOG.warn("Failed to init meta, suspend {}secs", backoff, e);
        setTimeout(Math.toIntExact(backoff));
        setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
        skipPersistence();
        throw new ProcedureSuspendedException();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) ProcedureSuspendedException(org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
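
Once INIT_META_WRITE_FS_LAYOUT has run and the descriptor has been registered via getTableDescriptors().update(td, true), a client can read the hbase:meta descriptor back through the Admin API. A minimal sketch, assuming a running cluster and default client configuration (the class name is made up for this illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class MetaDescriptorSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // After InitMetaProcedure completes, hbase:meta has a descriptor like any other table.
            TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
            System.out.println("meta column families: " + meta.getColumnFamilyCount());
        }
    }
}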

Example 63 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache, from the class InitMetaProcedure, method writeFsLayout.

private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) throws IOException {
    LOG.info("BOOTSTRAP: creating hbase:meta region");
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
    if (fs.exists(tableDir) && !fs.delete(tableDir, true)) {
        LOG.warn("Can not delete partial created meta table, continue...");
    }
    // Bootstrapping, make sure blockcache is off. Else, one will be
    // created here in bootstrap and it'll need to be cleaned up. Better to
    // not make it in first place. Turn off block caching for bootstrap.
    // Enable after.
    TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir);
    HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null).close();
    return metaDescriptor;
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
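
writeFsLayout leaves the meta table descriptor under the table directory before the region is created. A hedged sketch of reading that on-disk descriptor back, assuming the FSTableDescriptors.getTableDescriptorFromFs helper available in recent HBase versions (the wrapper class is made up for this illustration):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public final class ReadMetaDescriptorFromFsSketch {

    // Read the descriptor that writeFsLayout (or any table creation) left under
    // the table directory in the HBase root dir.
    public static TableDescriptor read(Configuration conf) throws IOException {
        Path rootDir = CommonFSUtils.getRootDir(conf);
        FileSystem fs = rootDir.getFileSystem(conf);
        Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
        return FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
    }

    private ReadMetaDescriptorFromFsSketch() {
    }
}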

Example 64 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache, from the class ModifyPeerProcedure, method reopenRegions.

// will be overridden in tests to simulate errors
protected void reopenRegions(MasterProcedureEnv env) throws IOException {
    ReplicationPeerConfig peerConfig = getNewPeerConfig();
    ReplicationPeerConfig oldPeerConfig = getOldPeerConfig();
    TableStateManager tsm = env.getMasterServices().getTableStateManager();
    for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
        if (!td.hasGlobalReplicationScope()) {
            continue;
        }
        TableName tn = td.getTableName();
        if (!peerConfig.needToReplicate(tn)) {
            continue;
        }
        if (oldPeerConfig != null && oldPeerConfig.isSerial() && oldPeerConfig.needToReplicate(tn)) {
            continue;
        }
        if (needReopen(tsm, tn)) {
            addChildProcedure(new ReopenTableRegionsProcedure(tn));
        }
    }
}
Also used : TableStateManager(org.apache.hadoop.hbase.master.TableStateManager) TableName(org.apache.hadoop.hbase.TableName) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) ReopenTableRegionsProcedure(org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
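
reopenRegions runs inside the master, but the same filtering can be expressed against the public client API. The following is a hedged sketch rather than the procedure's own code, assuming Admin.listTableDescriptors and Admin.getReplicationPeerConfig (the helper class is made up for this illustration):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public final class ReplicatedTablesForPeerSketch {

    // Client-side analogue of the server-side filtering in reopenRegions: list the
    // tables that are globally scoped and matched by the peer's configuration.
    public static List<TableName> replicatedTables(Admin admin, String peerId) throws IOException {
        ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(peerId);
        List<TableName> result = new ArrayList<>();
        for (TableDescriptor td : admin.listTableDescriptors()) {
            if (td.hasGlobalReplicationScope() && peerConfig.needToReplicate(td.getTableName())) {
                result.add(td.getTableName());
            }
        }
        return result;
    }

    private ReplicatedTablesForPeerSketch() {
    }
}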

Example 65 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache, from the class TestSplitMerge, method test.

@Test
public void test() throws Exception {
    TableName tableName = TableName.valueOf("SplitMerge");
    byte[] family = Bytes.toBytes("CF");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
    UTIL.getAdmin().createTable(td, new byte[][] { Bytes.toBytes(1) });
    UTIL.waitTableAvailable(tableName);
    UTIL.getAdmin().split(tableName, Bytes.toBytes(2));
    UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

        @Override
        public boolean evaluate() throws Exception {
            return UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3;
        }

        @Override
        public String explainFailure() throws Exception {
            return "Split has not finished yet";
        }
    });
    UTIL.waitUntilNoRegionsInTransition();
    RegionInfo regionA = null;
    RegionInfo regionB = null;
    for (RegionInfo region : UTIL.getAdmin().getRegions(tableName)) {
        if (region.getStartKey().length == 0) {
            regionA = region;
        } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) {
            regionB = region;
        }
    }
    assertNotNull(regionA);
    assertNotNull(regionB);
    UTIL.getAdmin().mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), false).get(30, TimeUnit.SECONDS);
    assertEquals(2, UTIL.getAdmin().getRegions(tableName).size());
    ServerName expected = UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName();
    assertEquals(expected, UTIL.getConnection().getRegionLocator(tableName).getRegionLocation(Bytes.toBytes(1), true).getServerName());
    try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
        assertEquals(expected, asyncConn.getRegionLocator(tableName).getRegionLocation(Bytes.toBytes(1), true).get().getServerName());
    }
}
Also used : AsyncConnection(org.apache.hadoop.hbase.client.AsyncConnection) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
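
The test resolves region locations through both the blocking and the asynchronous connection. The TableDescriptor itself can also be fetched over the async path; a minimal sketch, assuming a reachable cluster and the table created above (the class name is made up for this illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class AsyncDescriptorSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("SplitMerge");
        try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) {
            // AsyncAdmin exposes the same descriptor lookup as the blocking Admin API.
            TableDescriptor td = asyncConn.getAdmin().getDescriptor(tableName).get();
            System.out.println("families: " + td.getColumnFamilyCount());
        }
    }
}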

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 639 uses
Test (org.junit.Test): 356 uses
TableName (org.apache.hadoop.hbase.TableName): 237 uses
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 180 uses
IOException (java.io.IOException): 151 uses
Put (org.apache.hadoop.hbase.client.Put): 142 uses
Admin (org.apache.hadoop.hbase.client.Admin): 136 uses
Path (org.apache.hadoop.fs.Path): 124 uses
Table (org.apache.hadoop.hbase.client.Table): 121 uses
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 96 uses
Configuration (org.apache.hadoop.conf.Configuration): 91 uses
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 77 uses
ArrayList (java.util.ArrayList): 75 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 66 uses
Result (org.apache.hadoop.hbase.client.Result): 66 uses
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 64 uses
Connection (org.apache.hadoop.hbase.client.Connection): 59 uses
Scan (org.apache.hadoop.hbase.client.Scan): 50 uses
Get (org.apache.hadoop.hbase.client.Get): 49 uses
List (java.util.List): 39 uses
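
The classes most frequently aggregated with TableDescriptor (TableDescriptorBuilder, ColumnFamilyDescriptor, Admin, Table, Put, Get, Result) typically appear together in a create-then-read-write round trip. A minimal sketch of that common combination, assuming a running cluster; the class, table, and family names are made up for this illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class TableDescriptorRoundTripSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("descriptor_demo");
        byte[] family = Bytes.toBytes("cf");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Describe the table, create it, then write and read back one cell.
            TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
                .build();
            admin.createTable(td);
            try (Table table = conn.getTable(tableName)) {
                table.put(new Put(Bytes.toBytes("row1"))
                    .addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
                Result result = table.get(new Get(Bytes.toBytes("row1")));
                System.out.println(Bytes.toString(result.getValue(family, Bytes.toBytes("q"))));
            }
        }
    }
}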