Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class AbstractPeerProcedure, method setLastPushedSequenceId.
protected final void setLastPushedSequenceId(MasterProcedureEnv env, ReplicationPeerConfig peerConfig)
    throws IOException, ReplicationException {
  Map<String, Long> lastSeqIds = new HashMap<String, Long>();
  for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
    if (!td.hasGlobalReplicationScope()) {
      continue;
    }
    TableName tn = td.getTableName();
    if (!peerConfig.needToReplicate(tn)) {
      continue;
    }
    setLastPushedSequenceIdForTable(env, tn, lastSeqIds);
  }
  if (!lastSeqIds.isEmpty()) {
    env.getReplicationPeerManager().getQueueStorage().setLastSequenceIds(peerId, lastSeqIds);
  }
}
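For context, hasGlobalReplicationScope() only returns true when at least one column family of the table is declared with global replication scope, so only such tables enter the loop above. A minimal sketch of building such a descriptor, assuming the standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder APIs; the table and family names are hypothetical.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicatedTableDescriptorSketch {
  public static TableDescriptor buildReplicatedDescriptor() {
    // Declare the column family with REPLICATION_SCOPE_GLOBAL so that
    // TableDescriptor#hasGlobalReplicationScope() returns true and the
    // table is picked up by loops like the one in setLastPushedSequenceId.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("cf")) // hypothetical family name
      .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
      .build();
    return TableDescriptorBuilder
      .newBuilder(TableName.valueOf("replicated_table")) // hypothetical table name
      .setColumnFamily(cf)
      .build();
  }
}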
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class InitMetaProcedure, method executeFromState.
@Override
protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state)
    throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
  LOG.debug("Execute {}", this);
  try {
    switch (state) {
      case INIT_META_WRITE_FS_LAYOUT:
        Configuration conf = env.getMasterConfiguration();
        Path rootDir = CommonFSUtils.getRootDir(conf);
        TableDescriptor td = writeFsLayout(rootDir, conf);
        env.getMasterServices().getTableDescriptors().update(td, true);
        setNextState(InitMetaState.INIT_META_ASSIGN_META);
        return Flow.HAS_MORE_STATE;
      case INIT_META_ASSIGN_META:
        LOG.info("Going to assign meta");
        addChildProcedure(env.getAssignmentManager()
          .createAssignProcedures(Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO)));
        setNextState(InitMetaState.INIT_META_CREATE_NAMESPACES);
        return Flow.HAS_MORE_STATE;
      case INIT_META_CREATE_NAMESPACES:
        LOG.info("Going to create {} and {} namespaces", DEFAULT_NAMESPACE, SYSTEM_NAMESPACE);
        createDirectory(env, DEFAULT_NAMESPACE);
        createDirectory(env, SYSTEM_NAMESPACE);
        // here the TableNamespaceManager has not been initialized yet, so we have to insert the
        // record directly into meta table, later the TableNamespaceManager will load these two
        // namespaces when starting.
        insertNamespaceToMeta(env.getMasterServices().getConnection(), DEFAULT_NAMESPACE);
        insertNamespaceToMeta(env.getMasterServices().getConnection(), SYSTEM_NAMESPACE);
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
    }
  } catch (IOException e) {
    if (retryCounter == null) {
      retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
    }
    long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
    LOG.warn("Failed to init meta, suspend {}secs", backoff, e);
    setTimeout(Math.toIntExact(backoff));
    setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
    skipPersistence();
    throw new ProcedureSuspendedException();
  }
}
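Once INIT_META_WRITE_FS_LAYOUT has persisted the descriptor and meta has been assigned, the same TableDescriptor is visible through the client API. A minimal sketch of reading it back, assuming a running cluster and the standard Admin API; this verification is not part of the procedure above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MetaDescriptorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Fetch the descriptor the bootstrap wrote for hbase:meta.
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      System.out.println("meta column families: " + meta.getColumnFamilyCount());
    }
  }
}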
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class InitMetaProcedure, method writeFsLayout.
private static TableDescriptor writeFsLayout(Path rootDir, Configuration conf) throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  FileSystem fs = rootDir.getFileSystem(conf);
  Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
  if (fs.exists(tableDir) && !fs.delete(tableDir, true)) {
    LOG.warn("Can not delete partial created meta table, continue...");
  }
  // Bootstrapping, make sure blockcache is off. Else, one will be
  // created here in bootstrap and it'll need to be cleaned up. Better to
  // not make it in first place. Turn off block caching for bootstrap.
  // Enable after.
  TableDescriptor metaDescriptor = FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(conf, fs, rootDir);
  HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rootDir, conf, metaDescriptor, null).close();
  return metaDescriptor;
}
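The layout written above lives under the table directory for hbase:meta. A minimal sketch for inspecting that directory after bootstrap, using only the path helpers that already appear in the method; the listing itself is illustrative and not part of HBase.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class MetaLayoutSketch {
  public static void listMetaTableDir(Configuration conf) throws Exception {
    Path rootDir = CommonFSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    // writeFsLayout creates the meta region under this table directory.
    Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
    for (FileStatus status : fs.listStatus(tableDir)) {
      System.out.println(status.getPath());
    }
  }
}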
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class ModifyPeerProcedure, method reopenRegions.
// will be overridden in tests to simulate an error
protected void reopenRegions(MasterProcedureEnv env) throws IOException {
  ReplicationPeerConfig peerConfig = getNewPeerConfig();
  ReplicationPeerConfig oldPeerConfig = getOldPeerConfig();
  TableStateManager tsm = env.getMasterServices().getTableStateManager();
  for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
    if (!td.hasGlobalReplicationScope()) {
      continue;
    }
    TableName tn = td.getTableName();
    if (!peerConfig.needToReplicate(tn)) {
      continue;
    }
    if (oldPeerConfig != null && oldPeerConfig.isSerial() && oldPeerConfig.needToReplicate(tn)) {
      continue;
    }
    if (needReopen(tsm, tn)) {
      addChildProcedure(new ReopenTableRegionsProcedure(tn));
    }
  }
}
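The decision of which tables to reopen hinges on ReplicationPeerConfig.needToReplicate(TableName). A minimal sketch of building a peer config and querying it, assuming the standard ReplicationPeerConfig.newBuilder() API; the cluster key and table name are hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigSketch {
  public static void main(String[] args) {
    // Hypothetical peer config: replicate every user table, serially.
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
      .setClusterKey("peer-zk:2181:/hbase") // hypothetical cluster key
      .setReplicateAllUserTables(true)
      .setSerial(true)
      .build();
    TableName tn = TableName.valueOf("some_table"); // hypothetical table
    // reopenRegions above skips tables for which this returns false.
    System.out.println("needToReplicate: " + peerConfig.needToReplicate(tn));
  }
}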
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class TestSplitMerge, method test.
@Test
public void test() throws Exception {
  TableName tableName = TableName.valueOf("SplitMerge");
  byte[] family = Bytes.toBytes("CF");
  TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
  UTIL.getAdmin().createTable(td, new byte[][] { Bytes.toBytes(1) });
  UTIL.waitTableAvailable(tableName);
  UTIL.getAdmin().split(tableName, Bytes.toBytes(2));
  UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      return UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3;
    }

    @Override
    public String explainFailure() throws Exception {
      return "Split has not finished yet";
    }
  });
  UTIL.waitUntilNoRegionsInTransition();
  RegionInfo regionA = null;
  RegionInfo regionB = null;
  for (RegionInfo region : UTIL.getAdmin().getRegions(tableName)) {
    if (region.getStartKey().length == 0) {
      regionA = region;
    } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) {
      regionB = region;
    }
  }
  assertNotNull(regionA);
  assertNotNull(regionB);
  UTIL.getAdmin().mergeRegionsAsync(regionA.getRegionName(), regionB.getRegionName(), false)
    .get(30, TimeUnit.SECONDS);
  assertEquals(2, UTIL.getAdmin().getRegions(tableName).size());
  ServerName expected = UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName();
  assertEquals(expected, UTIL.getConnection().getRegionLocator(tableName)
    .getRegionLocation(Bytes.toBytes(1), true).getServerName());
  try (AsyncConnection asyncConn =
    ConnectionFactory.createAsyncConnection(UTIL.getConfiguration()).get()) {
    assertEquals(expected, asyncConn.getRegionLocator(tableName)
      .getRegionLocation(Bytes.toBytes(1), true).get().getServerName());
  }
}
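The test creates the table pre-split at Bytes.toBytes(1) and then splits at Bytes.toBytes(2), so the merge of regionA and regionB should leave a region spanning from the empty start key up to Bytes.toBytes(2). A minimal sketch of checking that boundary, reusing only the Admin and Bytes calls already in the test; the helper class and method names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class MergedRangeSketch {
  // Returns true when the region that starts at the empty key now ends at the
  // original split point, i.e. the merged region covers both former ranges.
  public static boolean mergedRegionCoversBothRanges(Admin admin, TableName tableName) throws IOException {
    for (RegionInfo region : admin.getRegions(tableName)) {
      if (region.getStartKey().length == 0) {
        return Bytes.equals(region.getEndKey(), Bytes.toBytes(2));
      }
    }
    return false;
  }
}

In the test above this would be invoked as MergedRangeSketch.mergedRegionCoversBothRanges(UTIL.getAdmin(), tableName) after the merge completes.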