use of org.apache.hadoop.hbase.ClusterId in project hbase by apache.
the class TestReplicationSourceManager method setupZkAndReplication.
protected static void setupZkAndReplication() throws Exception {
  // The implementing class should set up the conf
  assertNotNull(conf);
  zkw = new ZooKeeperWatcher(conf, "test", null);
  // Register peer "1" whose cluster key points back at this test's own ZK quorum.
  ZKUtil.createWithParents(zkw, "/hbase/replication");
  ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1");
  ZKUtil.setData(zkw, "/hbase/replication/peers/1",
    Bytes.toBytes(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":"
      + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
  // Mark both the peer and replication as a whole as enabled.
  ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state");
  ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state",
    ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
  ZKUtil.createWithParents(zkw, "/hbase/replication/state");
  ZKUtil.setData(zkw, "/hbase/replication/state", ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
  // Give this cluster a fresh random id so sources can tell it apart from its peers.
  ZKClusterId.setClusterId(zkw, new ClusterId());
  FSUtils.setRootDir(utility.getConfiguration(), utility.getDataTestDir());
  fs = FileSystem.get(conf);
  oldLogDir = new Path(utility.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);
  logDir = new Path(utility.getDataTestDir(), HConstants.HREGION_LOGDIR_NAME);
  replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
  manager = replication.getReplicationManager();
  manager.addSource(slaveId);
  // Table with one globally-replicated family (f1) and one local-only family (f2).
  htd = new HTableDescriptor(test);
  HColumnDescriptor col = new HColumnDescriptor(f1);
  col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  htd.addFamily(col);
  col = new HColumnDescriptor(f2);
  col.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
  htd.addFamily(col);
  scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getFamiliesKeys()) {
    scopes.put(fam, 0);
  }
  hri = new HRegionInfo(htd.getTableName(), r1, r2);
}
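The id written by ZKClusterId.setClusterId above lands in the cluster-id znode and can be read straight back, which is a handy sanity check in tests. A minimal sketch, assuming the zkw watcher from the setup above is still open:

// Sketch: read back the cluster id stored by setupZkAndReplication.
// readClusterIdZNode returns the UUID string, or null if the znode is unset.
String storedId = ZKClusterId.readClusterIdZNode(zkw);
assertNotNull(storedId);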
use of org.apache.hadoop.hbase.ClusterId in project hbase by apache.
the class TestReplicationStateZKImpl method initPeerClusterState.
private static String initPeerClusterState(String baseZKNode)
    throws IOException, KeeperException {
  // Add a dummy region server and set up the cluster id
  Configuration testConf = new Configuration(conf);
  testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode);
  ZooKeeperWatcher zkw1 = new ZooKeeperWatcher(testConf, "test1", null);
  String fakeRs = ZKUtil.joinZNode(zkw1.znodePaths.rsZNode, "hostname1.example.org:1234");
  ZKUtil.createWithParents(zkw1, fakeRs);
  ZKClusterId.setClusterId(zkw1, new ClusterId());
  return ZKConfig.getZooKeeperClusterKey(testConf);
}
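The cluster key returned here is the standard "quorum:port:znodeParent" triple, which is exactly the form a replication peer registration expects. A hedged sketch of how a test might consume it (the base znode value is illustrative, not from the source):

// Sketch: simulate a second cluster under its own base znode and treat its
// cluster key as the peer address for the cluster under test.
String clusterKey = initPeerClusterState("/hbase-test-peer");  // illustrative znode
// clusterKey now looks like "localhost:2181:/hbase-test-peer" and can be
// handed to the ReplicationPeers registration call in the test body.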
use of org.apache.hadoop.hbase.ClusterId in project hbase by apache.
the class MasterFileSystem method checkRootDir.
/**
 * Get the rootdir. Make sure it's wholesome and exists before returning.
 * @param rd the root directory path
 * @param c the configuration to read timeouts and retry counts from
 * @param fs the filesystem the root directory lives on
 * @return hbase.rootdir (after checks for existence and bootstrapping if
 * needed, populating the directory with necessary bootup files)
 * @throws IOException if the filesystem checks or bootstrap writes fail
 */
private Path checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
    throws IOException {
  // If FS is in safe mode wait till out of it.
  FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  // Filesystem is good. Go ahead and check for hbase.rootdir.
  try {
    if (!fs.exists(rd)) {
      fs.mkdirs(rd);
      // DFS leaves safe mode with 0 DNs when there are 0 blocks.
      // We used to handle this by checking the current DN count and waiting until
      // it is nonzero. With security, the check for datanode count doesn't work --
      // it is a privileged op. So instead we adopt the strategy of the jobtracker
      // and simply retry file creation during bootstrap indefinitely. As soon as
      // there is one datanode it will succeed. Permission problems should have
      // already been caught by mkdirs above.
      FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
        c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    } else {
      if (!fs.isDirectory(rd)) {
        throw new IllegalArgumentException(rd.toString() + " is not a directory");
      }
      // as above
      FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
        c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    }
  } catch (DeserializationException de) {
    LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
    IOException ioe = new IOException();
    ioe.initCause(de);
    throw ioe;
  } catch (IllegalArgumentException iae) {
    LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR + " "
      + rd.toString(), iae);
    throw iae;
  }
  // Make sure cluster ID exists
  if (!FSUtils.checkClusterIdExists(fs, rd,
      c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
    FSUtils.setClusterId(fs, rd, new ClusterId(),
      c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  }
  clusterId = FSUtils.getClusterId(fs, rd);
  // Make sure the meta region directory exists!
  if (!FSUtils.metaRegionExists(fs, rd)) {
    bootstrap(rd, c);
  }
  // Create tableinfo-s for hbase:meta if not already there. The created
  // descriptor assumes the table is enabled. hbase:meta is a system table,
  // so its descriptor is predefined; we get it from the registry.
  FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
  fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));
  return rd;
}
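Underneath checkRootDir, the cluster-id handling reduces to a small create/serialize/parse cycle on ClusterId itself. A minimal standalone sketch of that cycle (the method name is illustrative):

// Sketch: the ClusterId lifecycle that checkRootDir relies on.
static void clusterIdRoundTrip() throws DeserializationException {
  ClusterId id = new ClusterId();   // a fresh id wraps UUID.randomUUID().toString()
  byte[] bytes = id.toByteArray();  // PB magic prefix + ClusterIdProtos.ClusterId
  ClusterId parsed = ClusterId.parseFrom(bytes);
  assert id.toString().equals(parsed.toString());
}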
use of org.apache.hadoop.hbase.ClusterId in project hbase by apache.
the class ProtobufUtil method convert.
/**
 * Convert a ClusterStatus to a protobuf ClusterStatus
 *
 * @param status the ClusterStatus to convert
 * @return the protobuf ClusterStatus
 */
public static ClusterStatusProtos.ClusterStatus convert(ClusterStatus status) {
  ClusterStatusProtos.ClusterStatus.Builder builder =
    ClusterStatusProtos.ClusterStatus.newBuilder();
  builder.setHbaseVersion(
    HBaseVersionFileContent.newBuilder().setVersion(status.getHBaseVersion()));
  if (status.getServers() != null) {
    for (ServerName serverName : status.getServers()) {
      LiveServerInfo.Builder lsi =
        LiveServerInfo.newBuilder().setServer(ProtobufUtil.toServerName(serverName));
      lsi.setServerLoad(status.getLoad(serverName).obtainServerLoadPB());
      builder.addLiveServers(lsi.build());
    }
  }
  if (status.getDeadServerNames() != null) {
    for (ServerName deadServer : status.getDeadServerNames()) {
      builder.addDeadServers(ProtobufUtil.toServerName(deadServer));
    }
  }
  if (status.getRegionsInTransition() != null) {
    for (RegionState rit : status.getRegionsInTransition()) {
      ClusterStatusProtos.RegionState rs = rit.convert();
      RegionSpecifier.Builder spec =
        RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME);
      spec.setValue(UnsafeByteOperations.unsafeWrap(rit.getRegion().getRegionName()));
      RegionInTransition pbRIT =
        RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build();
      builder.addRegionsInTransition(pbRIT);
    }
  }
  if (status.getClusterId() != null) {
    builder.setClusterId(new ClusterId(status.getClusterId()).convert());
  }
  if (status.getMasterCoprocessors() != null) {
    for (String coprocessor : status.getMasterCoprocessors()) {
      builder.addMasterCoprocessors(HBaseProtos.Coprocessor.newBuilder().setName(coprocessor));
    }
  }
  if (status.getMaster() != null) {
    builder.setMaster(ProtobufUtil.toServerName(status.getMaster()));
  }
  if (status.getBackupMasters() != null) {
    for (ServerName backup : status.getBackupMasters()) {
      builder.addBackupMasters(ProtobufUtil.toServerName(backup));
    }
  }
  if (status.getBalancerOn() != null) {
    builder.setBalancerOn(status.getBalancerOn());
  }
  return builder.build();
}
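Note that ClusterStatus carries the cluster id as a plain String, so the clusterId branch above first rewraps it in a ClusterId before converting to protobuf. A minimal sketch of just that leg in isolation (the helper names are illustrative):

// Sketch: the String <-> protobuf ClusterId hop used by convert above.
static ClusterIdProtos.ClusterId toPb(String clusterIdString) {
  return new ClusterId(clusterIdString).convert();  // POJO -> protobuf
}

static String fromPb(ClusterIdProtos.ClusterId pb) {
  return ClusterId.convert(pb).toString();          // protobuf -> POJO -> String
}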
use of org.apache.hadoop.hbase.ClusterId in project hbase by apache.
the class FSUtils method getClusterId.
/**
* Returns the value of the unique cluster ID stored for this HBase instance.
* @param fs the root directory FileSystem
* @param rootdir the path to the HBase root directory
* @return the unique cluster identifier
* @throws IOException if reading the cluster ID file fails
*/
public static ClusterId getClusterId(FileSystem fs, Path rootdir) throws IOException {
  Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
  ClusterId clusterId = null;
  FileStatus status = fs.exists(idPath) ? fs.getFileStatus(idPath) : null;
  if (status != null) {
    int len = Ints.checkedCast(status.getLen());
    byte[] content = new byte[len];
    FSDataInputStream in = fs.open(idPath);
    try {
      in.readFully(content);
    } catch (EOFException eof) {
      LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
    } finally {
      in.close();
    }
    try {
      clusterId = ClusterId.parseFrom(content);
    } catch (DeserializationException e) {
      throw new IOException("content=" + Bytes.toString(content), e);
    }
    // If not pb'd, make it so.
    if (!ProtobufUtil.isPBMagicPrefix(content)) {
      String cid = null;
      in = fs.open(idPath);
      try {
        cid = in.readUTF();
        clusterId = new ClusterId(cid);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally {
        in.close();
      }
      rewriteAsPb(fs, rootdir, idPath, clusterId);
    }
    return clusterId;
  } else {
    LOG.warn("Cluster ID file does not exist at " + idPath.toString());
  }
  return clusterId;
}
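Since getClusterId pairs with FSUtils.setClusterId, the two make a simple file-level round trip that can be exercised against a local filesystem. A hedged sketch (the scratch path and method name are illustrative, not from the source):

// Sketch: persist a fresh id under a scratch root dir, then read it back.
static void clusterIdFileRoundTrip(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.getLocal(conf);
  Path rootdir = new Path("/tmp/hbase-clusterid-demo");  // illustrative path
  ClusterId written = new ClusterId();
  FSUtils.setClusterId(fs, rootdir, written, 10 * 1000); // writes the hbase.id file
  ClusterId read = FSUtils.getClusterId(fs, rootdir);
  assert written.toString().equals(read.toString());
}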