Use of org.apache.zookeeper.KeeperException in project hbase by apache.
The class RSGroupInfoManagerImpl, method retrieveGroupListFromZookeeper:
List<RSGroupInfo> retrieveGroupListFromZookeeper() throws IOException {
  String groupBasePath = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode);
  List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
  // Overwrite any info stored by table, this takes precedence
  try {
    if (ZKUtil.checkExists(watcher, groupBasePath) != -1) {
      for (String znode : ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) {
        byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode));
        if (data.length > 0) {
          ProtobufUtil.expectPBMagicPrefix(data);
          ByteArrayInputStream bis =
            new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
          RSGroupInfoList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
        }
      }
      LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size());
    }
  } catch (KeeperException | DeserializationException | InterruptedException e) {
    throw new IOException("Failed to read rsGroupZNode", e);
  }
  return RSGroupInfoList;
}
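The method turns KeeperException, DeserializationException, and InterruptedException alike into IOException, so callers only deal with one failure type. For comparison, a minimal sketch of the same check-exists / list-children / get-data read pattern written against the raw org.apache.zookeeper.ZooKeeper client, where each call can throw KeeperException; the zk handle, base path, and method name here are placeholders, not HBase code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Sketch only: the same ZooKeeper read pattern with the raw client.
List<byte[]> readChildData(ZooKeeper zk, String basePath) throws IOException {
  List<byte[]> payloads = new ArrayList<>();
  try {
    if (zk.exists(basePath, false) != null) {               // like ZKUtil.checkExists(...) != -1
      for (String child : zk.getChildren(basePath, false)) {
        byte[] data = zk.getData(basePath + "/" + child, false, null);
        if (data != null && data.length > 0) {
          payloads.add(data);
        }
      }
    }
  } catch (KeeperException | InterruptedException e) {
    // Same translation as above: surface ZooKeeper failures as IOException.
    throw new IOException("Failed to read " + basePath, e);
  }
  return payloads;
}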
Use of org.apache.zookeeper.KeeperException in project hbase by apache.
The class VerifyingRSGroupAdminClient, method verify:
public void verify() throws IOException {
  Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
  Set<RSGroupInfo> zList = Sets.newHashSet();
  for (Result result : table.getScanner(new Scan())) {
    RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(
      result.getValue(RSGroupInfoManager.META_FAMILY_BYTES, RSGroupInfoManager.META_QUALIFIER_BYTES));
    groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto));
  }
  Assert.assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups()));
  try {
    String groupBasePath = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "rsgroup");
    for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
      byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode));
      if (data.length > 0) {
        ProtobufUtil.expectPBMagicPrefix(data);
        ByteArrayInputStream bis =
          new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
        zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
      }
    }
    Assert.assertEquals(zList.size(), groupMap.size());
    for (RSGroupInfo RSGroupInfo : zList) {
      Assert.assertTrue(groupMap.get(RSGroupInfo.getName()).equals(RSGroupInfo));
    }
  } catch (KeeperException | DeserializationException | InterruptedException e) {
    throw new IOException("ZK verification failed", e);
  }
}
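The test checks that the rsgroup table and the "rsgroup" znode hold the same group definitions, decoding both through RSGroupProtobufUtil.toGroupInfo. The znode payloads follow HBase's usual framing for protobuf stored in ZooKeeper: the PB magic prefix followed by the serialized message, which is why the reader calls expectPBMagicPrefix and then skips lengthOfPBMagic bytes. A minimal sketch of that framing on the write side, assuming a raw ZooKeeper handle and an existing group znode; the method, path handling, and use of setData are placeholders for illustration, not the actual RSGroupInfoManager write path.

import org.apache.zookeeper.ZooKeeper;

// Sketch only (assumed write path): frame a serialized RSGroupInfo with the PB
// magic prefix before storing it in ZooKeeper. zk and groupZNode are placeholders.
void writeGroup(ZooKeeper zk, String groupZNode, RSGroupProtos.RSGroupInfo proto) throws Exception {
  byte[] message = proto.toByteArray();                 // plain protobuf bytes
  byte[] framed = ProtobufUtil.prependPBMagic(message); // "PBUF" magic + message
  zk.setData(groupZNode, framed, -1);                   // -1: ignore the znode version
}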
Use of org.apache.zookeeper.KeeperException in project hbase by apache.
The class ActiveMasterManager, method blockUntilBecomingActiveMaster:
/**
 * Block until becoming the active master.
 *
 * This method blocks until no other master is active and our attempt to
 * become the new active master succeeds.
 *
 * It also makes sure we are watching the master znode, so we are notified
 * if another master dies.
 * @param checkInterval how long to wait, in milliseconds, between checks of whether this master has been stopped
 * @param startupStatus the monitor used to report startup progress
 * @return true if we became the active master without issue; false if another
 *         master was running, or if some other problem occurred (a ZooKeeper error,
 *         or the stop flag was set on this master)
 */
boolean blockUntilBecomingActiveMaster(int checkInterval, MonitoredTask startupStatus) {
  String backupZNode = ZKUtil.joinZNode(this.watcher.znodePaths.backupMasterAddressesZNode, this.sn.toString());
  while (!(master.isAborted() || master.isStopped())) {
    startupStatus.setStatus("Trying to register in ZK as active master");
    // Write out our ServerName as versioned bytes.
    try {
      if (MasterAddressTracker.setMasterAddress(this.watcher, this.watcher.znodePaths.masterAddressZNode, this.sn, infoPort)) {
        // If we were a backup master before, delete our ZNode from the backup
        // master directory since we are the active master now.
        if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) {
          LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory");
          ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode);
        }
        // Save the znode name to a file so the launch scripts can tell whether we crashed.
        ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString());
        // We are the master, return
        startupStatus.setStatus("Successfully registered as active master.");
        this.clusterHasActiveMaster.set(true);
        LOG.info("Registered Active Master=" + this.sn);
        return true;
      }
      // There is another active master running elsewhere or this is a restart
      // and the master ephemeral node has not expired yet.
      this.clusterHasActiveMaster.set(true);
      String msg;
      byte[] bytes = ZKUtil.getDataAndWatch(this.watcher, this.watcher.znodePaths.masterAddressZNode);
      if (bytes == null) {
        msg = "A master was detected, but went down before its address could be read. " +
          "Attempting to become the next active master";
      } else {
        ServerName currentMaster;
        try {
          currentMaster = ProtobufUtil.parseServerNameFrom(bytes);
        } catch (DeserializationException e) {
          LOG.warn("Failed parse", e);
          // Hopefully next time around we won't fail the parse. Dangerous.
          continue;
        }
        if (ServerName.isSameHostnameAndPort(currentMaster, this.sn)) {
          msg = "Current master has this master's address, " + currentMaster +
            "; master was restarted? Deleting node.";
          // Hurry along the expiration of the znode.
          ZKUtil.deleteNode(this.watcher, this.watcher.znodePaths.masterAddressZNode);
          // We may have failed to delete the znode at the previous step, but
          // we delete the file anyway: a second attempt to delete the znode is likely to fail again.
          ZNodeClearer.deleteMyEphemeralNodeOnDisk();
        } else {
          msg = "Another master is the active master, " + currentMaster +
            "; waiting to become the next active master";
        }
      }
      LOG.info(msg);
      startupStatus.setStatus(msg);
    } catch (KeeperException ke) {
      master.abort("Received an unexpected KeeperException, aborting", ke);
      return false;
    }
    synchronized (this.clusterHasActiveMaster) {
      while (clusterHasActiveMaster.get() && !master.isStopped()) {
        try {
          clusterHasActiveMaster.wait(checkInterval);
        } catch (InterruptedException e) {
          // We expect to be interrupted when a master dies,
          // will fall out if so
          LOG.debug("Interrupted waiting for master to die", e);
        }
      }
      if (clusterShutDown.get()) {
        this.master.stop("Cluster went down before this master became active");
      }
    }
  }
  return false;
}
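Underneath MasterAddressTracker.setMasterAddress, becoming the active master is a race to create an ephemeral znode: whoever creates it first wins, and the node disappears when that master's ZooKeeper session dies, letting the waiters retry. A minimal sketch of that race with the raw ZooKeeper client, where losing shows up as KeeperException.NodeExistsException; the method name, path, and payload are placeholders, not the HBase implementation.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Sketch only: the ephemeral-znode election that setMasterAddress wraps.
boolean tryBecomeActive(ZooKeeper zk, String masterZNode, byte[] myServerName)
    throws KeeperException, InterruptedException {
  try {
    // Ephemeral: the znode is removed automatically if this process's session expires.
    zk.create(masterZNode, myServerName, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    return true;  // we won the race and are now the active master
  } catch (KeeperException.NodeExistsException e) {
    return false; // another master holds the znode; watch it and wait, as the loop above does
  }
}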
Use of org.apache.zookeeper.KeeperException in project hbase by apache.
The class ActiveMasterManager, method stop:
public void stop() {
  try {
    synchronized (clusterHasActiveMaster) {
      // Master is already stopped, wake up the manager
      // thread so that it can shutdown soon.
      clusterHasActiveMaster.notifyAll();
    }
    // If our address is in ZK, delete it on our way out
    ServerName activeMaster = null;
    try {
      activeMaster = MasterAddressTracker.getMasterAddress(this.watcher);
    } catch (IOException e) {
      LOG.warn("Failed get of master address: " + e.toString());
    }
    if (activeMaster != null && activeMaster.equals(this.sn)) {
      ZKUtil.deleteNode(watcher, watcher.znodePaths.masterAddressZNode);
      // We may have failed to delete the znode at the previous step, but
      // we delete the file anyway: a second attempt to delete the znode is likely to fail again.
      ZNodeClearer.deleteMyEphemeralNodeOnDisk();
    }
  } catch (KeeperException e) {
    LOG.error(this.watcher.prefix("Error deleting our own master address node"), e);
  }
}
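stop() deletes the master address znode only if this server still owns it, and it logs rather than rethrows any KeeperException on the way out. A related pattern used earlier is the fail-silent delete (ZKUtil.deleteNodeFailSilent): with the raw ZooKeeper client, "already gone" surfaces as KeeperException.NoNodeException and is simply swallowed. A minimal sketch of that pattern; the method name is a placeholder.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Sketch only: delete a znode and ignore the "already gone" case.
void deleteIfPresent(ZooKeeper zk, String znode) throws KeeperException, InterruptedException {
  try {
    zk.delete(znode, -1);                         // -1: delete regardless of version
  } catch (KeeperException.NoNodeException e) {
    // Already deleted (e.g. the session expired and the ephemeral node vanished); nothing to do.
  }
}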
Use of org.apache.zookeeper.KeeperException in project hbase by apache.
The class MasterRpcServices, method setSplitOrMergeEnabled:
@Override
public SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled(RpcController controller,
    SetSplitOrMergeEnabledRequest request) throws ServiceException {
  SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder();
  try {
    master.checkInitialized();
    boolean newValue = request.getEnabled();
    for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) {
      MasterSwitchType switchType = convert(masterSwitchType);
      boolean oldValue = master.isSplitOrMergeEnabled(switchType);
      response.addPrevValue(oldValue);
      boolean bypass = false;
      if (master.cpHost != null) {
        bypass = master.cpHost.preSetSplitOrMergeEnabled(newValue, switchType);
      }
      if (!bypass) {
        master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType);
      }
      if (master.cpHost != null) {
        master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType);
      }
    }
  } catch (IOException | KeeperException e) {
    throw new ServiceException(e);
  }
  return response.build();
}
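Both IOException and KeeperException end up wrapped in ServiceException here; the likely source of the KeeperException is the switch tracker's ZooKeeper write when it persists the new flag. A minimal sketch of how such a tracker might store an on/off switch in ZooKeeper; the znode path, one-byte encoding, and method are assumptions for illustration, not SplitOrMergeTracker's actual implementation.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Sketch only: persist a boolean switch as a one-byte znode payload.
void setSwitch(ZooKeeper zk, String switchZNode, boolean enabled)
    throws KeeperException, InterruptedException {
  byte[] state = new byte[] { (byte) (enabled ? 1 : 0) };
  if (zk.exists(switchZNode, false) == null) {
    zk.create(switchZNode, state, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  } else {
    zk.setData(switchZNode, state, -1);           // -1: overwrite regardless of version
  }
}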