
Example 1 with ActiveNodeInfo

Use of org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo in project hadoop by apache.

From class DFSZKFailoverController, method dataToTarget:

@Override
protected HAServiceTarget dataToTarget(byte[] data) {
    // Decode the ActiveNodeInfo protobuf that the ZKFC stored in ZooKeeper.
    ActiveNodeInfo proto;
    try {
        proto = ActiveNodeInfo.parseFrom(data);
    } catch (InvalidProtocolBufferException e) {
        throw new RuntimeException("Invalid data in ZK: " + StringUtils.byteToHexString(data));
    }
    // Build the target from local configuration and cross-check its address
    // against the one recorded in ZooKeeper.
    NNHAServiceTarget ret = new NNHAServiceTarget(conf, proto.getNameserviceId(), proto.getNamenodeId());
    InetSocketAddress addressFromProtobuf = new InetSocketAddress(proto.getHostname(), proto.getPort());
    if (!addressFromProtobuf.equals(ret.getAddress())) {
        throw new RuntimeException("Mismatched address stored in ZK for " + ret + ": Stored protobuf was " + proto
            + ", address from our own configuration for this NameNode was " + ret.getAddress());
    }
    ret.setZkfcPort(proto.getZkfcPort());
    return ret;
}
Also used: ActiveNodeInfo (org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo), InetSocketAddress (java.net.InetSocketAddress), InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException)
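
Going the other direction, the bytes that dataToTarget decodes are produced with the generated protobuf builder for ActiveNodeInfo. The sketch below is a minimal, hypothetical round-trip: the builder setters are inferred from the getters used above (standard protobuf-java codegen), and the helper class, helper name, and values are illustrative, not the project's own serialization code.

import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;

// Hypothetical helper: builds the byte[] counterpart of the data parsed by
// dataToTarget, using the protobuf builder generated for ActiveNodeInfo.
public final class ActiveNodeInfoRoundTrip {

    static byte[] toZkData(String nameserviceId, String namenodeId, InetSocketAddress rpcAddr, int zkfcPort) {
        return ActiveNodeInfo.newBuilder()
            .setNameserviceId(nameserviceId)
            .setNamenodeId(namenodeId)
            .setHostname(rpcAddr.getHostName())
            .setPort(rpcAddr.getPort())
            .setZkfcPort(zkfcPort)
            .build()
            .toByteArray();
    }

    public static void main(String[] args) throws Exception {
        // Illustrative values only.
        byte[] data = toZkData("mycluster", "nn1", new InetSocketAddress("nn1.example.com", 8020), 8019);
        // parseFrom is the same call used in dataToTarget above.
        ActiveNodeInfo proto = ActiveNodeInfo.parseFrom(data);
        System.out.println(proto.getHostname() + ":" + proto.getPort());
    }
}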

Example 2 with ActiveNodeInfo

Use of org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo in project hbase by apache.

From class RestartActiveNameNodeAction, method perform:

@Override
public void perform() throws Exception {
    getLogger().info("Performing action: Restart active namenode");
    // HA must be configured for the nameservice backing the HBase root dir.
    Configuration conf = CommonFSUtils.getRootDir(getConf()).getFileSystem(getConf()).getConf();
    String nameServiceID = DFSUtil.getNamenodeNameServiceId(conf);
    if (!HAUtil.isHAEnabled(conf, nameServiceID)) {
        throw new Exception("HA for namenode is not enabled");
    }
    ZKWatcher zkw = null;
    RecoverableZooKeeper rzk = null;
    String activeNamenode = null;
    String hadoopHAZkNode = conf.get(ZK_PARENT_ZNODE_KEY, ZK_PARENT_ZNODE_DEFAULT);
    try {
        // Look under the HA parent znode for the ZKFC election children of this nameservice.
        zkw = new ZKWatcher(conf, "get-active-namenode", null);
        rzk = zkw.getRecoverableZooKeeper();
        String hadoopHAZkNodePath = ZNodePaths.joinZNode(hadoopHAZkNode, nameServiceID);
        List<String> subChildern = ZKUtil.listChildrenNoWatch(zkw, hadoopHAZkNodePath);
        for (String eachEntry : subChildern) {
            if (eachEntry.contains(ACTIVE_NN_LOCK_NAME)) {
                // The active lock znode stores a serialized ActiveNodeInfo;
                // its hostname identifies the currently active NameNode.
                byte[] data = rzk.getData(ZNodePaths.joinZNode(hadoopHAZkNodePath, ACTIVE_NN_LOCK_NAME), false, null);
                ActiveNodeInfo proto = ActiveNodeInfo.parseFrom(data);
                activeNamenode = proto.getHostname();
            }
        }
    } finally {
        if (zkw != null) {
            zkw.close();
        }
    }
    if (activeNamenode == null) {
        throw new Exception("No active Name node found in zookeeper under " + hadoopHAZkNode);
    }
    getLogger().info("Found active namenode host:" + activeNamenode);
    ServerName activeNNHost = ServerName.valueOf(activeNamenode, -1, -1);
    getLogger().info("Restarting Active NameNode :" + activeNamenode);
    restartNameNode(activeNNHost, sleepTime);
}
Also used: ActiveNodeInfo (org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo), RecoverableZooKeeper (org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper), Configuration (org.apache.hadoop.conf.Configuration), ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher), ServerName (org.apache.hadoop.hbase.ServerName)
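
For comparison, the same lookup can be done with a bare ZooKeeper client, without HBase's ZKWatcher. This is a minimal sketch assuming the default HA parent znode /hadoop-ha (ha.zookeeper.parent-znode) and an election lock child named ActiveStandbyElectorLock, which is what ACTIVE_NN_LOCK_NAME refers to above; the connect string and nameservice id are placeholders.

import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;
import org.apache.zookeeper.ZooKeeper;

// Hypothetical standalone reader: resolves the active NameNode host from the
// ZKFC lock znode, mirroring the loop in perform() above.
public final class ActiveNameNodeLookup {

    public static void main(String[] args) throws Exception {
        String zkQuorum = "zk1.example.com:2181"; // placeholder connect string
        String nameServiceId = "mycluster";       // placeholder nameservice id
        String parent = "/hadoop-ha/" + nameServiceId;

        ZooKeeper zk = new ZooKeeper(zkQuorum, 30000, event -> { });
        try {
            for (String child : zk.getChildren(parent, false)) {
                // The ephemeral lock znode carries the serialized ActiveNodeInfo.
                if (child.contains("ActiveStandbyElectorLock")) {
                    byte[] data = zk.getData(parent + "/" + child, false, null);
                    ActiveNodeInfo proto = ActiveNodeInfo.parseFrom(data);
                    System.out.println("Active NameNode: " + proto.getHostname() + ":" + proto.getPort());
                }
            }
        } finally {
            zk.close();
        }
    }
}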

Aggregations

ActiveNodeInfo (org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo): 2
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 1
InetSocketAddress (java.net.InetSocketAddress): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
ServerName (org.apache.hadoop.hbase.ServerName): 1
RecoverableZooKeeper (org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper): 1
ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher): 1