
Example 1 with KeeperException

Use of org.apache.zookeeper.KeeperException in project hbase by apache.

The class ConnectionImplementation, method checkIfBaseNodeAvailable:

private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw) throws MasterNotRunningException {
    String errorMsg;
    try {
        if (ZKUtil.checkExists(zkw, zkw.znodePaths.baseZNode) == -1) {
            errorMsg = "The node " + zkw.znodePaths.baseZNode + " is not in ZooKeeper. " + "It should have been written by the master. " + "Check the value configured in 'zookeeper.znode.parent'. " + "There could be a mismatch with the one configured in the master.";
            LOG.error(errorMsg);
            throw new MasterNotRunningException(errorMsg);
        }
    } catch (KeeperException e) {
        errorMsg = "Can't get connection to ZooKeeper: " + e.getMessage();
        LOG.error(errorMsg);
        throw new MasterNotRunningException(errorMsg, e);
    }
}
Also used: MasterNotRunningException (org.apache.hadoop.hbase.MasterNotRunningException), KeeperException (org.apache.zookeeper.KeeperException)
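
HBase's ZKUtil.checkExists shown above returns -1 when the znode is absent and throws KeeperException on ZooKeeper-level failures. For readers who want the same existence check without the HBase wrappers, here is a minimal sketch against the plain org.apache.zookeeper.ZooKeeper client; the connect string, session timeout, and the /hbase path are illustrative assumptions, not values taken from the example.

import java.io.IOException;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class BaseNodeCheck {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Connect string and session timeout are placeholders for this sketch.
        ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, event -> { });
        try {
            // exists() returns null when the znode is missing; a KeeperException
            // signals a ZooKeeper-level failure (connection loss, auth, ...).
            Stat stat = zk.exists("/hbase", false);
            if (stat == null) {
                System.err.println("Base znode /hbase is not in ZooKeeper");
            }
        } catch (KeeperException e) {
            System.err.println("Can't get connection to ZooKeeper: " + e.getMessage());
        } finally {
            zk.close();
        }
    }
}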

Example 2 with KeeperException

Use of org.apache.zookeeper.KeeperException in project hbase by apache.

The class VerifyingRSGroupAdminClient, method verify:

public void verify() throws IOException {
    Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
    Set<RSGroupInfo> zList = Sets.newHashSet();
    for (Result result : table.getScanner(new Scan())) {
        RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(
            result.getValue(RSGroupInfoManager.META_FAMILY_BYTES, RSGroupInfoManager.META_QUALIFIER_BYTES));
        groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto));
    }
    Assert.assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups()));
    try {
        String groupBasePath = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "rsgroup");
        for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
            byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode));
            if (data.length > 0) {
                ProtobufUtil.expectPBMagicPrefix(data);
                ByteArrayInputStream bis = new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
                zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
            }
        }
        Assert.assertEquals(zList.size(), groupMap.size());
        for (RSGroupInfo RSGroupInfo : zList) {
            Assert.assertTrue(groupMap.get(RSGroupInfo.getName()).equals(RSGroupInfo));
        }
    } catch (KeeperException | DeserializationException | InterruptedException e) {
        throw new IOException("ZK verification failed", e);
    }
}
Also used: RSGroupProtos (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos), IOException (java.io.IOException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException), Result (org.apache.hadoop.hbase.client.Result), ByteArrayInputStream (java.io.ByteArrayInputStream), Scan (org.apache.hadoop.hbase.client.Scan), KeeperException (org.apache.zookeeper.KeeperException)
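
The verify method above lists every child of the rsgroup znode and reads each child's data before deserializing it. A bare-bones sketch of that list-then-read traversal with the plain ZooKeeper client follows; the helper name and the assumption that a connected ZooKeeper handle is passed in are mine, not part of the HBase code.

import java.util.List;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class ListGroupZNodes {
    // Prints the size of the data stored under every child of groupBasePath.
    static void dumpChildren(ZooKeeper zk, String groupBasePath)
            throws KeeperException, InterruptedException {
        List<String> children = zk.getChildren(groupBasePath, false);
        for (String child : children) {
            byte[] data = zk.getData(groupBasePath + "/" + child, false, null);
            int length = (data == null) ? 0 : data.length;
            System.out.println(child + ": " + length + " bytes");
        }
    }
}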

Example 3 with KeeperException

Use of org.apache.zookeeper.KeeperException in project hbase by apache.

The class ZKSplitLog, method getRegionFlushedSequenceId:

/**
   * This function is used in distributedLogReplay to fetch the last flushed sequence id of a
   * region from ZK.
   * @param zkw zookeeper connection to use
   * @param serverName the failed region server whose znode records the flushed sequence ids
   * @param encodedRegionName the encoded name of the region being recovered
   * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName</code>
   * @throws IOException if the sequence ids cannot be read from ZooKeeper
   */
public static RegionStoreSequenceIds getRegionFlushedSequenceId(ZooKeeperWatcher zkw, String serverName, String encodedRegionName) throws IOException {
    // when SplitLogWorker recovers a region by directly replaying unflushed WAL edits,
    // last flushed sequence Id changes when newly assigned RS flushes writes to the region.
    // If the newly assigned RS fails again (a chained RS failure scenario), the last flushed
    // sequence Id namespace (a sequence Id is only valid for a particular RS instance) changes
    // when a different newly assigned RS flushes the region.
    // Therefore, in this mode we need to fetch last sequence Ids from ZK where we keep history of
    // last flushed sequence Id for each failed RS instance.
    RegionStoreSequenceIds result = null;
    String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName);
    nodePath = ZKUtil.joinZNode(nodePath, serverName);
    try {
        byte[] data;
        try {
            data = ZKUtil.getData(zkw, nodePath);
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }
        if (data != null) {
            result = ZKUtil.parseRegionStoreSequenceIds(data);
        }
    } catch (KeeperException e) {
        throw new IOException("Cannot get lastFlushedSequenceId from ZooKeeper for server=" + serverName + "; region=" + encodedRegionName, e);
    } catch (DeserializationException e) {
        LOG.warn("Can't parse last flushed sequence Id from znode:" + nodePath, e);
    }
    return result;
}
Also used: InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), RegionStoreSequenceIds (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds), KeeperException (org.apache.zookeeper.KeeperException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException)
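
getRegionFlushedSequenceId translates InterruptedException into InterruptedIOException and wraps KeeperException in an IOException so that callers only have to handle IO-style failures. The same translation pattern, reduced to a sketch against the plain ZooKeeper client (the class and method names here are made up for illustration):

import java.io.IOException;
import java.io.InterruptedIOException;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class ZkReadHelper {
    // Reads a znode, translating ZooKeeper exceptions into IOExceptions.
    static byte[] readOrThrowIOException(ZooKeeper zk, String nodePath) throws IOException {
        try {
            return zk.getData(nodePath, false, null);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            InterruptedIOException iioe = new InterruptedIOException("Interrupted reading " + nodePath);
            iioe.initCause(e);
            throw iioe;
        } catch (KeeperException e) {
            throw new IOException("Cannot read " + nodePath + " from ZooKeeper", e);
        }
    }
}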

Example 4 with KeeperException

Use of org.apache.zookeeper.KeeperException in project hbase by apache.

The class MasterAddressTracker, method deleteIfEquals:

/**
   * Delete the master znode if its content is the same as the parameter.
   * @param zkw must not be null
   * @param content must not be null
   * @return true if the znode was deleted; false otherwise
   */
public static boolean deleteIfEquals(ZooKeeperWatcher zkw, final String content) {
    if (content == null) {
        throw new IllegalArgumentException("Content must not be null");
    }
    try {
        Stat stat = new Stat();
        byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.znodePaths.masterAddressZNode, stat);
        ServerName sn = ProtobufUtil.parseServerNameFrom(data);
        if (sn != null && content.equals(sn.toString())) {
            return (ZKUtil.deleteNode(zkw, zkw.znodePaths.masterAddressZNode, stat.getVersion()));
        }
    } catch (KeeperException e) {
        LOG.warn("Can't get or delete the master znode", e);
    } catch (DeserializationException e) {
        LOG.warn("Can't get or delete the master znode", e);
    }
    return false;
}
Also used: Stat (org.apache.zookeeper.data.Stat), ServerName (org.apache.hadoop.hbase.ServerName), KeeperException (org.apache.zookeeper.KeeperException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException)
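
deleteIfEquals is a check-then-delete: the Stat filled in by the read supplies a znode version, and passing that version to the delete makes the delete fail rather than remove data that another client has modified in between. Below is a minimal sketch of that versioned delete with the plain ZooKeeper client; the string comparison (instead of HBase's ServerName parsing) is a simplifying assumption.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class DeleteIfEqualsSketch {
    // Deletes the znode only if it still holds expectedContent and has not
    // been modified since we read it (the version check via Stat).
    static boolean deleteIfEquals(ZooKeeper zk, String path, String expectedContent) {
        try {
            Stat stat = new Stat();
            byte[] data = zk.getData(path, false, stat);
            byte[] expected = expectedContent.getBytes(StandardCharsets.UTF_8);
            if (data != null && Arrays.equals(data, expected)) {
                zk.delete(path, stat.getVersion());
                return true;
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } catch (KeeperException e) {
            System.err.println("Can't get or delete znode " + path + ": " + e.getMessage());
        }
        return false;
    }
}

If another client modifies the znode between the read and the delete, the delete surfaces a KeeperException.BadVersionException through the same catch block and the method returns false.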

Example 5 with KeeperException

Use of org.apache.zookeeper.KeeperException in project hbase by apache.

The class MetaTableLocator, method setMetaLocation:

/**
   * Sets the location of <code>hbase:meta</code> in ZooKeeper to the
   * specified server address.
   * @param zookeeper zookeeper reference
   * @param serverName the server hosting <code>hbase:meta</code>
   * @param replicaId the replica id of the meta region
   * @param state the state of the region
   * @throws KeeperException if writing the location to ZooKeeper fails
   */
public static void setMetaLocation(ZooKeeperWatcher zookeeper, ServerName serverName, int replicaId, RegionState.State state) throws KeeperException {
    LOG.info("Setting hbase:meta region location in ZooKeeper as " + serverName);
    // Make the MetaRegionServer pb and then get its bytes and save this as
    // the znode content.
    MetaRegionServer pbrsr = MetaRegionServer.newBuilder()
        .setServer(ProtobufUtil.toServerName(serverName))
        .setRpcVersion(HConstants.RPC_CURRENT_VERSION)
        .setState(state.convert())
        .build();
    byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
    try {
        ZKUtil.setData(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data);
    } catch (KeeperException.NoNodeException nne) {
        if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
            LOG.debug("META region location doesn't exist, create it");
        } else {
            LOG.debug("META region location doesn't exist for replicaId " + replicaId + ", create it");
        }
        ZKUtil.createAndWatch(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data);
    }
}
Also used: MetaRegionServer (org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer), KeeperException (org.apache.zookeeper.KeeperException)
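
setMetaLocation tries a plain setData first and only creates the znode when ZooKeeper answers with NoNodeException. The same set-or-create idiom with the plain ZooKeeper client is sketched below; the open ACL and persistent create mode are assumptions of the sketch, not necessarily what ZKUtil.createAndWatch uses.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class SetOrCreateSketch {
    // Writes data to the znode, creating it if it does not exist yet.
    static void setOrCreate(ZooKeeper zk, String path, byte[] data)
            throws KeeperException, InterruptedException {
        try {
            // Version -1 means "overwrite regardless of the current version".
            zk.setData(path, data, -1);
        } catch (KeeperException.NoNodeException nne) {
            zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
    }
}

Note that a competing writer can still create the node between the failed setData and the create call, in which case create throws KeeperException.NodeExistsException; a production caller would retry or tolerate that case.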

Aggregations

KeeperException (org.apache.zookeeper.KeeperException): 566
IOException (java.io.IOException): 188
Stat (org.apache.zookeeper.data.Stat): 127
ZooKeeper (org.apache.zookeeper.ZooKeeper): 87
ArrayList (java.util.ArrayList): 51
NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException): 45
Watcher (org.apache.zookeeper.Watcher): 39
WatchedEvent (org.apache.zookeeper.WatchedEvent): 38
Test (org.junit.jupiter.api.Test): 38
CountDownLatch (java.util.concurrent.CountDownLatch): 30
SolrException (org.apache.solr.common.SolrException): 30
HashMap (java.util.HashMap): 29
List (java.util.List): 28
ACL (org.apache.zookeeper.data.ACL): 27
Test (org.junit.Test): 27
HeliosRuntimeException (com.spotify.helios.common.HeliosRuntimeException): 25
ServerName (org.apache.hadoop.hbase.ServerName): 24
Map (java.util.Map): 23
IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter): 23
InterruptedIOException (java.io.InterruptedIOException): 20