
Example 11 with KeeperException

use of org.apache.zookeeper.KeeperException in project hbase by apache.

the class ZKSplitLog method getRegionFlushedSequenceId.

/**
   * This function is used in distributedLogReplay to fetch the last flushed sequence id from ZK.
   * @param zkw reference to the ZooKeeper watcher
   * @param serverName the failed region server whose WAL edits are being replayed
   * @param encodedRegionName the encoded name of the region being recovered
   * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName</code>
   * @throws IOException if the data cannot be fetched from ZooKeeper
   */
public static RegionStoreSequenceIds getRegionFlushedSequenceId(ZooKeeperWatcher zkw, String serverName, String encodedRegionName) throws IOException {
    // when SplitLogWorker recovers a region by directly replaying unflushed WAL edits,
    // last flushed sequence Id changes when newly assigned RS flushes writes to the region.
    // If the newly assigned RS fails again(a chained RS failures scenario), the last flushed
    // sequence Id name space (sequence Id only valid for a particular RS instance), changes
    // when different newly assigned RS flushes the region.
    // Therefore, in this mode we need to fetch last sequence Ids from ZK where we keep history of
    // last flushed sequence Id for each failed RS instance.
    RegionStoreSequenceIds result = null;
    String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName);
    nodePath = ZKUtil.joinZNode(nodePath, serverName);
    try {
        byte[] data;
        try {
            data = ZKUtil.getData(zkw, nodePath);
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }
        if (data != null) {
            result = ZKUtil.parseRegionStoreSequenceIds(data);
        }
    } catch (KeeperException e) {
        throw new IOException("Cannot get lastFlushedSequenceId from ZooKeeper for server=" + serverName + "; region=" + encodedRegionName, e);
    } catch (DeserializationException e) {
        LOG.warn("Can't parse last flushed sequence Id from znode:" + nodePath, e);
    }
    return result;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) RegionStoreSequenceIds(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) KeeperException(org.apache.zookeeper.KeeperException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
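
A minimal caller sketch, not taken from the project source, assuming an already-connected ZooKeeperWatcher zkw, the failed server and region names from the recovery context, and the generated protobuf getter for the last_flushed_sequence_id field:

// Hypothetical usage sketch: fetch the sequence ids recorded for a recovering region.
// zkw, failedServerName, encodedRegionName and LOG are assumed to exist in the caller.
RegionStoreSequenceIds ids =
    ZKSplitLog.getRegionFlushedSequenceId(zkw, failedServerName, encodedRegionName);
if (ids != null) {
    // lastFlushedSequenceId is a required field of the RegionStoreSequenceIds message
    LOG.debug("Last flushed sequence id: " + ids.getLastFlushedSequenceId());
}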

Example 12 with KeeperException

use of org.apache.zookeeper.KeeperException in project hbase by apache.

the class MasterAddressTracker method deleteIfEquals.

/**
   * Delete the master znode if its content is the same as the parameter.
   * @param zkw must not be null
   * @param content must not be null
   * @return true if the master znode was successfully deleted, false otherwise
   */
public static boolean deleteIfEquals(ZooKeeperWatcher zkw, final String content) {
    if (content == null) {
        throw new IllegalArgumentException("Content must not be null");
    }
    try {
        Stat stat = new Stat();
        byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.znodePaths.masterAddressZNode, stat);
        ServerName sn = ProtobufUtil.parseServerNameFrom(data);
        if (sn != null && content.equals(sn.toString())) {
            return (ZKUtil.deleteNode(zkw, zkw.znodePaths.masterAddressZNode, stat.getVersion()));
        }
    } catch (KeeperException | DeserializationException e) {
        LOG.warn("Can't get or delete the master znode", e);
    }
    return false;
}
Also used : Stat(org.apache.zookeeper.data.Stat) ServerName(org.apache.hadoop.hbase.ServerName) KeeperException(org.apache.zookeeper.KeeperException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
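
A hedged usage sketch, assuming a live ZooKeeperWatcher zkw and the ServerName of a master believed to have expired:

// Hypothetical usage sketch: clear the active-master znode only if it still names
// the expired master. zkw, expiredMaster and LOG are assumed to exist in the caller.
boolean deleted = MasterAddressTracker.deleteIfEquals(zkw, expiredMaster.toString());
if (!deleted) {
    LOG.debug("Master znode was already gone or points at a different master");
}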

Example 13 with KeeperException

use of org.apache.zookeeper.KeeperException in project hbase by apache.

the class MetaTableLocator method setMetaLocation.

/**
   * Sets the location of <code>hbase:meta</code> in ZooKeeper to the
   * specified server address.
   * @param zookeeper reference to the ZooKeeper watcher
   * @param serverName the server hosting <code>hbase:meta</code>
   * @param replicaId the replica id of the <code>hbase:meta</code> region
   * @param state the state of the region
   * @throws KeeperException if a ZooKeeper operation fails
   */
public static void setMetaLocation(ZooKeeperWatcher zookeeper, ServerName serverName, int replicaId, RegionState.State state) throws KeeperException {
    LOG.info("Setting hbase:meta region location in ZooKeeper as " + serverName);
    // Make the MetaRegionServer pb and then get its bytes and save this as
    // the znode content.
    MetaRegionServer pbrsr = MetaRegionServer.newBuilder().setServer(ProtobufUtil.toServerName(serverName)).setRpcVersion(HConstants.RPC_CURRENT_VERSION).setState(state.convert()).build();
    byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray());
    try {
        ZKUtil.setData(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data);
    } catch (KeeperException.NoNodeException nne) {
        if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) {
            LOG.debug("META region location doesn't exist, create it");
        } else {
            LOG.debug("META region location doesn't exist for replicaId " + replicaId + ", create it");
        }
        ZKUtil.createAndWatch(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data);
    }
}
Also used : MetaRegionServer(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer) KeeperException(org.apache.zookeeper.KeeperException)
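
A hedged caller sketch, assuming zookeeper is a connected ZooKeeperWatcher, metaServerName is the server now hosting hbase:meta, and the region is being published as OPEN:

// Hypothetical usage sketch: publish the default replica's hbase:meta location.
// zookeeper, metaServerName and LOG are assumed to exist in the caller.
try {
    MetaTableLocator.setMetaLocation(zookeeper, metaServerName,
        HRegionInfo.DEFAULT_REPLICA_ID, RegionState.State.OPEN);
} catch (KeeperException e) {
    LOG.error("Failed to set hbase:meta location in ZooKeeper", e);
}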

Example 14 with KeeperException

use of org.apache.zookeeper.KeeperException in project hbase by apache.

the class MetaTableLocator method blockUntilAvailable.

/**
   * Wait until the primary meta region is available. Get the secondary
   * locations as well but don't block for those.
   * @param zkw reference to the ZooKeeper watcher
   * @param timeout maximum time to wait for the primary, in milliseconds
   * @param conf the cluster configuration
   * @return list of ServerNames for the meta replicas, or null if we timed out waiting for the primary
   * @throws InterruptedException if interrupted while waiting
   */
public List<ServerName> blockUntilAvailable(final ZooKeeperWatcher zkw, final long timeout, Configuration conf) throws InterruptedException {
    int numReplicasConfigured = 1;
    List<ServerName> servers = new ArrayList<>();
    // Make the blocking call first so that we do the wait to know
    // the znodes are all in place or timeout.
    ServerName server = blockUntilAvailable(zkw, timeout);
    if (server == null)
        return null;
    servers.add(server);
    try {
        List<String> metaReplicaNodes = zkw.getMetaReplicaNodes();
        numReplicasConfigured = metaReplicaNodes.size();
    } catch (KeeperException e) {
        LOG.warn("Got ZK exception " + e);
    }
    for (int replicaId = 1; replicaId < numReplicasConfigured; replicaId++) {
        // return all replica locations for the meta
        servers.add(getMetaRegionLocation(zkw, replicaId));
    }
    return servers;
}
Also used : ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) KeeperException(org.apache.zookeeper.KeeperException)
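
A hedged caller sketch, assuming zkw and conf come from the running cluster, a 60 second wait is acceptable, and a MetaTableLocator instance can be created directly:

// Hypothetical usage sketch: block up to 60 s for the primary hbase:meta location,
// then use any replica locations that were also found. zkw, conf and LOG are assumed.
MetaTableLocator locator = new MetaTableLocator();
List<ServerName> metaLocations = locator.blockUntilAvailable(zkw, 60000L, conf);
if (metaLocations == null) {
    LOG.warn("Timed out waiting for hbase:meta to be assigned");
}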

Example 15 with KeeperException

use of org.apache.zookeeper.KeeperException in project hbase by apache.

the class RecoverableZooKeeper method getAcl.

/**
   * getAcl is an idempotent operation. Retry before throwing an exception.
   * @param path the znode path
   * @param stat output parameter that will be populated with the znode's Stat
   * @return list of ACLs set on the znode
   */
public List<ACL> getAcl(String path, Stat stat) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.getAcl");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                return checkZk().getACL(path, stat);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "getAcl");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}
Also used : RetryCounter(org.apache.hadoop.hbase.util.RetryCounter) TraceScope(org.apache.htrace.TraceScope) KeeperException(org.apache.zookeeper.KeeperException)
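
A hedged caller sketch, assuming recoverableZk is an existing RecoverableZooKeeper and path names a znode that exists:

// Hypothetical usage sketch: read ACLs through the retrying wrapper.
// recoverableZk, path and LOG are assumed to exist in the caller.
Stat stat = new Stat();
List<ACL> acls = recoverableZk.getAcl(path, stat);
for (ACL acl : acls) {
    LOG.debug("ACL " + acl + " (znode aversion=" + stat.getAversion() + ")");
}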

Aggregations

KeeperException (org.apache.zookeeper.KeeperException): 345
IOException (java.io.IOException): 114
Stat (org.apache.zookeeper.data.Stat): 79
ZooKeeper (org.apache.zookeeper.ZooKeeper): 54
Test (org.junit.Test): 37
NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException): 36
ArrayList (java.util.ArrayList): 30
SolrException (org.apache.solr.common.SolrException): 30
HeliosRuntimeException (com.spotify.helios.common.HeliosRuntimeException): 24
HashMap (java.util.HashMap): 21
WatchedEvent (org.apache.zookeeper.WatchedEvent): 20
Watcher (org.apache.zookeeper.Watcher): 20
InterruptedIOException (java.io.InterruptedIOException): 19
Map (java.util.Map): 19
ZooKeeperClient (com.spotify.helios.servicescommon.coordination.ZooKeeperClient): 17
ServerName (org.apache.hadoop.hbase.ServerName): 15
ACL (org.apache.zookeeper.data.ACL): 15
List (java.util.List): 14
CountDownLatch (java.util.concurrent.CountDownLatch): 14
RetryCounter (org.apache.hadoop.hbase.util.RetryCounter): 13