Example 1 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class VerifyingRSGroupAdminClient, method verify:

public void verify() throws IOException {
    Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
    Set<RSGroupInfo> zList = Sets.newHashSet();
    for (Result result : table.getScanner(new Scan())) {
        RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(result.getValue(RSGroupInfoManager.META_FAMILY_BYTES, RSGroupInfoManager.META_QUALIFIER_BYTES));
        groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto));
    }
    Assert.assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups()));
    try {
        String groupBasePath = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "rsgroup");
        for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
            byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode));
            if (data.length > 0) {
                ProtobufUtil.expectPBMagicPrefix(data);
                int pblen = ProtobufUtil.lengthOfPBMagic();
                // Skip the PB magic prefix; only the remaining bytes are protobuf data.
                ByteArrayInputStream bis = new ByteArrayInputStream(data, pblen, data.length - pblen);
                zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
            }
        }
        Assert.assertEquals(zList.size(), groupMap.size());
        for (RSGroupInfo rsGroupInfo : zList) {
            Assert.assertEquals(groupMap.get(rsGroupInfo.getName()), rsGroupInfo);
        }
    } catch (KeeperException | DeserializationException | InterruptedException e) {
        throw new IOException("ZK verification failed", e);
    }
}
Also used : RSGroupProtos(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) Result(org.apache.hadoop.hbase.client.Result) ByteArrayInputStream(java.io.ByteArrayInputStream) Scan(org.apache.hadoop.hbase.client.Scan) KeeperException(org.apache.zookeeper.KeeperException)
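
All of the protobuf reads on this page rely on HBase's magic-prefix convention: protobuf payloads stored in ZooKeeper (and in some system tables) are prepended with a short magic marker so readers can tell them apart from legacy encodings. Below is a minimal round-trip sketch using only the ProtobufUtil helpers already visible above; the class name PBMagicRoundTrip is illustrative, not HBase API.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;

class PBMagicRoundTrip {
    // Serialize an RSGroupInfo with the PB magic prefix, then parse it back
    // the same way verify() does above.
    static RSGroupProtos.RSGroupInfo roundTrip() throws IOException {
        byte[] data = ProtobufUtil.prependPBMagic(
            RSGroupProtos.RSGroupInfo.newBuilder().setName("default").build().toByteArray());
        if (!ProtobufUtil.isPBMagicPrefix(data)) {
            throw new IOException("missing PB magic prefix");
        }
        int pblen = ProtobufUtil.lengthOfPBMagic();
        // Skip the magic bytes before handing the stream to the protobuf parser.
        return RSGroupProtos.RSGroupInfo.parseFrom(
            new ByteArrayInputStream(data, pblen, data.length - pblen));
    }
}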

Example 2 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class AccessControlLists, method readPermissions:

/**
   * Reads a set of permissions from the given byte array: either protobuf-encoded
   * data (recognized by the PB magic prefix) or legacy
   * {@link org.apache.hadoop.io.Writable} instances.
   */
public static ListMultimap<String, TablePermission> readPermissions(byte[] data, Configuration conf) throws DeserializationException {
    if (ProtobufUtil.isPBMagicPrefix(data)) {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        try {
            AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder();
            ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
            return AccessControlUtil.toUserTablePermissions(builder.build());
        } catch (IOException e) {
            throw new DeserializationException(e);
        }
    } else {
        // TODO: We have to re-write non-PB data as PB encoded. Otherwise we will carry old Writables
        // forever (here and a couple of other places).
        ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
        try {
            DataInput in = new DataInputStream(new ByteArrayInputStream(data));
            int length = in.readInt();
            for (int i = 0; i < length; i++) {
                String user = Text.readString(in);
                List<TablePermission> userPerms = readWritablePermissions(in, conf);
                perms.putAll(user, userPerms);
            }
        } catch (IOException | ClassNotFoundException e) {
            throw new DeserializationException(e);
        }
        return perms;
    }
}
Also used : IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) DataInput(java.io.DataInput) ByteArrayInputStream(java.io.ByteArrayInputStream)
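
A hedged usage sketch: in HBase the serialized bytes handed to readPermissions typically come from a cell value in the hbase:acl table. The wrapper below (AclDump and dumpPermissions are illustrative names, not HBase API; the ListMultimap import is the plain Guava one, matching the ArrayListMultimap usage in the example) simply parses and prints the resulting multimap.

import java.util.Map;
import com.google.common.collect.ListMultimap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.TablePermission;

class AclDump {
    // Parse a serialized ACL entry and print one line per user/permission pair.
    static void dumpPermissions(byte[] aclCellValue, Configuration conf)
            throws DeserializationException {
        ListMultimap<String, TablePermission> perms =
            AccessControlLists.readPermissions(aclCellValue, conf);
        for (Map.Entry<String, TablePermission> entry : perms.entries()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}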

Example 3 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class ZKSplitLog, method getRegionFlushedSequenceId:

/**
   * Used by distributedLogReplay to fetch the last flushed sequence id of a region from ZK.
   * @param zkw the ZooKeeper watcher to read through
   * @param serverName the failed region server whose history to consult
   * @param encodedRegionName the encoded name of the region
   * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName</code>
   * @throws IOException if the znode cannot be read
   */
public static RegionStoreSequenceIds getRegionFlushedSequenceId(ZooKeeperWatcher zkw, String serverName, String encodedRegionName) throws IOException {
    // When SplitLogWorker recovers a region by directly replaying unflushed WAL edits,
    // the last flushed sequence id changes as the newly assigned RS flushes writes to
    // the region. If that RS fails in turn (a chained-failure scenario), the sequence
    // id namespace changes again, because a sequence id is only valid for a particular
    // RS instance. Therefore, in this mode we fetch last flushed sequence ids from ZK,
    // where a history is kept for each failed RS instance.
    RegionStoreSequenceIds result = null;
    String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName);
    nodePath = ZKUtil.joinZNode(nodePath, serverName);
    try {
        byte[] data;
        try {
            data = ZKUtil.getData(zkw, nodePath);
        } catch (InterruptedException e) {
            throw new InterruptedIOException();
        }
        if (data != null) {
            result = ZKUtil.parseRegionStoreSequenceIds(data);
        }
    } catch (KeeperException e) {
        throw new IOException("Cannot get lastFlushedSequenceId from ZooKeeper for server=" + serverName + "; region=" + encodedRegionName, e);
    } catch (DeserializationException e) {
        LOG.warn("Can't parse last flushed sequence Id from znode:" + nodePath, e);
    }
    return result;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException) RegionStoreSequenceIds(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) KeeperException(org.apache.zookeeper.KeeperException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
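
Since the method returns null both when no sequence id was recorded and when the znode failed to parse, callers need a sentinel value. A sketch assuming HConstants.NO_SEQNUM serves that role (the wrapper class is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

class LastFlushedLookup {
    // Resolve the last flushed sequence id for a region on a failed server,
    // falling back to NO_SEQNUM when nothing usable was recorded in ZK.
    static long lastFlushedSequenceId(ZooKeeperWatcher zkw, String serverName,
            String encodedRegionName) throws IOException {
        RegionStoreSequenceIds ids =
            ZKSplitLog.getRegionFlushedSequenceId(zkw, serverName, encodedRegionName);
        return ids == null ? HConstants.NO_SEQNUM : ids.getLastFlushedSequenceId();
    }
}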

Example 4 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class MasterAddressTracker, method deleteIfEquals:

/**
   * Delete the master znode if its content is the same as the given content.
   * @param zkw must not be null
   * @param content must not be null
   * @return true if the znode held <code>content</code> and was deleted
   */
public static boolean deleteIfEquals(ZooKeeperWatcher zkw, final String content) {
    if (content == null) {
        throw new IllegalArgumentException("Content must not be null");
    }
    try {
        Stat stat = new Stat();
        byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.znodePaths.masterAddressZNode, stat);
        ServerName sn = ProtobufUtil.parseServerNameFrom(data);
        if (sn != null && content.equals(sn.toString())) {
            return (ZKUtil.deleteNode(zkw, zkw.znodePaths.masterAddressZNode, stat.getVersion()));
        }
    } catch (KeeperException | DeserializationException e) {
        LOG.warn("Can't get or delete the master znode", e);
    }
    return false;
}
Also used : Stat(org.apache.zookeeper.data.Stat) ServerName(org.apache.hadoop.hbase.ServerName) KeeperException(org.apache.zookeeper.KeeperException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
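
A sketch of the intended call site: a master clears the address znode on shutdown, but only if the znode still holds its own address, so a newer master's registration is never deleted by a stale instance. Class and method names here are illustrative, not HBase API.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

class MasterZNodeCleanup {
    // Returns true only if the znode still named this master and was removed.
    static boolean clearOwnZNode(ZooKeeperWatcher zkw, ServerName self) {
        return MasterAddressTracker.deleteIfEquals(zkw, self.toString());
    }
}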

Example 5 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class MetaTableLocator, method getMetaRegionState:

/**
   * Load the meta region state from the meta server ZNode.
   * @param zkw the ZooKeeper watcher to read through
   * @param replicaId the id of the meta replica to look up
   * @return the region state of the given meta replica
   * @throws KeeperException if the znode cannot be read
   */
public static RegionState getMetaRegionState(ZooKeeperWatcher zkw, int replicaId) throws KeeperException {
    RegionState.State state = RegionState.State.OPEN;
    ServerName serverName = null;
    try {
        byte[] data = ZKUtil.getData(zkw, zkw.znodePaths.getZNodeForReplica(replicaId));
        if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) {
            try {
                int prefixLen = ProtobufUtil.lengthOfPBMagic();
                ZooKeeperProtos.MetaRegionServer rl = ZooKeeperProtos.MetaRegionServer.PARSER.parseFrom(data, prefixLen, data.length - prefixLen);
                if (rl.hasState()) {
                    state = RegionState.State.convert(rl.getState());
                }
                HBaseProtos.ServerName sn = rl.getServer();
                serverName = ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode());
            } catch (InvalidProtocolBufferException e) {
                throw new DeserializationException("Unable to parse meta region location", e);
            }
        } else {
            // old style of meta region location?
            serverName = ProtobufUtil.parseServerNameFrom(data);
        }
    } catch (DeserializationException e) {
        throw ZKUtil.convert(e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (serverName == null) {
        state = RegionState.State.OFFLINE;
    }
    return new RegionState(RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, replicaId), state, serverName);
}
Also used : MetaRegionServer(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer) RegionState(org.apache.hadoop.hbase.master.RegionState) ServerName(org.apache.hadoop.hbase.ServerName) InvalidProtocolBufferException(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException) ZooKeeperProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
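
A brief usage sketch, assuming the default meta replica: the returned RegionState carries both the hosting server and the open/offline state, so a caller can check isOpened() before trusting the location (the wrapper class is illustrative):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

class MetaLocation {
    // Return the server hosting the default meta replica, or null when meta is
    // not currently marked OPEN in ZooKeeper.
    static ServerName openMetaServer(ZooKeeperWatcher zkw) throws KeeperException {
        RegionState state =
            MetaTableLocator.getMetaRegionState(zkw, HRegionInfo.DEFAULT_REPLICA_ID);
        return state.isOpened() ? state.getServerName() : null;
    }
}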

Aggregations

DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException): 83
IOException (java.io.IOException): 57
InvalidProtocolBufferException (org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException): 15
FilterProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos): 13
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 12
KeeperException (org.apache.zookeeper.KeeperException): 12
ArrayList (java.util.ArrayList): 11
ServerName (org.apache.hadoop.hbase.ServerName): 9
Cell (org.apache.hadoop.hbase.Cell): 8
CompareOperator (org.apache.hadoop.hbase.CompareOperator): 8
InterruptedIOException (java.io.InterruptedIOException): 7
CellVisibility (org.apache.hadoop.hbase.security.visibility.CellVisibility): 7
ByteArrayInputStream (java.io.ByteArrayInputStream): 6
Tag (org.apache.hadoop.hbase.Tag): 6
HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos): 6
Map (java.util.Map): 5
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 5
TableName (org.apache.hadoop.hbase.TableName): 5
FilterList (org.apache.hadoop.hbase.filter.FilterList): 5
List (java.util.List): 4