
Example 26 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class Mutation method toCellVisibility.

/**
 * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
 *
 * @param protoBytes serialized protocol buffer CellVisibility bytes
 * @return the converted client CellVisibility
 * @throws DeserializationException if the bytes cannot be parsed as a CellVisibility message
 */
private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
    if (protoBytes == null)
        return null;
    ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
    ClientProtos.CellVisibility proto = null;
    try {
        ProtobufUtil.mergeFrom(builder, protoBytes);
        proto = builder.build();
    } catch (IOException e) {
        throw new DeserializationException(e);
    }
    return toCellVisibility(proto);
}
Also used : CellVisibility(org.apache.hadoop.hbase.security.visibility.CellVisibility) IOException(java.io.IOException) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
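
For illustration, here is a minimal round-trip sketch (not HBase code) of the same wrap-and-rethrow shape: protobuf parse failures surface as the checked DeserializationException. It assumes the shaded ProtobufUtil shown above and that the generated CellVisibility message exposes its expression field via setExpression/getExpression.

import java.io.IOException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

public class CellVisibilityParseSketch {

    // Same pattern as Mutation#toCellVisibility, but returning the raw proto message.
    static ClientProtos.CellVisibility parse(byte[] protoBytes) throws DeserializationException {
        ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
        try {
            ProtobufUtil.mergeFrom(builder, protoBytes);
        } catch (IOException e) {
            throw new DeserializationException(e);
        }
        return builder.build();
    }

    public static void main(String[] args) throws Exception {
        // Round trip: serialize a visibility expression, then parse it back.
        byte[] ok = ClientProtos.CellVisibility.newBuilder().setExpression("secret&internal").build().toByteArray();
        System.out.println(parse(ok).getExpression());
        try {
            parse(new byte[] { (byte) 0xFF });  // deliberately invalid protobuf bytes
        } catch (DeserializationException expected) {
            System.out.println("caught: " + expected);
        }
    }
}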

Example 27 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class MetaRegionLocationCache method getMetaRegionLocation.

/**
 * Gets the HRegionLocation for a given meta replica ID. Renews the watch on the znode for
 * future updates.
 * @param replicaId ReplicaID of the region.
 * @return HRegionLocation for the meta replica.
 * @throws KeeperException if there is any issue fetching/parsing the serialized data.
 */
private HRegionLocation getMetaRegionLocation(int replicaId) throws KeeperException {
    RegionState metaRegionState;
    try {
        byte[] data = ZKUtil.getDataAndWatch(watcher, watcher.getZNodePaths().getZNodeForReplica(replicaId));
        metaRegionState = ProtobufUtil.parseMetaRegionStateFrom(data, replicaId);
    } catch (DeserializationException e) {
        throw ZKUtil.convert(e);
    }
    return new HRegionLocation(metaRegionState.getRegion(), metaRegionState.getServerName());
}
Also used : RegionState(org.apache.hadoop.hbase.master.RegionState) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
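
The ZKUtil.convert call above is how a parse failure is allowed to travel through ZooKeeper-facing signatures. A small generic sketch of the same pattern follows; the ZNodeParser interface is hypothetical, not an HBase type.

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.zookeeper.KeeperException;

public class ZNodeParseSketch {

    // Hypothetical functional interface standing in for parsers such as
    // ProtobufUtil.parseMetaRegionStateFrom.
    interface ZNodeParser<T> {
        T parse(byte[] data) throws DeserializationException;
    }

    // Re-throws the checked DeserializationException as a KeeperException so callers
    // only have to handle ZooKeeper-flavored failures, as in getMetaRegionLocation above.
    static <T> T parseZNodeData(byte[] data, ZNodeParser<T> parser) throws KeeperException {
        try {
            return parser.parse(data);
        } catch (DeserializationException e) {
            throw ZKUtil.convert(e);
        }
    }
}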

Example 28 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class SplitLogTask method parseFrom.

/**
 * @param data Serialized data to parse.
 * @return A SplitLogTask instance made of the passed <code>data</code>
 * @throws DeserializationException if the passed bytes cannot be parsed
 * @see #toByteArray()
 */
public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException {
    ProtobufUtil.expectPBMagicPrefix(data);
    try {
        int prefixLen = ProtobufUtil.lengthOfPBMagic();
        ZooKeeperProtos.SplitLogTask.Builder builder = ZooKeeperProtos.SplitLogTask.newBuilder();
        ProtobufUtil.mergeFrom(builder, data, prefixLen, data.length - prefixLen);
        return new SplitLogTask(builder.build());
    } catch (IOException e) {
        throw new DeserializationException(Bytes.toStringBinary(data, 0, 64), e);
    }
}
Also used : IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
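
Since parseFrom is the counterpart of toByteArray, a quick round trip exercises both sides. This sketch is not HBase code and assumes the SplitLogTask.Owned(ServerName) constructor and ServerName.valueOf are available as in HBase's split-log coordination classes.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public class SplitLogTaskRoundTrip {

    public static void main(String[] args) throws DeserializationException {
        ServerName worker = ServerName.valueOf("worker.example.org", 16020, 1L);
        // Serialize an "owned" task, then parse it back through SplitLogTask.parseFrom.
        byte[] data = new SplitLogTask.Owned(worker).toByteArray();
        SplitLogTask parsed = SplitLogTask.parseFrom(data);
        System.out.println("owned by worker: " + parsed.isOwned(worker));
    }
}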

Example 29 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class ZkSplitLogWorkerCoordination method getDataSetWatchSuccess.

void getDataSetWatchSuccess(String path, byte[] data) {
    SplitLogTask slt;
    try {
        slt = SplitLogTask.parseFrom(data);
    } catch (DeserializationException e) {
        LOG.warn("Failed parse", e);
        return;
    }
    synchronized (grabTaskLock) {
        if (workerInGrabTask) {
            // currentTask can change but that's ok
            String taskpath = currentTask;
            if (taskpath != null && taskpath.equals(path)) {
                // by the time this watch fires, the task may already have moved from owned by
                // this worker to unassigned and then to owned by another worker, so re-check
                // ownership before stopping
                if (!slt.isOwned(serverName) && !slt.isDone(serverName) && !slt.isErr(serverName) && !slt.isResigned(serverName)) {
                    LOG.info("task " + taskpath + " preempted from " + serverName + ", current task state and owner=" + slt.toString());
                    worker.stopTask();
                }
            }
        }
    }
}
Also used : SplitLogTask(org.apache.hadoop.hbase.SplitLogTask) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
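
The parse happens inside a ZooKeeper watch callback, which cannot propagate a checked exception, so the failure is logged and swallowed. A stripped-down sketch of that defensive shape; the helper class and logger are illustrative, not HBase code.

import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class WatchCallbackParseSketch {

    private static final Logger LOG = LoggerFactory.getLogger(WatchCallbackParseSketch.class);

    // Returns null instead of throwing: a corrupt znode payload must not kill the callback.
    static SplitLogTask parseOrNull(byte[] data) {
        try {
            return SplitLogTask.parseFrom(data);
        } catch (DeserializationException e) {
            LOG.warn("Failed to parse SplitLogTask from znode data, ignoring", e);
            return null;
        }
    }
}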

Example 30 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class MasterAddressTracker method getBackupMastersAndRenewWatch.

/**
 * Retrieves the list of registered backup masters and renews a watch on the znode for children
 * updates.
 * @param zkw Zookeeper watcher to use
 * @return List of backup masters.
 * @throws InterruptedIOException if interrupted while fetching the backup master data from ZooKeeper.
 */
public static List<ServerName> getBackupMastersAndRenewWatch(ZKWatcher zkw) throws InterruptedIOException {
    // Build Set of backup masters from ZK nodes
    List<String> backupMasterStrings = null;
    try {
        backupMasterStrings = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().backupMasterAddressesZNode);
    } catch (KeeperException e) {
        LOG.warn(zkw.prefix("Unable to list backup servers"), e);
    }
    List<ServerName> backupMasters = Collections.emptyList();
    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
        backupMasters = new ArrayList<>(backupMasterStrings.size());
        for (String s : backupMasterStrings) {
            try {
                byte[] bytes;
                try {
                    bytes = ZKUtil.getData(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().backupMasterAddressesZNode, s));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException();
                }
                if (bytes != null) {
                    ServerName sn;
                    try {
                        sn = ProtobufUtil.parseServerNameFrom(bytes);
                    } catch (DeserializationException e) {
                        LOG.warn("Failed parse, skipping registering backup server", e);
                        continue;
                    }
                    backupMasters.add(sn);
                }
            } catch (KeeperException e) {
                LOG.warn(zkw.prefix("Unable to get information about " + "backup servers"), e);
            }
        }
        backupMasters.sort(Comparator.comparing(ServerName::getServerName));
    }
    return backupMasters;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) ServerName(org.apache.hadoop.hbase.ServerName) KeeperException(org.apache.zookeeper.KeeperException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
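
Each backup master entry is parsed independently, so one corrupt registration is skipped rather than failing the whole listing. Below is a reduced sketch of that skip-and-continue loop over already-fetched payloads; the class name is illustrative and only the calls shown in the example above are assumed.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

public class BackupMasterParseSketch {

    // Parses every payload it can; bad entries are skipped, not fatal.
    static List<ServerName> parseAll(List<byte[]> payloads) {
        List<ServerName> masters = new ArrayList<>(payloads.size());
        for (byte[] bytes : payloads) {
            if (bytes == null) {
                continue;
            }
            try {
                masters.add(ProtobufUtil.parseServerNameFrom(bytes));
            } catch (DeserializationException e) {
                // mirror MasterAddressTracker: skip the entry that failed to parse
                continue;
            }
        }
        return masters;
    }
}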

Aggregations

DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException) 83
IOException (java.io.IOException) 57
InvalidProtocolBufferException (org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException) 15
FilterProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos) 13
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 12
KeeperException (org.apache.zookeeper.KeeperException) 12
ArrayList (java.util.ArrayList) 11
ServerName (org.apache.hadoop.hbase.ServerName) 9
Cell (org.apache.hadoop.hbase.Cell) 8
CompareOperator (org.apache.hadoop.hbase.CompareOperator) 8
InterruptedIOException (java.io.InterruptedIOException) 7
CellVisibility (org.apache.hadoop.hbase.security.visibility.CellVisibility) 7
ByteArrayInputStream (java.io.ByteArrayInputStream) 6
Tag (org.apache.hadoop.hbase.Tag) 6
HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) 6
Map (java.util.Map) 5
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException) 5
TableName (org.apache.hadoop.hbase.TableName) 5
FilterList (org.apache.hadoop.hbase.filter.FilterList) 5
List (java.util.List) 4