Search in sources:

Example 51 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class HRegionInfo method parseFrom.

/**
   * @param bytes A pb RegionInfo serialized with a pb magic prefix.
   * @param offset starting point in the byte array
   * @param len number of bytes to read from the byte array
   * @return A deserialized {@link HRegionInfo}
   * @throws DeserializationException if the bytes do not hold a pb-serialized RegionInfo
   * @see #toByteArray()
   */
public static HRegionInfo parseFrom(final byte[] bytes, int offset, int len) throws DeserializationException {
    if (ProtobufUtil.isPBMagicPrefix(bytes, offset, len)) {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        try {
            HBaseProtos.RegionInfo.Builder builder = HBaseProtos.RegionInfo.newBuilder();
            ProtobufUtil.mergeFrom(builder, bytes, pblen + offset, len - pblen);
            HBaseProtos.RegionInfo ri = builder.build();
            return convert(ri);
        } catch (IOException e) {
            throw new DeserializationException(e);
        }
    } else {
        throw new DeserializationException("PB encoded HRegionInfo expected");
    }
}
Also used : RegionInfo (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo), IOException (java.io.IOException), HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException)
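
A minimal usage sketch (not from the indexed source; the table name is illustrative): serialize an HRegionInfo with toByteArray(), then recover it with the parseFrom overload above.

// Hedged sketch: round-trip an HRegionInfo through its pb form.
public static HRegionInfo roundTripRegionInfo() throws DeserializationException {
    HRegionInfo original = new HRegionInfo(TableName.valueOf("demo_table"));
    // toByteArray() prepends the pb magic prefix that parseFrom checks for.
    byte[] bytes = original.toByteArray();
    return HRegionInfo.parseFrom(bytes, 0, bytes.length);
}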

Example 52 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class HTableDescriptor method parseFrom.

/**
   * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
   * @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
   * @throws DeserializationException
   * @throws IOException
   * @see #toByteArray()
   */
public static HTableDescriptor parseFrom(final byte[] bytes) throws DeserializationException, IOException {
    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded HTableDescriptor");
    }
    int pblen = ProtobufUtil.lengthOfPBMagic();
    TableSchema.Builder builder = TableSchema.newBuilder();
    TableSchema ts;
    try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        ts = builder.build();
    } catch (IOException e) {
        throw new DeserializationException(e);
    }
    return ProtobufUtil.convertToHTableDesc(ts);
}
Also used : TableSchema (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema), IOException (java.io.IOException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException)
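
The same toByteArray()/parseFrom() pairing round-trips a table descriptor; a hedged sketch, not from the indexed source (table name illustrative):

// Hedged sketch: round-trip an HTableDescriptor through its pb form.
public static HTableDescriptor roundTripTableDescriptor() throws DeserializationException, IOException {
    HTableDescriptor original = new HTableDescriptor(TableName.valueOf("demo_table"));
    // toByteArray() emits the pb magic prefix followed by a TableSchema protobuf.
    byte[] bytes = original.toByteArray();
    return HTableDescriptor.parseFrom(bytes);
}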

Example 53 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class ProtobufUtil method toServerName.

/**
   * Get a ServerName from the passed in data bytes.
   * @param data Data with a serialized server name in it; can handle the old-style
   * servername where the servername was just host and port.  Also works with data that
   * begins with the pb 'PBUF' magic and is then followed by a protobuf that
   * has a serialized {@link ServerName} in it.
   * @return null if <code>data</code> is null, else the passed data converted
   * to a ServerName instance.
   * @throws DeserializationException
   */
public static ServerName toServerName(final byte[] data) throws DeserializationException {
    if (data == null || data.length <= 0)
        return null;
    if (ProtobufMagic.isPBMagicPrefix(data)) {
        int prefixLen = ProtobufMagic.lengthOfPBMagic();
        try {
            ZooKeeperProtos.Master rss = ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen);
            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn = rss.getMaster();
            return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode());
        } catch (IOException e) {
            // Formerly InvalidProtocolBufferException: corrupt bytes, so fail fast
            // rather than retry on data that will not change.
            throw new DeserializationException(e);
        }
    }
    // The str returned could be old style -- pre hbase-1502 -- which was
    // hostname and port separated by a colon rather than hostname, port and
    // startcode delimited by a ','.
    String str = Bytes.toString(data);
    int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
    if (index != -1) {
        // Presume it's a ServerName serialized with versioned bytes.
        return ServerName.parseVersionedServerName(data);
    }
    // Presume it's in hostname:port format.
    String hostname = Addressing.parseHostname(str);
    int port = Addressing.parsePort(str);
    return ServerName.valueOf(hostname, port, -1L);
}
Also used : DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), HBaseIOException (org.apache.hadoop.hbase.HBaseIOException), IOException (java.io.IOException), ByteString (com.google.protobuf.ByteString), ZooKeeperProtos (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos), HBaseProtos (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException)
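
Because toServerName accepts several historical layouts, a hedged sketch (not from the indexed source; host and port values are illustrative) can exercise the non-pb branches:

// Hedged sketch: the versioned-bytes and legacy layouts accepted above.
public static void demoToServerName() throws DeserializationException {
    // Versioned bytes carry "host,port,startcode"; this takes the
    // parseVersionedServerName branch.
    ServerName full = ProtobufUtil.toServerName(
        ServerName.valueOf("host1.example.org", 16020, 1234567890L).getVersionedBytes());
    // Legacy pre hbase-1502 "hostname:port" data; startcode comes back as -1.
    ServerName legacy = ProtobufUtil.toServerName(Bytes.toBytes("host1.example.org:16020"));
    // null or empty input returns null rather than throwing.
    assert ProtobufUtil.toServerName(null) == null;
}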

Example 54 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class ZKSplitLogManagerCoordination method setRecoveryMode.

/**
   * Sets the recovery mode from outstanding split log tasks left over from a previous run,
   * or else from the current configuration setting.
   * @param isForInitialization true when called during master initialization
   * @throws IOException
   */
@Override
public void setRecoveryMode(boolean isForInitialization) throws IOException {
    synchronized (this) {
        if (this.isDrainingDone) {
            // when there is no outstanding splitlogtask after master start up, we already
            // have an up-to-date recovery mode
            return;
        }
    }
    if (this.watcher == null) {
        // when watcher is null (testing code), the recovery mode can only be LOG_SPLITTING
        synchronized (this) {
            this.isDrainingDone = true;
            this.recoveryMode = RecoveryMode.LOG_SPLITTING;
        }
        return;
    }
    boolean hasSplitLogTask = false;
    boolean hasRecoveringRegions = false;
    RecoveryMode previousRecoveryMode = RecoveryMode.UNKNOWN;
    RecoveryMode recoveryModeInConfig = (isDistributedLogReplay(conf)) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING;
    // Firstly check if there are outstanding recovering regions
    try {
        List<String> regions = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.recoveringRegionsZNode);
        if (regions != null && !regions.isEmpty()) {
            hasRecoveringRegions = true;
            previousRecoveryMode = RecoveryMode.LOG_REPLAY;
        }
        if (previousRecoveryMode == RecoveryMode.UNKNOWN) {
            // Secondly check if there are outstanding split log tasks
            List<String> tasks = listSplitLogTasks();
            if (!tasks.isEmpty()) {
                hasSplitLogTask = true;
                if (isForInitialization) {
                    // during initialization, try to get recovery mode from splitlogtask
                    int listSize = tasks.size();
                    for (int i = 0; i < listSize; i++) {
                        String task = tasks.get(i);
                        try {
                            byte[] data = ZKUtil.getData(this.watcher, ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, task));
                            if (data == null)
                                continue;
                            SplitLogTask slt = SplitLogTask.parseFrom(data);
                            previousRecoveryMode = slt.getMode();
                            if (previousRecoveryMode == RecoveryMode.UNKNOWN) {
                                // created by old code base where we don't set recovery mode in splitlogtask
                                // we can safely set to LOG_SPLITTING because we're in master initialization code
                                // before SSH is enabled & there is no outstanding recovering regions
                                previousRecoveryMode = RecoveryMode.LOG_SPLITTING;
                            }
                            break;
                        } catch (DeserializationException e) {
                            LOG.warn("Failed parse data for znode " + task, e);
                        } catch (InterruptedException e) {
                            throw new InterruptedIOException();
                        }
                    }
                }
            }
        }
    } catch (KeeperException e) {
        throw new IOException(e);
    }
    synchronized (this) {
        if (this.isDrainingDone) {
            return;
        }
        if (!hasSplitLogTask && !hasRecoveringRegions) {
            this.isDrainingDone = true;
            this.recoveryMode = recoveryModeInConfig;
            return;
        } else if (!isForInitialization) {
            // splitlogtask hasn't drained yet, keep existing recovery mode
            return;
        }
        if (previousRecoveryMode != RecoveryMode.UNKNOWN) {
            this.isDrainingDone = (previousRecoveryMode == recoveryModeInConfig);
            this.recoveryMode = previousRecoveryMode;
        } else {
            this.recoveryMode = recoveryModeInConfig;
        }
    }
}
Also used : InterruptedIOException (java.io.InterruptedIOException), RecoveryMode (org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode), IOException (java.io.IOException), SplitLogTask (org.apache.hadoop.hbase.SplitLogTask), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException), KeeperException (org.apache.zookeeper.KeeperException)
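
At the heart of the loop above is a parse-with-fallback step; a hedged distillation (the method name is ours, and the payload comes from ZooKeeper in the source):

// Hedged sketch: parse one split log task payload, defaulting to
// LOG_SPLITTING when an old writer left the recovery mode unset.
public static RecoveryMode modeFromTaskData(byte[] data) {
    try {
        SplitLogTask slt = SplitLogTask.parseFrom(data);
        RecoveryMode mode = slt.getMode();
        return mode == RecoveryMode.UNKNOWN ? RecoveryMode.LOG_SPLITTING : mode;
    } catch (DeserializationException e) {
        // Mirrors the source: a bad znode is skipped, not fatal.
        return RecoveryMode.UNKNOWN;
    }
}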

Example 55 with DeserializationException

use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

the class HMaster method getClusterStatus.

/**
   * @return cluster status
   */
public ClusterStatus getClusterStatus() throws InterruptedIOException {
    // Build Set of backup masters from ZK nodes
    List<String> backupMasterStrings;
    try {
        backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper, this.zooKeeper.znodePaths.backupMasterAddressesZNode);
    } catch (KeeperException e) {
        LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
        backupMasterStrings = null;
    }
    List<ServerName> backupMasters = null;
    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
        backupMasters = new ArrayList<>(backupMasterStrings.size());
        for (String s : backupMasterStrings) {
            try {
                byte[] bytes;
                try {
                    bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(this.zooKeeper.znodePaths.backupMasterAddressesZNode, s));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException();
                }
                if (bytes != null) {
                    ServerName sn;
                    try {
                        sn = ProtobufUtil.parseServerNameFrom(bytes);
                    } catch (DeserializationException e) {
                        LOG.warn("Failed parse, skipping registering backup server", e);
                        continue;
                    }
                    backupMasters.add(sn);
                }
            } catch (KeeperException e) {
                LOG.warn(this.zooKeeper.prefix("Unable to get information about " + "backup servers"), e);
            }
        }
        Collections.sort(backupMasters, new Comparator<ServerName>() {

            @Override
            public int compare(ServerName s1, ServerName s2) {
                return s1.getServerName().compareTo(s2.getServerName());
            }
        });
    }
    String clusterId = fileSystemManager != null ? fileSystemManager.getClusterId().toString() : null;
    Set<RegionState> regionsInTransition = assignmentManager != null ? assignmentManager.getRegionStates().getRegionsInTransition() : null;
    String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
    boolean balancerOn = loadBalancerTracker != null ? loadBalancerTracker.isBalancerOn() : false;
    Map<ServerName, ServerLoad> onlineServers = null;
    Set<ServerName> deadServers = null;
    if (serverManager != null) {
        deadServers = serverManager.getDeadServers().copyServerNames();
        onlineServers = serverManager.getOnlineServers();
    }
    return new ClusterStatus(VersionInfo.getVersion(), clusterId, onlineServers, deadServers, serverName, backupMasters, regionsInTransition, coprocessors, balancerOn);
}
Also used : InterruptedIOException (java.io.InterruptedIOException), DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException), ServerLoad (org.apache.hadoop.hbase.ServerLoad), ServerName (org.apache.hadoop.hbase.ServerName), KeeperException (org.apache.zookeeper.KeeperException), ClusterStatus (org.apache.hadoop.hbase.ClusterStatus)
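
The backup-master loop shows the lenient collection pattern this example relies on; a hedged distillation (method and parameter names are ours; znode reading is elided):

// Hedged sketch: keep every parseable ServerName, skipping entries whose
// bytes fail to deserialize instead of aborting the whole status call.
public static List<ServerName> parseBackupMasters(List<byte[]> payloads) {
    List<ServerName> out = new ArrayList<>(payloads.size());
    for (byte[] bytes : payloads) {
        try {
            out.add(ProtobufUtil.parseServerNameFrom(bytes));
        } catch (DeserializationException e) {
            // One bad znode should not hide the remaining backup masters.
        }
    }
    // Same ordering as the anonymous Comparator in the source (needs java.util.Comparator).
    out.sort(Comparator.comparing(ServerName::getServerName));
    return out;
}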

Aggregations

DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException): 83
IOException (java.io.IOException): 57
InvalidProtocolBufferException (org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException): 15
FilterProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos): 13
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 12
KeeperException (org.apache.zookeeper.KeeperException): 12
ArrayList (java.util.ArrayList): 11
ServerName (org.apache.hadoop.hbase.ServerName): 9
Cell (org.apache.hadoop.hbase.Cell): 8
CompareOperator (org.apache.hadoop.hbase.CompareOperator): 8
InterruptedIOException (java.io.InterruptedIOException): 7
CellVisibility (org.apache.hadoop.hbase.security.visibility.CellVisibility): 7
ByteArrayInputStream (java.io.ByteArrayInputStream): 6
Tag (org.apache.hadoop.hbase.Tag): 6
HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos): 6
Map (java.util.Map): 5
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 5
TableName (org.apache.hadoop.hbase.TableName): 5
FilterList (org.apache.hadoop.hbase.filter.FilterList): 5
List (java.util.List): 4