Example 46 with DeserializationException

Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class RSGroupInfoManagerImpl, method retrieveGroupListFromZookeeper.

private List<RSGroupInfo> retrieveGroupListFromZookeeper() throws IOException {
    String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE);
    List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
    // Overwrite any info stored by table, this takes precedence
    try {
        if (ZKUtil.checkExists(watcher, groupBasePath) != -1) {
            List<String> children = ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath);
            if (children == null) {
                return RSGroupInfoList;
            }
            for (String znode : children) {
                byte[] data = ZKUtil.getData(watcher, ZNodePaths.joinZNode(groupBasePath, znode));
                if (data != null && data.length > 0) {
                    // Fail fast if the znode content lacks the protobuf magic prefix.
                    ProtobufUtil.expectPBMagicPrefix(data);
                    // Skip past the magic prefix; ByteArrayInputStream clamps
                    // offset + length to the buffer end, so data.length is safe here.
                    ByteArrayInputStream bis = new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
                    RSGroupInfoList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
                }
            }
            LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size());
        }
    } catch (KeeperException | DeserializationException | InterruptedException e) {
        throw new IOException("Failed to read rsGroupZNode", e);
    }
    return RSGroupInfoList;
}
Also used : ByteArrayInputStream(java.io.ByteArrayInputStream) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) KeeperException(org.apache.zookeeper.KeeperException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
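
For context, a minimal sketch of the writing side that produces znode data this method can parse. It assumes a toProtoGroupInfo counterpart to the toGroupInfo call above, plus the ProtobufUtil.prependPBMagic and ZKUtil.createSetData helpers; the groupPath parameter is illustrative, not from the source:

private void writeGroupToZookeeper(RSGroupInfo group, String groupPath) throws IOException {
    try {
        // Serialize the group and prepend the same PB magic prefix that
        // expectPBMagicPrefix() checks for on the read side.
        byte[] proto = ProtobufUtil.toProtoGroupInfo(group).toByteArray();
        ZKUtil.createSetData(watcher, groupPath, ProtobufUtil.prependPBMagic(proto));
    } catch (KeeperException e) {
        throw new IOException("Failed to write group znode " + groupPath, e);
    }
}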

Example 47 with DeserializationException

Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class PutSortReducer, method reduce.

@Override
protected void reduce(ImmutableBytesWritable row, java.lang.Iterable<Put> puts, Reducer<ImmutableBytesWritable, Put, ImmutableBytesWritable, KeyValue>.Context context) throws java.io.IOException, InterruptedException {
    // although reduce() is called per-row, handle pathological case
    long threshold = context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30));
    Iterator<Put> iter = puts.iterator();
    while (iter.hasNext()) {
        TreeSet<KeyValue> map = new TreeSet<>(CellComparator.COMPARATOR);
        long curSize = 0;
        // stop at the end or the RAM threshold
        List<Tag> tags = new ArrayList<>();
        while (iter.hasNext() && curSize < threshold) {
            // clear the tags
            tags.clear();
            Put p = iter.next();
            long t = p.getTTL();
            if (t != Long.MAX_VALUE) {
                // add TTL tag if found
                tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(t)));
            }
            byte[] acl = p.getACL();
            if (acl != null) {
                // add ACL tag if found
                tags.add(new ArrayBackedTag(TagType.ACL_TAG_TYPE, acl));
            }
            try {
                CellVisibility cellVisibility = p.getCellVisibility();
                if (cellVisibility != null) {
                    // add the visibility labels if any
                    tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags(cellVisibility.getExpression()));
                }
            } catch (DeserializationException e) {
                // We just throw the exception here. Should the other mutations
                // be allowed to proceed by skipping over the bad one?
                throw new IOException("Invalid visibility expression found in mutation " + p, e);
            }
            for (List<Cell> cells : p.getFamilyCellMap().values()) {
                for (Cell cell : cells) {
                    // Creating the KV which needs to be directly written to HFiles. Using the Facade
                    // KVCreator for creation of kvs.
                    KeyValue kv = null;
                    TagUtil.carryForwardTags(tags, cell);
                    if (!tags.isEmpty()) {
                        kv = (KeyValue) kvCreator.create(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), tags);
                    } else {
                        kv = KeyValueUtil.ensureKeyValue(cell);
                    }
                    if (map.add(kv)) {
                        // don't count duplicated kv into size
                        curSize += kv.heapSize();
                    }
                }
            }
        }
        context.setStatus("Read " + map.size() + " entries of " + map.getClass() + "(" + StringUtils.humanReadableInt(curSize) + ")");
        int index = 0;
        for (KeyValue kv : map) {
            context.write(row, kv);
            if (++index % 100 == 0)
                context.setStatus("Wrote " + index);
        }
        // if we have more entries to process
        if (iter.hasNext()) {
            // force flush because we cannot guarantee intra-row sorted order
            context.write(null, null);
        }
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) CellVisibility(org.apache.hadoop.hbase.security.visibility.CellVisibility) ArrayList(java.util.ArrayList) IOException(java.io.IOException) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) Put(org.apache.hadoop.hbase.client.Put) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) TreeSet(java.util.TreeSet) Tag(org.apache.hadoop.hbase.Tag) Cell(org.apache.hadoop.hbase.Cell)
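
For reference, a minimal sketch (not from the source) of a Put carrying the per-mutation metadata this reducer folds into tags; the row, family, qualifier, and visibility expression are illustrative:

Put p = new Put(Bytes.toBytes("row1"));
p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
// Read back above via p.getTTL() and turned into a TTL_TAG_TYPE tag.
p.setTTL(3600000L);
// Parsed later by createVisibilityExpTags(); a malformed expression is what
// surfaces as the DeserializationException caught in the reducer.
p.setCellVisibility(new CellVisibility("secret|admin"));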

Example 48 with DeserializationException

Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class ReplicationSerDeHelper, method parsePeerFrom.

/**
   * @param bytes Content of a peer znode.
   * @return The ReplicationPeerConfig parsed from the passed bytes.
   * @throws DeserializationException
   */
public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes) throws DeserializationException {
    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        ReplicationProtos.ReplicationPeer.Builder builder = ReplicationProtos.ReplicationPeer.newBuilder();
        ReplicationProtos.ReplicationPeer peer;
        try {
            ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
            peer = builder.build();
        } catch (IOException e) {
            throw new DeserializationException(e);
        }
        return convert(peer);
    } else {
        if (bytes.length > 0) {
            // Legacy (pre-protobuf) format: the znode content is the bare cluster key string.
            return new ReplicationPeerConfig().setClusterKey(Bytes.toString(bytes));
        }
        return new ReplicationPeerConfig().setClusterKey("");
    }
}
Also used : ReplicationProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
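
A hedged caller sketch (the readPeerConfig wrapper and peerZNode name are illustrative, not from the source; the watcher type follows the ZooKeeperWatcher usage in the other examples): the usual pattern is to read the peer znode and surface a parse failure as an IOException:

ReplicationPeerConfig readPeerConfig(ZooKeeperWatcher watcher, String peerZNode)
        throws IOException, KeeperException, InterruptedException {
    byte[] data = ZKUtil.getData(watcher, peerZNode);
    try {
        // parsePeerFrom handles both the PB format and the legacy bare cluster key.
        return ReplicationSerDeHelper.parsePeerFrom(data);
    } catch (DeserializationException e) {
        throw new IOException("Corrupt replication peer znode " + peerZNode, e);
    }
}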

Example 49 with DeserializationException

Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class ZKSplitLogManagerCoordination, method removeStaleRecoveringRegions.

/**
   * ZooKeeper implementation of
   * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
   */
@Override
public void removeStaleRecoveringRegions(final Set<String> knownFailedServers) throws IOException, InterruptedIOException {
    try {
        List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode);
        if (tasks != null) {
            int listSize = tasks.size();
            for (int i = 0; i < listSize; i++) {
                String t = tasks.get(i);
                byte[] data;
                try {
                    data = ZKUtil.getData(this.watcher, ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, t));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException();
                }
                if (data != null) {
                    SplitLogTask slt = null;
                    try {
                        slt = SplitLogTask.parseFrom(data);
                    } catch (DeserializationException e) {
                        LOG.warn("Failed parse data for znode " + t, e);
                    }
                    if (slt != null && slt.isDone()) {
                        continue;
                    }
                }
                // decode the file name
                t = ZKSplitLog.getFileName(t);
                ServerName serverName = AbstractFSWALProvider.getServerNameFromWALDirectoryName(new Path(t));
                if (serverName != null) {
                    knownFailedServers.add(serverName.getServerName());
                } else {
                    LOG.warn("Found invalid WAL log file name:" + t);
                }
            }
        }
        // remove recovering regions that don't have any RS associated with them
        List<String> regions = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.recoveringRegionsZNode);
        if (regions != null) {
            int listSize = regions.size();
            for (int i = 0; i < listSize; i++) {
                String nodePath = ZKUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, regions.get(i));
                List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(watcher, nodePath);
                if (regionFailedServers == null || regionFailedServers.isEmpty()) {
                    ZKUtil.deleteNode(watcher, nodePath);
                    continue;
                }
                boolean needMoreRecovery = false;
                int tmpFailedServerSize = regionFailedServers.size();
                for (int j = 0; j < tmpFailedServerSize; j++) {
                    if (knownFailedServers.contains(regionFailedServers.get(j))) {
                        needMoreRecovery = true;
                        break;
                    }
                }
                if (!needMoreRecovery) {
                    ZKUtil.deleteNodeRecursively(watcher, nodePath);
                }
            }
        }
    } catch (KeeperException e) {
        throw new IOException(e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) ServerName(org.apache.hadoop.hbase.ServerName) SplitLogTask(org.apache.hadoop.hbase.SplitLogTask) KeeperException(org.apache.zookeeper.KeeperException)
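
A brief usage sketch (the coordination caller and deadServers collection are assumed for illustration): note that the method mutates the passed-in set, adding servers it discovers from outstanding split-log tasks, so callers pass a mutable set seeded with the servers they already know have failed:

Set<String> knownFailedServers = new HashSet<>();
for (ServerName sn : deadServers) {
    knownFailedServers.add(sn.getServerName());
}
// May grow knownFailedServers as a side effect while pruning stale
// recovering-region znodes.
coordination.removeStaleRecoveringRegions(knownFailedServers);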

Example 50 with DeserializationException

Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.

The class HColumnDescriptor, method parseFrom.

/**
   * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
   * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
   * @throws DeserializationException
   * @see #toByteArray()
   */
public static HColumnDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
    if (!ProtobufUtil.isPBMagicPrefix(bytes))
        throw new DeserializationException("No magic");
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
    ColumnFamilySchema cfs = null;
    try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        cfs = builder.build();
    } catch (IOException e) {
        throw new DeserializationException(e);
    }
    return ProtobufUtil.convertToHColumnDesc(cfs);
}
Also used : ColumnFamilySchema(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ColumnFamilySchema) IOException(java.io.IOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException)
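
Since the Javadoc points at toByteArray(), a minimal round-trip sketch (the family name "cf" is illustrative):

HColumnDescriptor hcd = new HColumnDescriptor("cf");
// pb-serialized with the magic prefix, as parseFrom() expects
byte[] bytes = hcd.toByteArray();
// throws DeserializationException on corrupt or non-PB input
HColumnDescriptor parsed = HColumnDescriptor.parseFrom(bytes);
assert hcd.equals(parsed);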

Aggregations

DeserializationException (org.apache.hadoop.hbase.exceptions.DeserializationException): 83 uses
IOException (java.io.IOException): 57 uses
InvalidProtocolBufferException (org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException): 15 uses
FilterProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos): 13 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 12 uses
KeeperException (org.apache.zookeeper.KeeperException): 12 uses
ArrayList (java.util.ArrayList): 11 uses
ServerName (org.apache.hadoop.hbase.ServerName): 9 uses
Cell (org.apache.hadoop.hbase.Cell): 8 uses
CompareOperator (org.apache.hadoop.hbase.CompareOperator): 8 uses
InterruptedIOException (java.io.InterruptedIOException): 7 uses
CellVisibility (org.apache.hadoop.hbase.security.visibility.CellVisibility): 7 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 6 uses
Tag (org.apache.hadoop.hbase.Tag): 6 uses
HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos): 6 uses
Map (java.util.Map): 5 uses
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 5 uses
TableName (org.apache.hadoop.hbase.TableName): 5 uses
FilterList (org.apache.hadoop.hbase.filter.FilterList): 5 uses
List (java.util.List): 4 uses