Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class RSGroupInfoManagerImpl, method retrieveGroupListFromZookeeper:
private List<RSGroupInfo> retrieveGroupListFromZookeeper() throws IOException {
  String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE);
  List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
  // Overwrite any info stored by table, this takes precedence
  try {
    if (ZKUtil.checkExists(watcher, groupBasePath) != -1) {
      List<String> children = ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath);
      if (children == null) {
        return RSGroupInfoList;
      }
      for (String znode : children) {
        byte[] data = ZKUtil.getData(watcher, ZNodePaths.joinZNode(groupBasePath, znode));
        if (data != null && data.length > 0) {
          ProtobufUtil.expectPBMagicPrefix(data);
          ByteArrayInputStream bis =
              new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
          RSGroupInfoList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
        }
      }
      LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size());
    }
  } catch (KeeperException | DeserializationException | InterruptedException e) {
    throw new IOException("Failed to read rsGroupZNode", e);
  }
  return RSGroupInfoList;
}
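For context, the group znodes read above use HBase's pb-magic framing: the writer prepends a short magic prefix to the serialized protobuf so readers can validate the payload before parsing it. Below is a minimal round-trip sketch under that assumption; the group name is made up and the helper is hypothetical, not a method of RSGroupInfoManagerImpl.

// Hypothetical round trip illustrating the pb-magic framing handled above.
static RSGroupProtos.RSGroupInfo roundTripGroupZNodeData() throws IOException, DeserializationException {
  byte[] znodeData = ProtobufUtil.prependPBMagic(
      RSGroupProtos.RSGroupInfo.newBuilder().setName("example_group").build().toByteArray());
  // Reader side: fail fast with DeserializationException if the prefix is missing,
  // then skip the prefix before handing the remainder to the protobuf parser.
  ProtobufUtil.expectPBMagicPrefix(znodeData);
  ByteArrayInputStream bis = new ByteArrayInputStream(znodeData,
      ProtobufUtil.lengthOfPBMagic(), znodeData.length - ProtobufUtil.lengthOfPBMagic());
  return RSGroupProtos.RSGroupInfo.parseFrom(bis);
}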
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class PutSortReducer, method reduce:
@Override
protected void reduce(ImmutableBytesWritable row, java.lang.Iterable<Put> puts,
    Reducer<ImmutableBytesWritable, Put, ImmutableBytesWritable, KeyValue>.Context context)
    throws java.io.IOException, InterruptedException {
  // although reduce() is called per-row, handle pathological case
  long threshold =
      context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30));
  Iterator<Put> iter = puts.iterator();
  while (iter.hasNext()) {
    TreeSet<KeyValue> map = new TreeSet<>(CellComparator.COMPARATOR);
    long curSize = 0;
    // stop at the end or the RAM threshold
    List<Tag> tags = new ArrayList<>();
    while (iter.hasNext() && curSize < threshold) {
      // clear the tags
      tags.clear();
      Put p = iter.next();
      long t = p.getTTL();
      if (t != Long.MAX_VALUE) {
        // add TTL tag if found
        tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(t)));
      }
      byte[] acl = p.getACL();
      if (acl != null) {
        // add ACL tag if found
        tags.add(new ArrayBackedTag(TagType.ACL_TAG_TYPE, acl));
      }
      try {
        CellVisibility cellVisibility = p.getCellVisibility();
        if (cellVisibility != null) {
          // add the visibility labels if any
          tags.addAll(kvCreator.getVisibilityExpressionResolver()
              .createVisibilityExpTags(cellVisibility.getExpression()));
        }
      } catch (DeserializationException e) {
        // We just rethrow here. Should other mutations be allowed to proceed by
        // ignoring the bad one instead?
        throw new IOException("Invalid visibility expression found in mutation " + p, e);
      }
      for (List<Cell> cells : p.getFamilyCellMap().values()) {
        for (Cell cell : cells) {
          // Creating the KV which needs to be directly written to HFiles. Using the Facade
          // KVCreator for creation of kvs.
          KeyValue kv = null;
          TagUtil.carryForwardTags(tags, cell);
          if (!tags.isEmpty()) {
            kv = (KeyValue) kvCreator.create(cell.getRowArray(), cell.getRowOffset(),
                cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(),
                cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(),
                cell.getQualifierLength(), cell.getTimestamp(), cell.getValueArray(),
                cell.getValueOffset(), cell.getValueLength(), tags);
          } else {
            kv = KeyValueUtil.ensureKeyValue(cell);
          }
          if (map.add(kv)) {
            // don't count duplicated kv into size
            curSize += kv.heapSize();
          }
        }
      }
    }
    context.setStatus("Read " + map.size() + " entries of " + map.getClass() + "("
        + StringUtils.humanReadableInt(curSize) + ")");
    int index = 0;
    for (KeyValue kv : map) {
      context.write(row, kv);
      if (++index % 100 == 0) {
        context.setStatus("Wrote " + index);
      }
    }
    // if we have more entries to process
    if (iter.hasNext()) {
      // force flush because we cannot guarantee intra-row sorted order
      context.write(null, null);
    }
  }
}
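The DeserializationException handled above originates when the reducer resolves a Put's cell-visibility expression; a malformed expression only fails at that point. Below is a minimal sketch of a Put carrying the attributes the reducer turns into tags; the row, family, qualifier, and expression are illustrative values and the helper is hypothetical.

static Put exampleHFileLoadPut() {
  Put p = new Put(Bytes.toBytes("row-1"));
  p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
  // Picked up in reduce() as a TTL_TAG_TYPE tag.
  p.setTTL(60000L);
  // Parsed lazily by the visibility expression resolver in reduce(); an invalid
  // expression surfaces there as a DeserializationException wrapped in an IOException.
  p.setCellVisibility(new CellVisibility("secret&internal"));
  return p;
}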
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class ReplicationSerDeHelper, method parsePeerFrom:
/**
 * @param bytes Content of a peer znode.
 * @return A {@link ReplicationPeerConfig} parsed from the passed bytes.
 * @throws DeserializationException
 */
public static ReplicationPeerConfig parsePeerFrom(final byte[] bytes)
    throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(bytes)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ReplicationProtos.ReplicationPeer.Builder builder =
        ReplicationProtos.ReplicationPeer.newBuilder();
    ReplicationProtos.ReplicationPeer peer;
    try {
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      peer = builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return convert(peer);
  } else {
    if (bytes.length > 0) {
      return new ReplicationPeerConfig().setClusterKey(Bytes.toString(bytes));
    }
    return new ReplicationPeerConfig().setClusterKey("");
  }
}
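A short, hypothetical caller showing the non-PB branch above: peer znodes written before the protobuf format held just the cluster key as raw bytes, while pb-prefixed payloads go through the protobuf parser and rethrow any parse failure as DeserializationException. The cluster key string is only an example value.

// Hypothetical caller exercising the legacy (non-PB) branch of parsePeerFrom.
static ReplicationPeerConfig readLegacyPeer() throws DeserializationException {
  byte[] legacyZNodeData = Bytes.toBytes("zk1.example.com:2181:/hbase");
  ReplicationPeerConfig peerConfig = ReplicationSerDeHelper.parsePeerFrom(legacyZNodeData);
  // For legacy bytes the whole payload becomes the cluster key.
  assert "zk1.example.com:2181:/hbase".equals(peerConfig.getClusterKey());
  return peerConfig;
}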
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class ZKSplitLogManagerCoordination, method removeStaleRecoveringRegions:
/**
 * ZooKeeper implementation of
 * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
 */
@Override
public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)
    throws IOException, InterruptedIOException {
  try {
    List<String> tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode);
    if (tasks != null) {
      int listSize = tasks.size();
      for (int i = 0; i < listSize; i++) {
        String t = tasks.get(i);
        byte[] data;
        try {
          data = ZKUtil.getData(this.watcher,
              ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, t));
        } catch (InterruptedException e) {
          throw new InterruptedIOException();
        }
        if (data != null) {
          SplitLogTask slt = null;
          try {
            slt = SplitLogTask.parseFrom(data);
          } catch (DeserializationException e) {
            LOG.warn("Failed to parse data for znode " + t, e);
          }
          if (slt != null && slt.isDone()) {
            continue;
          }
        }
        // decode the file name
        t = ZKSplitLog.getFileName(t);
        ServerName serverName =
            AbstractFSWALProvider.getServerNameFromWALDirectoryName(new Path(t));
        if (serverName != null) {
          knownFailedServers.add(serverName.getServerName());
        } else {
          LOG.warn("Found invalid WAL log file name: " + t);
        }
      }
    }
    // remove recovering regions which don't have any RS associated with them
    List<String> regions =
        ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.recoveringRegionsZNode);
    if (regions != null) {
      int listSize = regions.size();
      for (int i = 0; i < listSize; i++) {
        String nodePath =
            ZKUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, regions.get(i));
        List<String> regionFailedServers = ZKUtil.listChildrenNoWatch(watcher, nodePath);
        if (regionFailedServers == null || regionFailedServers.isEmpty()) {
          ZKUtil.deleteNode(watcher, nodePath);
          continue;
        }
        boolean needMoreRecovery = false;
        int tmpFailedServerSize = regionFailedServers.size();
        for (int j = 0; j < tmpFailedServerSize; j++) {
          if (knownFailedServers.contains(regionFailedServers.get(j))) {
            needMoreRecovery = true;
            break;
          }
        }
        if (!needMoreRecovery) {
          ZKUtil.deleteNodeRecursively(watcher, nodePath);
        }
      }
    }
  } catch (KeeperException e) {
    throw new IOException(e);
  }
}
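The lenient handling of DeserializationException above (log the corrupt task znode and keep scanning rather than abort) can be factored into a small predicate. This is only an illustrative sketch with a hypothetical helper name, not a method that exists in ZKSplitLogManagerCoordination.

// Hypothetical helper: treat a corrupt or missing task znode as "not done".
private static boolean isTaskDone(byte[] data) {
  if (data == null) {
    return false;
  }
  try {
    return SplitLogTask.parseFrom(data).isDone();
  } catch (DeserializationException e) {
    LOG.warn("Failed to parse SplitLogTask data", e);
    return false;
  }
}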
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class HColumnDescriptor, method parseFrom:
/**
 * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
 * @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
 * @throws DeserializationException
 * @see #toByteArray()
 */
public static HColumnDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
  if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
    throw new DeserializationException("No magic");
  }
  int pblen = ProtobufUtil.lengthOfPBMagic();
  ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
  ColumnFamilySchema cfs = null;
  try {
    ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
    cfs = builder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return ProtobufUtil.convertToHColumnDesc(cfs);
}