use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
In the class Mutation, the method toCellVisibility:
/**
 * Converts serialized protocol buffer CellVisibility bytes into a client CellVisibility.
 *
 * @param protoBytes serialized {@code ClientProtos.CellVisibility} message bytes; may be null
 * @return the converted client CellVisibility, or null if {@code protoBytes} is null
 * @throws DeserializationException if the bytes cannot be parsed as a CellVisibility message
 */
private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
  if (protoBytes == null) {
    return null;
  }
  ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
  ClientProtos.CellVisibility proto;
  try {
    // Malformed protobuf input surfaces as IOException from mergeFrom.
    ProtobufUtil.mergeFrom(builder, protoBytes);
    proto = builder.build();
  } catch (IOException e) {
    // Preserve the parse failure as the cause of the checked deserialization error.
    throw new DeserializationException(e);
  }
  return toCellVisibility(proto);
}
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
In the class MetaRegionLocationCache, the method getMetaRegionLocation:
/**
 * Gets the HRegionLocation for a given meta replica ID and renews the watch on its znode for
 * future updates.
 *
 * @param replicaId replica ID of the meta region
 * @return HRegionLocation for the meta replica
 * @throws KeeperException if there is any issue fetching/parsing the serialized data
 */
private HRegionLocation getMetaRegionLocation(int replicaId) throws KeeperException {
  final String replicaZNode = watcher.getZNodePaths().getZNodeForReplica(replicaId);
  final RegionState state;
  try {
    // getDataAndWatch both fetches the payload and re-registers the watch on the znode.
    byte[] serialized = ZKUtil.getDataAndWatch(watcher, replicaZNode);
    state = ProtobufUtil.parseMetaRegionStateFrom(serialized, replicaId);
  } catch (DeserializationException e) {
    // Surface parse failures as KeeperException, matching the declared contract.
    throw ZKUtil.convert(e);
  }
  return new HRegionLocation(state.getRegion(), state.getServerName());
}
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
In the class SplitLogTask, the method parseFrom:
/**
 * Parses a SplitLogTask from serialized data produced by {@link #toByteArray()}.
 *
 * @param data serialized data to parse; expected to start with the protobuf magic prefix
 * @return a SplitLogTask instance made of the passed <code>data</code>
 * @throws DeserializationException if the bytes cannot be parsed as a SplitLogTask message
 * @see #toByteArray()
 */
public static SplitLogTask parseFrom(final byte[] data) throws DeserializationException {
  ProtobufUtil.expectPBMagicPrefix(data);
  try {
    int prefixLen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.SplitLogTask.Builder builder = ZooKeeperProtos.SplitLogTask.newBuilder();
    ProtobufUtil.mergeFrom(builder, data, prefixLen, data.length - prefixLen);
    return new SplitLogTask(builder.build());
  } catch (IOException e) {
    // Include only a bounded prefix of the payload in the error message; clamp to the
    // actual length so payloads shorter than 64 bytes do not index past the array end.
    throw new DeserializationException(Bytes.toStringBinary(data, 0, Math.min(data.length, 64)), e);
  }
}
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
In the class ZkSplitLogWorkerCoordination, the method getDataSetWatchSuccess:
/**
 * Callback invoked when the data for a split-log task znode has been fetched successfully.
 * If this worker is mid-grab on the same task and the task is no longer associated with this
 * server in any state, the in-progress task is stopped (it was preempted by another worker).
 */
void getDataSetWatchSuccess(String path, byte[] data) {
  final SplitLogTask slt;
  try {
    slt = SplitLogTask.parseFrom(data);
  } catch (DeserializationException e) {
    LOG.warn("Failed parse", e);
    return;
  }
  synchronized (grabTaskLock) {
    if (!workerInGrabTask) {
      return;
    }
    // currentTask can change but that's ok
    final String taskpath = currentTask;
    if (taskpath == null || !taskpath.equals(path)) {
      return;
    }
    // worker to unassigned to owned by another worker
    boolean associatedWithUs = slt.isOwned(serverName) || slt.isDone(serverName)
      || slt.isErr(serverName) || slt.isResigned(serverName);
    if (!associatedWithUs) {
      LOG.info("task " + taskpath + " preempted from " + serverName
        + ", current task state and owner=" + slt.toString());
      worker.stopTask();
    }
  }
}
use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
In the class MasterAddressTracker, the method getBackupMastersAndRenewWatch:
/**
 * Retrieves the list of registered backup masters and renews a watch on the znode for children
 * updates.
 *
 * @param zkw Zookeeper watcher to use
 * @return sorted list of backup masters; empty if none are registered or listing fails
 * @throws InterruptedIOException if interrupted while fetching data from Zookeeper
 */
public static List<ServerName> getBackupMastersAndRenewWatch(ZKWatcher zkw) throws InterruptedIOException {
  // Build Set of backup masters from ZK nodes
  List<String> backupMasterStrings = null;
  try {
    backupMasterStrings = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.getZNodePaths().backupMasterAddressesZNode);
  } catch (KeeperException e) {
    // Best effort: an unlistable znode is treated the same as "no backup masters".
    LOG.warn(zkw.prefix("Unable to list backup servers"), e);
  }
  if (backupMasterStrings == null || backupMasterStrings.isEmpty()) {
    return Collections.emptyList();
  }
  List<ServerName> backupMasters = new ArrayList<>(backupMasterStrings.size());
  for (String s : backupMasterStrings) {
    try {
      byte[] bytes;
      try {
        bytes = ZKUtil.getData(zkw, ZNodePaths.joinZNode(zkw.getZNodePaths().backupMasterAddressesZNode, s));
      } catch (InterruptedException e) {
        // Restore the interrupt flag before converting to an IOException so callers
        // (and pooled threads) still observe the interruption.
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted reading backup master znode " + s);
      }
      if (bytes != null) {
        ServerName sn;
        try {
          sn = ProtobufUtil.parseServerNameFrom(bytes);
        } catch (DeserializationException e) {
          // Skip unparseable entries rather than failing the whole listing.
          LOG.warn("Failed parse, skipping registering backup server", e);
          continue;
        }
        backupMasters.add(sn);
      }
    } catch (KeeperException e) {
      LOG.warn(zkw.prefix("Unable to get information about " + "backup servers"), e);
    }
  }
  backupMasters.sort(Comparator.comparing(ServerName::getServerName));
  return backupMasters;
}
Aggregations