Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class PutSortReducer, method reduce.
@Override
protected void reduce(ImmutableBytesWritable row, java.lang.Iterable<Put> puts,
    Reducer<ImmutableBytesWritable, Put, ImmutableBytesWritable, KeyValue>.Context context)
    throws java.io.IOException, InterruptedException {
  // although reduce() is called per-row, handle the pathological case of a single huge row
  long threshold =
      context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30));
  Iterator<Put> iter = puts.iterator();
  while (iter.hasNext()) {
    TreeSet<KeyValue> map = new TreeSet<>(CellComparator.getInstance());
    long curSize = 0;
    // stop at the end or the RAM threshold
    List<Tag> tags = new ArrayList<>();
    while (iter.hasNext() && curSize < threshold) {
      // clear the tags
      tags.clear();
      Put p = iter.next();
      long t = p.getTTL();
      if (t != Long.MAX_VALUE) {
        // add TTL tag if found
        tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(t)));
      }
      byte[] acl = p.getACL();
      if (acl != null) {
        // add ACL tag if found
        tags.add(new ArrayBackedTag(TagType.ACL_TAG_TYPE, acl));
      }
      try {
        CellVisibility cellVisibility = p.getCellVisibility();
        if (cellVisibility != null) {
          // add the visibility labels if any
          tags.addAll(kvCreator.getVisibilityExpressionResolver()
              .createVisibilityExpTags(cellVisibility.getExpression()));
        }
      } catch (DeserializationException e) {
        // a bad visibility expression fails the task rather than being silently ignored
        throw new IOException("Invalid visibility expression found in mutation " + p, e);
      }
      for (List<Cell> cells : p.getFamilyCellMap().values()) {
        for (Cell cell : cells) {
          // Create the KV to be written directly to HFiles, using the facade KVCreator.
          KeyValue kv = null;
          TagUtil.carryForwardTags(tags, cell);
          if (!tags.isEmpty()) {
            kv = (KeyValue) kvCreator.create(cell.getRowArray(), cell.getRowOffset(),
                cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(),
                cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(),
                cell.getQualifierLength(), cell.getTimestamp(), cell.getValueArray(),
                cell.getValueOffset(), cell.getValueLength(), tags);
          } else {
            kv = KeyValueUtil.ensureKeyValue(cell);
          }
          if (map.add(kv)) {
            // don't count duplicated kvs towards the size
            curSize += kv.heapSize();
          }
        }
      }
    }
    context.setStatus("Read " + map.size() + " entries of " + map.getClass() + "("
        + StringUtils.humanReadableInt(curSize) + ")");
    int index = 0;
    for (KeyValue kv : map) {
      context.write(row, kv);
      if (++index % 100 == 0)
        context.setStatus("Wrote " + index);
    }
    // if we have more entries to process
    if (iter.hasNext()) {
      // force flush because we cannot guarantee intra-row sorted order
      context.write(null, null);
    }
  }
}
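The reducer buffers each row's KeyValues in memory, so the "putsortreducer.row.threshold" setting above (default 1L * (1 << 30) bytes, i.e. 1 GiB) is the main knob for very wide rows. A minimal driver sketch, assuming a bulk-load preparation job whose mapper emits (ImmutableBytesWritable, Put) pairs; the class name, job name and 256 MiB value are illustrative, and a full pipeline would normally also call HFileOutputFormat2.configureIncrementalLoad to wire up the output format and partitioner:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.PutSortReducer;
import org.apache.hadoop.mapreduce.Job;

public class BulkLoadDriverSketch {
  public static Job createJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Buffer at most 256 MiB of KeyValues per row before the reducer forces an intra-row flush
    conf.setLong("putsortreducer.row.threshold", 256L * 1024 * 1024);
    Job job = Job.getInstance(conf, "bulk-load-prepare");
    // PutSortReducer consumes (ImmutableBytesWritable, Put) pairs from the map phase
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    job.setReducerClass(PutSortReducer.class);
    return job;
  }
}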
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class ProtobufUtil, method toAuthorizations.
/**
 * Convert protocol buffer Authorizations bytes to a client Authorizations.
 *
 * @param protoBytes serialized ClientProtos.Authorizations bytes
 * @return the converted client Authorizations, or null if protoBytes is null
 * @throws DeserializationException if the bytes cannot be parsed as Authorizations
 */
public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException {
  if (protoBytes == null)
    return null;
  ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder();
  ClientProtos.Authorizations proto = null;
  try {
    ProtobufUtil.mergeFrom(builder, protoBytes);
    proto = builder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return toAuthorizations(proto);
}
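A minimal caller sketch for the converter above; the byte array is assumed to hold a serialized ClientProtos.Authorizations message, and the import assumes the non-shaded org.apache.hadoop.hbase.protobuf.ProtobufUtil (newer HBase versions also ship a shaded variant under org.apache.hadoop.hbase.shaded.protobuf):

import java.io.IOException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.security.visibility.Authorizations;

public class AuthorizationsReader {
  public static Authorizations readAuthorizations(byte[] protoBytes) throws IOException {
    try {
      // Mirrors the converter above: a null input yields a null result
      Authorizations auths = ProtobufUtil.toAuthorizations(protoBytes);
      if (auths != null) {
        System.out.println("labels: " + auths.getLabels());
      }
      return auths;
    } catch (DeserializationException e) {
      // Corrupt bytes: surface the failure instead of silently returning null
      throw new IOException("Could not deserialize Authorizations", e);
    }
  }
}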
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class ProtobufUtil, method toServerName.
/**
 * Get a ServerName from the passed-in data bytes.
 * @param data Data with a serialized server name in it; can handle the old style servername where
 *          the servername was host and port. Also works with data that begins with the pb 'PBUF'
 *          magic and is then followed by a protobuf that has a serialized {@link ServerName} in
 *          it.
 * @return null if <code>data</code> is null, else the passed data converted to a ServerName
 *         instance.
 * @throws DeserializationException if the protobuf content cannot be parsed
 */
public static ServerName toServerName(final byte[] data) throws DeserializationException {
  if (data == null || data.length <= 0) {
    return null;
  }
  if (ProtobufMagic.isPBMagicPrefix(data)) {
    int prefixLen = ProtobufMagic.lengthOfPBMagic();
    try {
      ZooKeeperProtos.Master rss =
          ZooKeeperProtos.Master.parser().parseFrom(data, prefixLen, data.length - prefixLen);
      HBaseProtos.ServerName sn = rss.getMaster();
      return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode());
    } catch (/* InvalidProtocolBufferException */ IOException e) {
      // A failed parse is unrecoverable; fail fast by rethrowing as DeserializationException.
      throw new DeserializationException(e);
    }
  }
  // The str returned could be old style -- pre hbase-1502 -- which was
  // hostname and port separated by a colon rather than hostname, port and
  // startcode delimited by a ','.
  String str = Bytes.toString(data);
  int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR);
  if (index != -1) {
    // Presume it's a ServerName serialized with versioned bytes.
    return ServerName.parseVersionedServerName(data);
  }
  // Presume it's in hostname:port format.
  String hostname = Addressing.parseHostname(str);
  int port = Addressing.parsePort(str);
  return ServerName.valueOf(hostname, port, -1L);
}
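A minimal caller sketch; the bytes are assumed to have been read from the active-master znode (the ZooKeeper read itself is not shown), and the ProtobufUtil import again assumes the non-shaded package:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

public class MasterAddressReader {
  public static String describeMaster(byte[] znodeData) {
    try {
      ServerName master = ProtobufUtil.toServerName(znodeData);
      if (master == null) {
        return "no master registered";
      }
      // Works for PBUF-prefixed data as well as the legacy host:port encoding
      return master.getHostname() + ":" + master.getPort()
          + " (started " + master.getStartcode() + ")";
    } catch (DeserializationException e) {
      return "unreadable master znode: " + e.getMessage();
    }
  }
}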
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class ProtobufUtil, method toCellVisibility.
/**
 * Convert protocol buffer CellVisibility bytes to a client CellVisibility.
 *
 * @param protoBytes serialized ClientProtos.CellVisibility bytes
 * @return the converted client CellVisibility, or null if protoBytes is null
 * @throws DeserializationException if the bytes cannot be parsed as CellVisibility
 */
public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
  if (protoBytes == null)
    return null;
  ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
  ClientProtos.CellVisibility proto = null;
  try {
    ProtobufUtil.mergeFrom(builder, protoBytes);
    proto = builder.build();
  } catch (IOException e) {
    throw new DeserializationException(e);
  }
  return toCellVisibility(proto);
}
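A minimal caller sketch exercising only the parse direction shown above; the byte array is assumed to hold a serialized ClientProtos.CellVisibility message, for example the attribute bytes written by Mutation.setCellVisibility:

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;

public class CellVisibilityReader {
  public static String readExpression(byte[] protoBytes) throws DeserializationException {
    // Mirrors the converter above: a null input yields a null result
    CellVisibility visibility = ProtobufUtil.toCellVisibility(protoBytes);
    // A typical expression looks like "(secret&topsecret)|confidential"
    return visibility == null ? null : visibility.getExpression();
  }
}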
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by apache.
The class RawAsyncHBaseAdmin, method mergeRegions.
@Override
public CompletableFuture<Void> mergeRegions(List<byte[]> nameOfRegionsToMerge, boolean forcible) {
  if (nameOfRegionsToMerge.size() < 2) {
    return failedFuture(new IllegalArgumentException(
        "Can not merge only " + nameOfRegionsToMerge.size() + " region"));
  }
  CompletableFuture<Void> future = new CompletableFuture<>();
  byte[][] encodedNameOfRegionsToMerge =
      nameOfRegionsToMerge.stream().map(this::toEncodeRegionName).toArray(byte[][]::new);
  addListener(checkRegionsAndGetTableName(encodedNameOfRegionsToMerge), (tableName, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    final MergeTableRegionsRequest request;
    try {
      request = RequestConverter.buildMergeTableRegionsRequest(encodedNameOfRegionsToMerge,
          forcible, ng.getNonceGroup(), ng.newNonce());
    } catch (DeserializationException e) {
      future.completeExceptionally(e);
      return;
    }
    addListener(
        this.procedureCall(tableName, request, MasterService.Interface::mergeTableRegions,
            MergeTableRegionsResponse::getProcId,
            new MergeTableRegionProcedureBiConsumer(tableName)),
        (ret, err2) -> {
          if (err2 != null) {
            future.completeExceptionally(err2);
          } else {
            future.complete(ret);
          }
        });
  });
  return future;
}
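A hedged usage sketch of the call above through the AsyncAdmin interface; the table name is illustrative, the example assumes the table has at least two regions and that the List-based mergeRegions overload shown above exists in the client version in use, and it simply picks the first two regions returned (which must be adjacent unless forcible is true):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeRegionsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      List<RegionInfo> regions = admin.getRegions(TableName.valueOf("example_table")).get();
      List<byte[]> toMerge = Arrays.asList(
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes());
      // forcible = false: the master rejects merges of non-adjacent regions
      admin.mergeRegions(toMerge, false).get();
    }
  }
}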