Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by Apache.
The class VisibilityController, method prePrepareTimeStampForDeleteVersion.
@Override
public void prePrepareTimeStampForDeleteVersion(ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation delete, Cell cell, byte[] byteNow, Get get) throws IOException {
  // Nothing to do if we are not filtering by visibility
  if (!authorizationEnabled) {
    return;
  }
  CellVisibility cellVisibility = null;
  try {
    cellVisibility = delete.getCellVisibility();
  } catch (DeserializationException de) {
    throw new IOException("Invalid cell visibility specified " + delete, de);
  }
  // The check for checkForReservedVisibilityTagPresence happens in preBatchMutate.
  // It runs for every mutation, and that is enough.
  List<Tag> visibilityTags = new ArrayList<>();
  if (cellVisibility != null) {
    String labelsExp = cellVisibility.getExpression();
    try {
      visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, false, false);
    } catch (InvalidLabelException e) {
      throw new IOException("Invalid cell visibility specified " + labelsExp, e);
    }
  }
  get.setFilter(new DeleteVersionVisibilityExpressionFilter(visibilityTags, VisibilityConstants.SORTED_ORDINAL_SERIALIZATION_FORMAT));
  try (RegionScanner scanner = ctx.getEnvironment().getRegion().getScanner(new Scan(get))) {
    // NOTE: Please don't use HRegion.get() here instead,
    // because it will copy cells to the heap. See HBASE-26036
    List<Cell> result = new ArrayList<>();
    scanner.next(result);
    if (result.size() < get.getMaxVersions()) {
      // Nothing to delete
      PrivateCellUtil.updateLatestStamp(cell, byteNow);
      return;
    }
    if (result.size() > get.getMaxVersions()) {
      throw new RuntimeException("Unexpected size: " + result.size() + ". More results returned than the max versions requested.");
    }
    Cell getCell = result.get(get.getMaxVersions() - 1);
    PrivateCellUtil.setTimestamp(cell, getCell.getTimestamp());
  }
  // We are bypassing here because HRegion.updateDeleteLatestVersionTimeStamp would
  // update with the current timestamp after doing another get. As the hook has already
  // determined the needed timestamp, we need to bypass here.
  // TODO: See if HRegion.updateDeleteLatestVersionTimeStamp() could be
  // called only if the hook is not called.
  ctx.bypass();
}
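The wrap in the first try/catch above is the usual way to surface the checked DeserializationException through the coprocessor API, which only declares IOException. A minimal caller-side sketch of the same pattern, assuming the standard org.apache.hadoop.hbase.client classes; the row key and visibility expression are illustrative only:

Delete delete = new Delete(Bytes.toBytes("row1"));             // illustrative row key
delete.setCellVisibility(new CellVisibility("secret&!public")); // illustrative expression
try {
  // getCellVisibility() parses the serialized mutation attribute and can fail
  CellVisibility cv = delete.getCellVisibility();
} catch (DeserializationException de) {
  // rethrow as IOException so it fits APIs that only declare IOException
  throw new IOException("Invalid cell visibility specified " + delete, de);
}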
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by Apache.
The class VisibilityLabelsCache, method refreshLabelsCache.
public void refreshLabelsCache(byte[] data) throws IOException {
  List<VisibilityLabel> visibilityLabels = null;
  try {
    visibilityLabels = VisibilityUtils.readLabelsFromZKData(data);
  } catch (DeserializationException dse) {
    throw new IOException(dse);
  }
  this.lock.writeLock().lock();
  try {
    labels.clear();
    ordinalVsLabels.clear();
    for (VisibilityLabel visLabel : visibilityLabels) {
      String label = Bytes.toString(visLabel.getLabel().toByteArray());
      labels.put(label, visLabel.getOrdinal());
      ordinalVsLabels.put(visLabel.getOrdinal(), label);
    }
  } finally {
    this.lock.writeLock().unlock();
  }
}
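refreshLabelsCache is typically driven from a ZooKeeper watcher when the labels znode changes. A hedged sketch of such a callback; the field names (labelZNode, watcher, labelsCache, LOG) are illustrative, not the exact hbase wiring:

public void nodeDataChanged(String path) {
  if (path.equals(labelZNode)) {
    try {
      // re-read the znode and push the fresh data into the cache
      byte[] data = ZKUtil.getData(watcher, labelZNode);
      labelsCache.refreshLabelsCache(data);
    } catch (KeeperException | InterruptedException | IOException e) {
      LOG.error("Failed to refresh visibility labels cache", e);
    }
  }
}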
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by Apache.
The class VisibilityUtils, method readLabelsFromZKData.
/**
 * Reads back visibility label data from ZooKeeper. The data read here is of the form written by
 * writeToZooKeeper(Map<byte[], Integer> entries).
 * @param data the raw bytes read from ZooKeeper
 * @return labels and their ordinal details, or null when the data has no protobuf magic prefix
 * @throws DeserializationException when the protobuf content cannot be parsed
 */
public static List<VisibilityLabel> readLabelsFromZKData(byte[] data) throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(data)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    try {
      VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
      ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
      return builder.getVisLabelList();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
  }
  return null;
}
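The reader returns null when the protobuf magic prefix is absent, so the writer must prepend it. A sketch of the writing counterpart this method expects, assuming ProtobufUtil.prependPBMagic; the label value and ordinal are illustrative:

VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
VisibilityLabel.Builder visLabel = VisibilityLabel.newBuilder();
visLabel.setLabel(ByteString.copyFrom(Bytes.toBytes("secret"))); // illustrative label
visLabel.setOrdinal(1);                                          // illustrative ordinal
builder.addVisLabel(visLabel.build());
// prepend the magic prefix so isPBMagicPrefix() passes on read
byte[] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());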
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by Apache.
The class VisibilityUtils, method readUserAuthsFromZKData.
/**
 * Reads back user auth data written to ZooKeeper.
 * @param data the raw bytes read from ZooKeeper
 * @return user auth details, or null when the data has no protobuf magic prefix
 * @throws DeserializationException when the protobuf content cannot be parsed
 */
public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) throws DeserializationException {
  if (ProtobufUtil.isPBMagicPrefix(data)) {
    int pblen = ProtobufUtil.lengthOfPBMagic();
    try {
      MultiUserAuthorizations.Builder builder = MultiUserAuthorizations.newBuilder();
      ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
      return builder.build();
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
  }
  return null;
}
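A caller of this method follows the same wrap-into-IOException pattern as refreshLabelsCache above. A minimal sketch of consuming the parsed result, assuming the accessors generated from the VisibilityLabelProtos definitions:

MultiUserAuthorizations multiUserAuths;
try {
  multiUserAuths = VisibilityUtils.readUserAuthsFromZKData(data);
} catch (DeserializationException dse) {
  throw new IOException(dse);
}
if (multiUserAuths != null) {
  for (UserAuthorizations userAuths : multiUserAuths.getUserAuthsList()) {
    String user = Bytes.toString(userAuths.getUser().toByteArray());
    // userAuths.getAuthList() holds the label ordinals granted to this user
  }
}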
Use of org.apache.hadoop.hbase.exceptions.DeserializationException in project hbase by Apache.
The class VerifyingRSGroupAdmin, method verify.
private void verify() throws IOException {
  Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
  Set<RSGroupInfo> zList = Sets.newHashSet();
  List<TableDescriptor> tds = new ArrayList<>();
  try (Admin admin = conn.getAdmin()) {
    tds.addAll(admin.listTableDescriptors());
    tds.addAll(admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME));
  }
  SortedSet<Address> lives = Sets.newTreeSet();
  for (ServerName sn : conn.getAdmin().getClusterMetrics().getLiveServerMetrics().keySet()) {
    lives.add(sn.getAddress());
  }
  for (ServerName sn : conn.getAdmin().listDecommissionedRegionServers()) {
    lives.remove(sn.getAddress());
  }
  try (Table table = conn.getTable(RSGroupInfoManagerImpl.RSGROUP_TABLE_NAME);
      ResultScanner scanner = table.getScanner(new Scan())) {
    for (;;) {
      Result result = scanner.next();
      if (result == null) {
        break;
      }
      RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(result.getValue(RSGroupInfoManagerImpl.META_FAMILY_BYTES, RSGroupInfoManagerImpl.META_QUALIFIER_BYTES));
      RSGroupInfo rsGroupInfo = ProtobufUtil.toGroupInfo(proto);
      groupMap.put(proto.getName(), RSGroupUtil.fillTables(rsGroupInfo, tds));
      for (Address address : rsGroupInfo.getServers()) {
        lives.remove(address);
      }
    }
  }
  SortedSet<TableName> tables = Sets.newTreeSet();
  for (TableDescriptor td : conn.getAdmin().listTableDescriptors(Pattern.compile(".*"), true)) {
    String groupName = td.getRegionServerGroup().orElse(RSGroupInfo.DEFAULT_GROUP);
    if (groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
      tables.add(td.getTableName());
    }
  }
  groupMap.put(RSGroupInfo.DEFAULT_GROUP, new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, lives, tables));
  assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(admin.listRSGroups()));
  try {
    String groupBasePath = ZNodePaths.joinZNode(zkw.getZNodePaths().baseZNode, "rsgroup");
    for (String znode : ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
      byte[] data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(groupBasePath, znode));
      if (data.length > 0) {
        ProtobufUtil.expectPBMagicPrefix(data);
        ByteArrayInputStream bis = new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
        RSGroupInfo rsGroupInfo = ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis));
        zList.add(RSGroupUtil.fillTables(rsGroupInfo, tds));
      }
    }
    groupMap.remove(RSGroupInfo.DEFAULT_GROUP);
    assertEquals(zList.size(), groupMap.size());
    for (RSGroupInfo rsGroupInfo : zList) {
      assertTrue(groupMap.get(rsGroupInfo.getName()).equals(rsGroupInfo));
    }
  } catch (KeeperException e) {
    throw new IOException("ZK verification failed", e);
  } catch (DeserializationException e) {
    throw new IOException("ZK verification failed", e);
  } catch (InterruptedException e) {
    throw new IOException("ZK verification failed", e);
  }
}
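The three catch blocks at the end wrap different checked exceptions, including DeserializationException, into the same IOException. Since Java 7 a multi-catch expresses this more compactly; a sketch of the equivalent tail, not the project's committed code:

} catch (KeeperException | DeserializationException | InterruptedException e) {
  // one handler for all three checked exceptions, identical wrapping
  throw new IOException("ZK verification failed", e);
}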