Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.
From the class TestGet, method testDynamicFilter.
@Test
public void testDynamicFilter() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String localPath = conf.get("hbase.local.dir") + File.separator + "jars" + File.separator;
  File jarFile = new File(localPath, "MockFilter.jar");
  jarFile.delete();
  assertFalse("Should be deleted: " + jarFile.getPath(), jarFile.exists());
  ClientProtos.Get getProto1 = ClientProtos.Get.parseFrom(Base64.decode(PB_GET));
  ClientProtos.Get getProto2 = ClientProtos.Get.parseFrom(Base64.decode(PB_GET_WITH_FILTER_LIST));
  // With the jar absent, deserializing a Get that references the custom filter class must fail.
  try {
    ProtobufUtil.toGet(getProto1);
    fail("Should not be able to load the filter class");
  } catch (IOException ioe) {
    assertTrue(ioe.getCause() instanceof ClassNotFoundException);
  }
  try {
    ProtobufUtil.toGet(getProto2);
    fail("Should not be able to load the filter class");
  } catch (IOException ioe) {
    assertTrue(ioe.getCause() instanceof InvocationTargetException);
    InvocationTargetException ite = (InvocationTargetException) ioe.getCause();
    assertTrue(ite.getTargetException() instanceof DeserializationException);
  }
  // Write the jar containing the filter classes; deserialization should now
  // resolve them dynamically.
  FileOutputStream fos = new FileOutputStream(jarFile);
  fos.write(Base64.decode(MOCK_FILTER_JAR));
  fos.close();
  Get get1 = ProtobufUtil.toGet(getProto1);
  assertEquals("test.MockFilter", get1.getFilter().getClass().getName());
  Get get2 = ProtobufUtil.toGet(getProto2);
  assertTrue(get2.getFilter() instanceof FilterList);
  List<Filter> filters = ((FilterList) get2.getFilter()).getFilters();
  assertEquals(3, filters.size());
  assertEquals("test.MockFilter", filters.get(0).getClass().getName());
  assertEquals("my.MockFilter", filters.get(1).getClass().getName());
  assertTrue(filters.get(2) instanceof KeyOnlyFilter);
}
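The FilterList round trip the test checks can be reproduced with stock filters, so no extra jar is needed. A minimal sketch, assuming the same unshaded ProtobufUtil and ClientProtos classes the test uses:

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListRoundTrip {
  public static void main(String[] args) throws Exception {
    Get get = new Get(Bytes.toBytes("row1"));
    get.setFilter(new FilterList(new PrefixFilter(Bytes.toBytes("row")), new KeyOnlyFilter()));
    // Serialize: each member filter is recorded as a class name plus its
    // serialized state, which is what PB_GET_WITH_FILTER_LIST encodes above.
    ClientProtos.Get proto = ProtobufUtil.toGet(get);
    // Deserialize: filter classes are resolved by name, which is why the test
    // fails with ClassNotFoundException until MockFilter.jar is in place.
    Get roundTripped = ProtobufUtil.toGet(proto);
    System.out.println(roundTripped.getFilter());
  }
}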
Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.
From the class TableResource, method getScanResource.
@Path("{scanspec: .*[*]$}")
public TableScanResource getScanResource(@Context final UriInfo uriInfo,
    @PathParam("scanspec") final String scanSpec,
    @HeaderParam("Accept") final String contentType,
    @DefaultValue(Integer.MAX_VALUE + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
    @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
    @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
    @DefaultValue("") @QueryParam(Constants.SCAN_COLUMN) List<String> column,
    @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
    @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
    @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
    @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
    @DefaultValue("true") @QueryParam(Constants.SCAN_CACHE_BLOCKS) boolean cacheBlocks,
    @DefaultValue("false") @QueryParam(Constants.SCAN_REVERSED) boolean reversed,
    @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String filters) {
  try {
    Filter filter = null;
    Scan tableScan = new Scan();
    if (scanSpec.indexOf('*') > 0) {
      String prefix = scanSpec.substring(0, scanSpec.indexOf('*'));
      byte[] prefixBytes = Bytes.toBytes(prefix);
      filter = new PrefixFilter(prefixBytes);
      if (startRow.isEmpty()) {
        tableScan.setStartRow(prefixBytes);
      }
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("Query parameters : Table Name => " + this.table + " Start Row => " + startRow
          + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
          + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
          + maxVersions + " Batch Size => " + batchSize);
    }
    Table hTable = RESTServlet.getInstance().getTable(this.table);
    tableScan.setBatch(batchSize);
    tableScan.setMaxVersions(maxVersions);
    tableScan.setTimeRange(startTime, endTime);
    if (!startRow.isEmpty()) {
      tableScan.setStartRow(Bytes.toBytes(startRow));
    }
    tableScan.setStopRow(Bytes.toBytes(endRow));
    for (String csplit : column) {
      String[] familysplit = csplit.trim().split(":");
      if (familysplit.length == 2) {
        if (familysplit[1].length() > 0) {
          if (LOG.isTraceEnabled()) {
            LOG.trace("Scan family and column : " + familysplit[0] + " " + familysplit[1]);
          }
          tableScan.addColumn(Bytes.toBytes(familysplit[0]), Bytes.toBytes(familysplit[1]));
        } else {
          tableScan.addFamily(Bytes.toBytes(familysplit[0]));
          if (LOG.isTraceEnabled()) {
            LOG.trace("Scan family : " + familysplit[0] + " and empty qualifier.");
          }
          tableScan.addColumn(Bytes.toBytes(familysplit[0]), null);
        }
      } else if (StringUtils.isNotEmpty(familysplit[0])) {
        if (LOG.isTraceEnabled()) {
          LOG.trace("Scan family : " + familysplit[0]);
        }
        tableScan.addFamily(Bytes.toBytes(familysplit[0]));
      }
    }
    FilterList filterList = null;
    if (StringUtils.isNotEmpty(filters)) {
      ParseFilter pf = new ParseFilter();
      Filter filterParam = pf.parseFilterString(filters);
      if (filter != null) {
        // Combine the row-prefix filter with the user-supplied filter; the
        // default FilterList operator is MUST_PASS_ALL.
        filterList = new FilterList(filter, filterParam);
      } else {
        filter = filterParam;
      }
    }
    if (filterList != null) {
      tableScan.setFilter(filterList);
    } else if (filter != null) {
      tableScan.setFilter(filter);
    }
    int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
    tableScan.setCaching(fetchSize);
    tableScan.setReversed(reversed);
    return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit);
  } catch (IOException exp) {
    servlet.getMetrics().incrementFailedScanRequests(1);
    processException(exp);
    LOG.warn(exp);
    return null;
  }
}
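Outside the REST plumbing, the filter wiring above reduces to a few lines. A minimal sketch, with buildScan as a hypothetical helper and scanSpec/filterSpec mirroring the resource parameters:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSpecSketch {
  // Hypothetical helper condensing getScanResource's filter wiring.
  static Scan buildScan(String scanSpec, String filterSpec) throws Exception {
    Scan scan = new Scan();
    Filter prefixFilter = null;
    int star = scanSpec.indexOf('*');
    if (star > 0) {
      byte[] prefix = Bytes.toBytes(scanSpec.substring(0, star));
      prefixFilter = new PrefixFilter(prefix);
      scan.setStartRow(prefix); // jump straight to the first possible match
    }
    Filter userFilter =
        filterSpec.isEmpty() ? null : new ParseFilter().parseFilterString(filterSpec);
    if (prefixFilter != null && userFilter != null) {
      // Default FilterList operator is MUST_PASS_ALL: both filters must match.
      scan.setFilter(new FilterList(prefixFilter, userFilter));
    } else if (prefixFilter != null || userFilter != null) {
      scan.setFilter(prefixFilter != null ? prefixFilter : userFilter);
    }
    return scan;
  }
}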
Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.
From the class VerifyReplication, method setRowPrefixFilter.
private static void setRowPrefixFilter(Scan scan, String rowPrefixes) {
  if (rowPrefixes != null && !rowPrefixes.isEmpty()) {
    String[] rowPrefixArray = rowPrefixes.split(",");
    Arrays.sort(rowPrefixArray);
    // A row passes if it matches any one of the prefixes (MUST_PASS_ONE).
    FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    for (String prefix : rowPrefixArray) {
      Filter filter = new PrefixFilter(Bytes.toBytes(prefix));
      filterList.addFilter(filter);
    }
    scan.setFilter(filterList);
    // The prefixes are sorted, so the first and last bound the scan range.
    byte[] startPrefixRow = Bytes.toBytes(rowPrefixArray[0]);
    byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length - 1]);
    setStartAndStopRows(scan, startPrefixRow, lastPrefixRow);
  }
}
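Usage amounts to building the union filter and bounding the scan range; a short sketch with made-up date prefixes:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixUnionSketch {
  public static void main(String[] args) {
    // MUST_PASS_ONE makes the list a union: a row matching any prefix passes.
    FilterList union = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    for (String prefix : new String[] { "2017-01", "2017-02", "2017-03" }) {
      union.addFilter(new PrefixFilter(Bytes.toBytes(prefix)));
    }
    Scan scan = new Scan();
    scan.setFilter(union);
    // Like setRowPrefixFilter above, bound the scanned range so the region
    // servers never read rows outside the sorted prefixes' span.
    scan.setStartRow(Bytes.toBytes("2017-01"));
    scan.setStopRow(Bytes.toBytes("2017-04"));
  }
}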
Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.
From the class VisibilityController, method preScannerOpen.
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
    RegionScanner s) throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
  }
  // Nothing to do if authorization is not enabled
  if (!authorizationEnabled) {
    return s;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = scan.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // For system tables other than "labels", scan without visibility checks
    // and filtering; checking visibility labels for the META and NAMESPACE
    // tables is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return s;
    }
  }
  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region, authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = scan.getFilter();
    if (filter != null) {
      // AND the visibility filter with any filter the client already set.
      scan.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      scan.setFilter(visibilityLabelFilter);
    }
  }
  return s;
}
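The wrap-or-set step at the end is a recurring FilterList idiom: AND a mandatory server-side filter with whatever the client already set. A minimal sketch of that pattern as a hypothetical helper:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;

public final class FilterAnding {
  // Hypothetical helper: combine a required filter with any existing one.
  static void andFilter(Scan scan, Filter required) {
    Filter existing = scan.getFilter();
    // FilterList defaults to MUST_PASS_ALL, so a cell must pass both filters.
    scan.setFilter(existing != null ? new FilterList(existing, required) : required);
  }
}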
Use of org.apache.hadoop.hbase.filter.FilterList in project hbase by apache.
From the class AccessController, method internalPreRead.
private void internalPreRead(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Query query, OpType opType) throws IOException {
  Filter filter = query.getFilter();
  // Don't wrap an AccessControlFilter
  if (filter instanceof AccessControlFilter) {
    return;
  }
  User user = getActiveUser(c);
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[], ? extends Collection<byte[]>> families = null;
  switch (opType) {
    case GET:
    case EXISTS:
      families = ((Get) query).getFamilyMap();
      break;
    case SCAN:
      families = ((Scan) query).getFamilyMap();
      break;
    default:
      throw new RuntimeException("Unhandled operation " + opType);
  }
  AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ);
  Region region = getRegion(env);
  TableName table = getTableName(region);
  Map<ByteRange, Integer> cfVsMaxVersions = Maps.newHashMap();
  for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
    cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions());
  }
  if (!authResult.isAllowed()) {
    if (!cellFeaturesEnabled || compatibleEarlyTermination) {
      // Old behavior: only proceed if the user has at least a family- or
      // qualifier-level grant, and push the access check down into a per-cell
      // filter. This is more expensive (every cell passes through the
      // filter) but that's the price of backwards compatibility.
      if (hasFamilyQualifierPermission(user, Action.READ, env, families)) {
        authResult.setAllowed(true);
        authResult.setReason("Access allowed with filter");
        // Only wrap the filter if we are enforcing authorizations
        if (authorizationEnabled) {
          Filter ourFilter = new AccessControlFilter(authManager, user, table,
              AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, cfVsMaxVersions);
          // wrap any existing filter
          if (filter != null) {
            ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
                Lists.newArrayList(ourFilter, filter));
          }
          switch (opType) {
            case GET:
            case EXISTS:
              ((Get) query).setFilter(ourFilter);
              break;
            case SCAN:
              ((Scan) query).setFilter(ourFilter);
              break;
            default:
              throw new RuntimeException("Unhandled operation " + opType);
          }
        }
      }
    } else {
      // New behavior: Any access we might be granted is more fine-grained
      // than whole table or CF. Simply inject a filter and return what is
      // allowed. We will not throw an AccessDeniedException. This is a
      // behavioral change since 0.96.
      authResult.setAllowed(true);
      authResult.setReason("Access allowed with filter");
      // Only wrap the filter if we are enforcing authorizations
      if (authorizationEnabled) {
        Filter ourFilter = new AccessControlFilter(authManager, user, table,
            AccessControlFilter.Strategy.CHECK_CELL_DEFAULT, cfVsMaxVersions);
        // wrap any existing filter
        if (filter != null) {
          ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
              Lists.newArrayList(ourFilter, filter));
        }
        switch (opType) {
          case GET:
          case EXISTS:
            ((Get) query).setFilter(ourFilter);
            break;
          case SCAN:
            ((Scan) query).setFilter(ourFilter);
            break;
          default:
            throw new RuntimeException("Unhandled operation " + opType);
        }
      }
    }
  }
  logResult(authResult);
  if (authorizationEnabled && !authResult.isAllowed()) {
    throw new AccessDeniedException("Insufficient permissions for user '"
        + (user != null ? user.getShortName() : "null") + "' (table=" + table + ", action=READ)");
  }
}
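The two branches differ only in the AccessControlFilter strategy; the FilterList wiring is identical. A condensed sketch of that shared step, with wrap as a hypothetical name:

import java.util.Arrays;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;

public final class AclFilterWiring {
  // Hypothetical condensation of the duplicated branches: the ACL filter is
  // placed first so it is evaluated before any user-supplied filter.
  static Filter wrap(Filter aclFilter, Filter existing) {
    if (existing == null) {
      return aclFilter;
    }
    return new FilterList(FilterList.Operator.MUST_PASS_ALL,
        Arrays.asList(aclFilter, existing));
  }
}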