Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project gora by apache.
The class DefaultFactory, method createFilter.
@Override
public org.apache.hadoop.hbase.filter.Filter createFilter(Filter<K, T> filter, HBaseStore<K, T> store) {
    if (filter instanceof FilterList) {
        FilterList<K, T> filterList = (FilterList<K, T>) filter;
        org.apache.hadoop.hbase.filter.FilterList hbaseFilter =
                new org.apache.hadoop.hbase.filter.FilterList(Operator.valueOf(filterList.getOperator().name()));
        for (Filter<K, T> rowFitler : filterList.getFilters()) {
            FilterFactory<K, T> factory = getHbaseFitlerUtil().getFactory(rowFitler);
            if (factory == null) {
                LOG.warn("HBase remote filter factory not yet implemented for " + rowFitler.getClass().getCanonicalName());
                return null;
            }
            org.apache.hadoop.hbase.filter.Filter hbaseRowFilter = factory.createFilter(rowFitler, store);
            if (hbaseRowFilter != null) {
                hbaseFilter.addFilter(hbaseRowFilter);
            }
        }
        return hbaseFilter;
    } else if (filter instanceof SingleFieldValueFilter) {
        SingleFieldValueFilter<K, T> fieldFilter = (SingleFieldValueFilter<K, T>) filter;
        HBaseColumn column = store.getMapping().getColumn(fieldFilter.getFieldName());
        CompareOp compareOp = getCompareOp(fieldFilter.getFilterOp());
        byte[] family = column.getFamily();
        byte[] qualifier = column.getQualifier();
        byte[] value = HBaseByteInterface.toBytes(fieldFilter.getOperands().get(0));
        SingleColumnValueFilter hbaseFilter = new SingleColumnValueFilter(family, qualifier, compareOp, value);
        hbaseFilter.setFilterIfMissing(fieldFilter.isFilterIfMissing());
        return hbaseFilter;
    } else if (filter instanceof MapFieldValueFilter) {
        MapFieldValueFilter<K, T> mapFilter = (MapFieldValueFilter<K, T>) filter;
        HBaseColumn column = store.getMapping().getColumn(mapFilter.getFieldName());
        CompareOp compareOp = getCompareOp(mapFilter.getFilterOp());
        byte[] family = column.getFamily();
        byte[] qualifier = HBaseByteInterface.toBytes(mapFilter.getMapKey());
        byte[] value = HBaseByteInterface.toBytes(mapFilter.getOperands().get(0));
        SingleColumnValueFilter hbaseFilter = new SingleColumnValueFilter(family, qualifier, compareOp, value);
        hbaseFilter.setFilterIfMissing(mapFilter.isFilterIfMissing());
        return hbaseFilter;
    } else {
        LOG.warn("HBase remote filter not yet implemented for " + filter.getClass().getCanonicalName());
        return null;
    }
}
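For context, a hedged usage sketch of how the translation above is typically triggered from the Gora side; the Employee bean, the "salary" field, and the store setup are assumptions for illustration, not part of the factory code:

// Minimal sketch (assumed names): a Gora SingleFieldValueFilter attached to a query is handed
// to the factory above, which turns it into an HBase SingleColumnValueFilter that is evaluated
// server-side on the region servers.
SingleFieldValueFilter<String, Employee> salaryFilter = new SingleFieldValueFilter<>();
salaryFilter.setFieldName("salary");        // must be a field mapped to an HBase column
salaryFilter.setFilterOp(FilterOp.EQUALS);
salaryFilter.getOperands().add(100000L);
salaryFilter.setFilterIfMissing(true);      // drop rows that have no value for this column

Query<String, Employee> query = store.newQuery();
query.setFilter(salaryFilter);
Result<String, Employee> results = query.execute();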
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project phoenix by apache.
The class IndexHalfStoreFileReaderGenerator, method preStoreFileReaderOpen.
@Override
public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p,
        FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r, Reader reader) throws IOException {
    TableName tableName = ctx.getEnvironment().getRegion().getTableDesc().getTableName();
    Region region = ctx.getEnvironment().getRegion();
    HRegionInfo childRegion = region.getRegionInfo();
    byte[] splitKey = null;
    if (reader == null && r != null) {
        if (!p.toString().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            return super.preStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, reader);
        }
        Scan scan = MetaTableAccessor.getScanForTableName(tableName);
        SingleColumnValueFilter scvf = null;
        if (Reference.isTopFileRegion(r.getFileRegion())) {
            scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
                    CompareOp.EQUAL, region.getRegionInfo().toByteArray());
            scvf.setFilterIfMissing(true);
        } else {
            scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
                    CompareOp.EQUAL, region.getRegionInfo().toByteArray());
            scvf.setFilterIfMissing(true);
        }
        if (scvf != null)
            scan.setFilter(scvf);
        byte[] regionStartKeyInHFile = null;
        HTable metaTable = null;
        PhoenixConnection conn = null;
        try {
            metaTable = new HTable(ctx.getEnvironment().getConfiguration(), TableName.META_TABLE_NAME);
            ResultScanner scanner = null;
            Result result = null;
            try {
                scanner = metaTable.getScanner(scan);
                result = scanner.next();
            } finally {
                if (scanner != null)
                    scanner.close();
            }
            if (result == null || result.isEmpty()) {
                Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor.getRegionsFromMergeQualifier(
                        ctx.getEnvironment().getRegionServerServices().getConnection(),
                        region.getRegionInfo().getRegionName());
                if (mergeRegions == null || mergeRegions.getFirst() == null)
                    return reader;
                byte[] splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(r.getSplitKey()));
                // We need not change anything in the first region's data because the first region's
                // start key is equal to the merged region's start key, so the same reader is returned.
                if (Bytes.compareTo(mergeRegions.getFirst().getStartKey(), splitRow) == 0) {
                    if (mergeRegions.getFirst().getStartKey().length == 0
                            && region.getRegionInfo().getEndKey().length != mergeRegions.getFirst().getEndKey().length) {
                        childRegion = mergeRegions.getFirst();
                        regionStartKeyInHFile = mergeRegions.getFirst().getStartKey().length == 0
                                ? new byte[mergeRegions.getFirst().getEndKey().length]
                                : mergeRegions.getFirst().getStartKey();
                    } else {
                        return reader;
                    }
                } else {
                    childRegion = mergeRegions.getSecond();
                    regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
                }
                splitKey = KeyValue.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0
                        ? new byte[region.getRegionInfo().getEndKey().length]
                        : region.getRegionInfo().getStartKey()).getKey();
            } else {
                HRegionInfo parentRegion = HRegionInfo.getHRegionInfo(result);
                regionStartKeyInHFile = parentRegion.getStartKey().length == 0
                        ? new byte[parentRegion.getEndKey().length]
                        : parentRegion.getStartKey();
            }
        } finally {
            if (metaTable != null)
                metaTable.close();
        }
        try {
            conn = QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(PhoenixConnection.class);
            PTable dataTable = IndexUtil.getPDataTable(conn, ctx.getEnvironment().getRegion().getTableDesc());
            List<PTable> indexes = dataTable.getIndexes();
            Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers =
                    new HashMap<ImmutableBytesWritable, IndexMaintainer>();
            for (PTable index : indexes) {
                if (index.getIndexType() == IndexType.LOCAL) {
                    IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, conn);
                    indexMaintainers.put(new ImmutableBytesWritable(
                            MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId())), indexMaintainer);
                }
            }
            if (indexMaintainers.isEmpty())
                return reader;
            byte[][] viewConstants = getViewConstants(dataTable);
            return new IndexHalfStoreFileReader(fs, p, cacheConf, in, size, r,
                    ctx.getEnvironment().getConfiguration(), indexMaintainers, viewConstants, childRegion,
                    regionStartKeyInHFile, splitKey);
        } catch (ClassNotFoundException e) {
            throw new IOException(e);
        } catch (SQLException e) {
            throw new IOException(e);
        } finally {
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException e) {
                    throw new IOException(e);
                }
            }
        }
    }
    return reader;
}
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project phoenix by apache.
The class MetaDataEndpointImpl, method findChildViews_deprecated.
// TODO remove this in 4.13 release
@Deprecated
private TableViewFinder findChildViews_deprecated(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
    byte[] schemaName = table.getSchemaName().getBytes();
    byte[] tableName = table.getTableName().getBytes();
    boolean isMultiTenant = table.isMultiTenant();
    Scan scan = new Scan();
    // If the table is multi-tenant, we need to check across all tenant_ids, so the row key
    // cannot be constrained. Otherwise, any child views will have the same tenantId.
    if (!isMultiTenant) {
        byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
        byte[] stopRow = ByteUtil.nextKey(startRow);
        scan.setStartRow(startRow);
        scan.setStopRow(stopRow);
    }
    SingleColumnValueFilter linkFilter =
            new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, linkTypeBytes);
    SingleColumnValueFilter tableTypeFilter =
            new SingleColumnValueFilter(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, CompareOp.EQUAL,
                    PTableType.VIEW.getSerializedValue().getBytes());
    tableTypeFilter.setFilterIfMissing(false);
    linkFilter.setFilterIfMissing(true);
    byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY,
            SchemaUtil.getPhysicalTableName(SchemaUtil.getTableNameAsBytes(schemaName, tableName),
                    table.isNamespaceMapped()).getName());
    SuffixFilter rowFilter = new SuffixFilter(suffix);
    FilterList filter = new FilterList(linkFilter, tableTypeFilter, rowFilter);
    scan.setFilter(filter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
    // Original region-only scanner modified due to PHOENIX-1208
    // RegionScanner scanner = region.getScanner(scan);
    // The following *should* work, but doesn't due to HBASE-11837
    // TableName systemCatalogTableName = region.getTableDesc().getTableName();
    // HTableInterface hTable = env.getTable(systemCatalogTableName);
    // These deprecated calls work around the issue
    HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, region.getTableDesc().getTableName().getName());
    try {
        boolean allViewsInCurrentRegion = true;
        int numOfChildViews = 0;
        List<ViewInfo> viewInfoList = Lists.newArrayList();
        ResultScanner scanner = hTable.getScanner(scan);
        try {
            for (Result result = scanner.next(); (result != null); result = scanner.next()) {
                numOfChildViews++;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ResultTuple resultTuple = new ResultTuple(result);
                resultTuple.getKey(ptr);
                byte[] key = ptr.copyBytes();
                if (checkTableKeyInRegion(key, region) != null) {
                    allViewsInCurrentRegion = false;
                }
                byte[][] rowKeyMetaData = new byte[3][];
                getVarChars(result.getRow(), 3, rowKeyMetaData);
                byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
                byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
                byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
                viewInfoList.add(new ViewInfo(viewTenantId, viewSchemaName, viewName));
            }
            TableViewFinder tableViewFinderResult = new TableViewFinder(viewInfoList);
            if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                tableViewFinderResult.setAllViewsNotInSingleRegion();
            }
            return tableViewFinderResult;
        } finally {
            scanner.close();
        }
    } finally {
        hTable.close();
    }
}
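A note on the two setFilterIfMissing calls above, since they are easy to misread: with the default value of false, a row that lacks the tested column passes the filter, while true drops such rows. A small illustrative sketch of the combined effect (the row layout described in the comments is an assumption for illustration, not taken from the method above):

// Hypothetical parent->child link row in SYSTEM.CATALOG: it carries a LINK_TYPE cell but no TABLE_TYPE cell.
//   linkFilter      (setFilterIfMissing(true))  -> LINK_TYPE is present and is compared; the row
//                                                  survives only if the value EQUALs linkTypeBytes.
//   tableTypeFilter (setFilterIfMissing(false)) -> TABLE_TYPE is absent, so the row is NOT rejected.
//   rowFilter (SuffixFilter)                    -> the row key must end with the physical table name suffix.
// FilterList defaults to MUST_PASS_ALL, so only rows that pass all three filters reach the scanner loop.
FilterList filter = new FilterList(linkFilter, tableTypeFilter, rowFilter);
scan.setFilter(filter);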
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project cdap by caskdata.
The class HBase10CDHQueueConsumer, method createStateFilter.
/**
 * Creates an HBase filter that filters out rows whose state column is PROCESSED (ignoring the transaction write pointer).
 */
private Filter createStateFilter() {
    byte[] processedMask = new byte[Ints.BYTES * 2 + 1];
    processedMask[processedMask.length - 1] = ConsumerEntryState.PROCESSED.getState();
    return new SingleColumnValueFilter(QueueEntryRow.COLUMN_FAMILY, stateColumnName,
            CompareFilter.CompareOp.NOT_EQUAL, new BitComparator(processedMask, BitComparator.BitwiseOp.AND));
}
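Why this excludes PROCESSED entries, briefly: the mask layout described below follows from the construction above, while the scan setup at the end is an illustrative assumption, not code from this class.

// The state cell value is 2 * Ints.BYTES bytes of transaction metadata followed by a single state byte,
// so processedMask is all zeros except its last byte, which holds the PROCESSED state value.
// BitComparator(processedMask, AND) reports a match only when the lengths agree and
// (cellValue AND processedMask) contains a non-zero byte, i.e. when the row's state byte carries the
// PROCESSED bits; CompareOp.NOT_EQUAL then drops exactly those rows. Rows without a state column
// still pass, because filterIfMissing is left at its default of false.
Scan scan = new Scan();
scan.setFilter(createStateFilter());   // only entries that are not yet PROCESSED are returned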
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project cdap by caskdata.
The class HBase98QueueConsumer, method createStateFilter.
/**
 * Creates an HBase filter that filters out rows whose state column is PROCESSED (ignoring the transaction write pointer).
 */
private Filter createStateFilter() {
    byte[] processedMask = new byte[Ints.BYTES * 2 + 1];
    processedMask[processedMask.length - 1] = ConsumerEntryState.PROCESSED.getState();
    return new SingleColumnValueFilter(QueueEntryRow.COLUMN_FAMILY, stateColumnName,
            CompareFilter.CompareOp.NOT_EQUAL, new BitComparator(processedMask, BitComparator.BitwiseOp.AND));
}