Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project pxf by greenplum-db: class HBaseFilterBuilderTest, method parseIsNotNullExpression.
@Test
public void parseIsNotNullExpression() throws Exception {
    Filter filter = helper("a1o9", tupleDescription);
    assertTrue(filter instanceof SingleColumnValueFilter);
    SingleColumnValueFilter result = (SingleColumnValueFilter) filter;
    assertNotNull(result);
    assertSame(families[1], result.getFamily());
    assertSame(qualifiers[1], result.getQualifier());
    assertEquals(CompareFilter.CompareOp.NOT_EQUAL, result.getOperator());
    assertTrue(result.getComparator() instanceof NullComparator);
}
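The assertions above capture how an IS NOT NULL predicate maps onto the HBase client API: a SingleColumnValueFilter whose operator is NOT_EQUAL and whose comparator is a NullComparator. A minimal sketch of building such a filter directly; the helper method and its family/qualifier arguments are hypothetical, not part of HBaseFilterBuilder:

// Hypothetical helper illustrating the structure asserted in the test above.
private static Filter isNotNullFilter(byte[] family, byte[] qualifier) {
    SingleColumnValueFilter f = new SingleColumnValueFilter(
            family, qualifier, CompareFilter.CompareOp.NOT_EQUAL, new NullComparator());
    // Without this, rows that lack the column entirely would also pass.
    f.setFilterIfMissing(true);
    return f;
}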
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project pxf by greenplum-db: class HBaseFilterBuilderTest, method testOrOperator.
@Test
public void testOrOperator() throws Exception {
    // a1 > '2008-02-01' or a2 > 1200
    Filter filter = helper("a1c25s10d2008-02-01o2a2c20s4d1200o2l1", tupleDescription);
    assertNotNull(filter);
    assertTrue(filter instanceof FilterList);
    FilterList filterList = (FilterList) filter;
    assertEquals(FilterList.Operator.MUST_PASS_ONE, filterList.getOperator());
    assertNotNull(filterList.getFilters());
    assertEquals(2, filterList.getFilters().size());
    Filter left = filterList.getFilters().get(0);
    Filter right = filterList.getFilters().get(1);
    assertTrue(left instanceof SingleColumnValueFilter);
    assertTrue(right instanceof SingleColumnValueFilter);
    SingleColumnValueFilter scvFilterLeft = (SingleColumnValueFilter) left;
    SingleColumnValueFilter scvFilterRight = (SingleColumnValueFilter) right;
    assertEquals(families[1], scvFilterLeft.getFamily());
    assertEquals(qualifiers[1], scvFilterLeft.getQualifier());
    assertEquals(CompareFilter.CompareOp.GREATER, scvFilterLeft.getOperator());
    assertEquals(0, scvFilterLeft.getComparator().compareTo("2008-02-01".getBytes()));
    assertEquals(families[2], scvFilterRight.getFamily());
    assertEquals(qualifiers[2], scvFilterRight.getQualifier());
    assertEquals(CompareFilter.CompareOp.GREATER, scvFilterRight.getOperator());
    assertEquals(0, scvFilterRight.getComparator().compareTo("1200".getBytes()));
}
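The filter returned by the builder is an ordinary HBase Filter (here a FilterList with MUST_PASS_ONE representing the OR), so downstream it would typically be attached to a Scan. A hedged usage sketch; the connection variable and table name are assumptions, not code from the PXF project:

// Hypothetical usage of the FilterList asserted above.
Scan scan = new Scan();
scan.setFilter(filter);
try (Table table = connection.getTable(TableName.valueOf("sales"));
     ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
        // process rows matching "a1 > '2008-02-01' OR a2 > 1200"
    }
}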
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project pxf by greenplum-db: class HBaseFilterBuilderTest, method testNestedLogicalOperators.
@Test
public void testNestedLogicalOperators() throws Exception {
    // cdate > '2008-02-01' OR (cdate < '2008-12-01' AND amt > 1200)
    Filter filter = helper("a1c1082s10d2008-02-01o2a1c1082s10d2008-12-01o1a0c23s4d1200o2l0l1", tupleDescription);
    assertNotNull(filter);
    assertTrue(filter instanceof FilterList);
    FilterList filterList = (FilterList) filter;
    assertEquals(FilterList.Operator.MUST_PASS_ONE, filterList.getOperator());
    assertNotNull(filterList.getFilters());
    assertEquals(2, filterList.getFilters().size());
    Filter left = filterList.getFilters().get(0);
    Filter right = filterList.getFilters().get(1);
    assertTrue(left instanceof SingleColumnValueFilter);
    assertTrue(right instanceof FilterList);
    SingleColumnValueFilter scvFilterLeft = (SingleColumnValueFilter) left;
    FilterList scvFilterListRight = (FilterList) right;
    assertEquals(families[1], scvFilterLeft.getFamily());
    assertEquals(qualifiers[1], scvFilterLeft.getQualifier());
    assertEquals(CompareFilter.CompareOp.GREATER, scvFilterLeft.getOperator());
    assertEquals(0, scvFilterLeft.getComparator().compareTo("2008-02-01".getBytes()));
    assertEquals(FilterList.Operator.MUST_PASS_ALL, scvFilterListRight.getOperator());
    assertNotNull(scvFilterListRight.getFilters());
    assertEquals(2, scvFilterListRight.getFilters().size());
    left = scvFilterListRight.getFilters().get(0);
    right = scvFilterListRight.getFilters().get(1);
    assertTrue(left instanceof SingleColumnValueFilter);
    assertTrue(right instanceof SingleColumnValueFilter);
    scvFilterLeft = (SingleColumnValueFilter) left;
    SingleColumnValueFilter scvFilterRight = (SingleColumnValueFilter) right;
    assertEquals(families[1], scvFilterLeft.getFamily());
    assertEquals(qualifiers[1], scvFilterLeft.getQualifier());
    assertEquals(CompareFilter.CompareOp.LESS, scvFilterLeft.getOperator());
    assertEquals(0, scvFilterLeft.getComparator().compareTo("2008-12-01".getBytes()));
    assertEquals(families[0], scvFilterRight.getFamily());
    assertEquals(qualifiers[0], scvFilterRight.getQualifier());
    assertEquals(CompareFilter.CompareOp.GREATER, scvFilterRight.getOperator());
    assertEquals(0, scvFilterRight.getComparator().compareTo("1200".getBytes()));
}
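The nested case relies on FilterList itself being a Filter, so an AND sub-expression (MUST_PASS_ALL) can be added as one branch of the outer OR list (MUST_PASS_ONE). A construction sketch of the shape asserted above; the column family/qualifier names and the choice of BinaryComparator are assumptions, since the test does not show which comparator type the builder actually produces:

// Hypothetical: cdate > '2008-02-01' OR (cdate < '2008-12-01' AND amt > 1200)
FilterList and = new FilterList(FilterList.Operator.MUST_PASS_ALL,
        new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("cdate"),
                CompareFilter.CompareOp.LESS, new BinaryComparator(Bytes.toBytes("2008-12-01"))),
        new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("amt"),
                CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes("1200"))));
FilterList or = new FilterList(FilterList.Operator.MUST_PASS_ONE,
        new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("cdate"),
                CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes("2008-02-01"))),
        and); // the AND list is nested as the second branch of the OR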
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project elasticflow by springwings: class HbaseReader, method getPageData.
@Override
public DataPage getPageData(final Page page, int pageSize) throws EFException {
    PREPARE(false, false);
    boolean releaseConn = false;
    try {
        if (!ISLINK())
            return this.dataPage;
        Table table = (Table) GETSOCKET().getConnection(END_TYPE.reader);
        Scan scan = new Scan();
        List<Filter> filters = new ArrayList<Filter>();
        // Range filter: only rows whose scan-key column is >= the page's start value.
        SingleColumnValueFilter range = new SingleColumnValueFilter(
                Bytes.toBytes(this.columnFamily), Bytes.toBytes(page.getReaderScanKey()),
                CompareFilter.CompareOp.GREATER_OR_EQUAL,
                new BinaryComparator(Bytes.toBytes(page.getStart())));
        range.setLatestVersionOnly(true);
        range.setFilterIfMissing(true);
        filters.add(range);
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, filters));
        // Bound the scan by the page's start and end row keys.
        scan.setStartRow(Bytes.toBytes(page.getStart()));
        scan.setStopRow(Bytes.toBytes(page.getEnd()));
        scan.setCaching(pageSize);
        scan.addFamily(Bytes.toBytes(this.columnFamily));
        ResultScanner resultScanner = table.getScanner(scan);
        try {
            String dataBoundary = null;
            String updateFieldValue = null;
            this.dataPage.put(GlobalParam.READER_KEY, page.getReaderKey());
            this.dataPage.put(GlobalParam.READER_SCAN_KEY, page.getReaderScanKey());
            if (this.readHandler != null && this.readHandler.supportHandleData()) {
                this.readHandler.handleData(this, resultScanner, page, pageSize);
            } else {
                for (Result r : resultScanner) {
                    PipeDataUnit u = PipeDataUnit.getInstance();
                    for (Cell cell : r.rawCells()) {
                        String k = new String(CellUtil.cloneQualifier(cell));
                        String v = new String(CellUtil.cloneValue(cell), "UTF-8");
                        if (k.equals(this.dataPage.get(GlobalParam.READER_KEY))) {
                            u.setReaderKeyVal(v);
                            dataBoundary = v;
                        }
                        if (k.equals(this.dataPage.get(GlobalParam.READER_SCAN_KEY))) {
                            updateFieldValue = v;
                        }
                        PipeDataUnit.addFieldValue(k, v, page.getInstanceConfig().getReadFields(), u);
                    }
                    this.dataUnit.add(u);
                }
            }
            if (updateFieldValue == null) {
                this.dataPage.put(GlobalParam.READER_LAST_STAMP, System.currentTimeMillis());
            } else {
                this.dataPage.put(GlobalParam.READER_LAST_STAMP, updateFieldValue);
            }
            this.dataPage.putDataBoundary(dataBoundary);
            this.dataPage.putData(this.dataUnit);
        } catch (Exception e) {
            releaseConn = true;
            this.dataPage.put(GlobalParam.READER_LAST_STAMP, -1);
            log.error("get dataPage Exception will auto free connection!");
            throw new EFException(e);
        }
    } catch (Exception e) {
        releaseConn = true;
        log.error("get dataPage Exception will auto free connection!");
        throw new EFException(e);
    } finally {
        REALEASE(false, releaseConn);
    }
    return this.dataPage;
}
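The calls above (CompareFilter.CompareOp, setStartRow, setStopRow) come from the older client API; in HBase 2.x they are deprecated in favor of CompareOperator and the fluent withStartRow/withStopRow. A hedged sketch of the same bounded range scan in the newer style, reusing the snippet's page and columnFamily fields and assuming an HBase 2.x client is available:

// Same range scan expressed with the HBase 2.x API (a sketch, not code from the project).
Scan scan = new Scan()
        .withStartRow(Bytes.toBytes(page.getStart()))
        .withStopRow(Bytes.toBytes(page.getEnd()))
        .setCaching(pageSize)
        .addFamily(Bytes.toBytes(this.columnFamily));
SingleColumnValueFilter range = new SingleColumnValueFilter(
        Bytes.toBytes(this.columnFamily), Bytes.toBytes(page.getReaderScanKey()),
        CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes(page.getStart())));
range.setFilterIfMissing(true);
range.setLatestVersionOnly(true);
scan.setFilter(range);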
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project elasticflow by springwings: class HbaseReader, method getPageSplit.
@Override
public ConcurrentLinkedDeque<String> getPageSplit(final Task task, int pageSize) throws EFException {
    int i = 0;
    ConcurrentLinkedDeque<String> dt = new ConcurrentLinkedDeque<>();
    PREPARE(false, false);
    if (!ISLINK())
        return dt;
    boolean releaseConn = false;
    try {
        Scan scan = new Scan();
        Table table = (Table) GETSOCKET().getConnection(END_TYPE.reader);
        List<Filter> filters = new ArrayList<Filter>();
        // Incremental bound: the scan-field value must be >= the task start time.
        SingleColumnValueFilter range = new SingleColumnValueFilter(
                Bytes.toBytes(this.columnFamily), Bytes.toBytes(task.getScanParam().getScanField()),
                CompareFilter.CompareOp.GREATER_OR_EQUAL,
                new BinaryComparator(Bytes.toBytes(task.getStartTime())));
        range.setLatestVersionOnly(true);
        range.setFilterIfMissing(true);
        filters.add(range);
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, filters));
        scan.setCaching(pageSize);
        scan.addFamily(Bytes.toBytes(this.columnFamily));
        scan.addColumn(Bytes.toBytes(this.columnFamily), Bytes.toBytes(task.getScanParam().getScanField()));
        ResultScanner resultScanner = table.getScanner(scan);
        // Collect a boundary row key roughly every pageSize cells.
        for (Result r : resultScanner) {
            if (i % pageSize == 0) {
                dt.add(Bytes.toString(r.getRow()));
            }
            i += r.size();
        }
    } catch (Exception e) {
        releaseConn = true;
        log.error("get page splits exception will auto free connection!");
        throw new EFException(e);
    } finally {
        REALEASE(false, releaseConn);
    }
    return dt;
}
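Neither reader method closes its ResultScanner; in the standard HBase client, ResultScanner is Closeable and keeps a server-side scanner open, so try-with-resources is the usual pattern. An illustrative sketch of the split loop with the scanner managed that way (not a change present in the project):

// Same split loop as above, with the scanner released automatically.
try (ResultScanner scanner = table.getScanner(scan)) {
    int i = 0;
    for (Result r : scanner) {
        if (i % pageSize == 0) {
            dt.add(Bytes.toString(r.getRow()));
        }
        i += r.size();
    }
} // the server-side scanner is closed here even if iteration throws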