Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by Apache.
The class TestHRegion, method buildScanner.
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
    throws IOException {
  // Defaults to FilterList.Operator.MUST_PASS_ALL.
  FilterList allFilters = new FilterList();
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  // Only return rows where this column value exists in the row.
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return r.getScanner(scan);
}
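For comparison, the same PrefixFilter-plus-SingleColumnValueFilter combination can be built against the client API rather than an internal HRegion. A minimal sketch, assuming an open Connection named conn and a table named "trans" (both hypothetical; only the family and qualifier names come from the snippet above):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical client-side counterpart of buildScanner(): scan rows whose key
// starts with keyPrefix AND whose trans-tags:qual2 value equals the given value.
static ResultScanner clientScan(Connection conn, String keyPrefix, String value)
    throws IOException {
  FilterList allFilters = new FilterList(); // MUST_PASS_ALL by default
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  scvf.setFilterIfMissing(true); // without this, rows lacking the column would also pass
  allFilters.addFilter(scvf);
  Scan scan = new Scan();
  scan.setFilter(allFilters);
  return conn.getTable(TableName.valueOf("trans")).getScanner(scan); // caller closes the scanner
}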
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by Apache.
The class TestHRegion, method testFlushCacheWhileScanning.
/**
 * Flushes the cache in a thread while scanning. The test verifies that the
 * scan is coherent - e.g. the returned results always reflect the same or a
 * later update than the previous results.
 *
 * @throws IOException          on scan / compact
 * @throws InterruptedException on thread join
 */
@Test
public void testFlushCacheWhileScanning() throws IOException, InterruptedException {
  byte[] family = Bytes.toBytes("family");
  int numRows = 1000;
  int flushAndScanInterval = 10;
  int compactInterval = 10 * flushAndScanInterval;
  this.region = initHRegion(tableName, method, CONF, family);
  FlushThread flushThread = new FlushThread();
  try {
    flushThread.start();
    Scan scan = new Scan();
    scan.addFamily(family);
    scan.setFilter(new SingleColumnValueFilter(family, qual1, CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes(5L))));
    int expectedCount = 0;
    List<Cell> res = new ArrayList<>();
    boolean toggle = true;
    for (long i = 0; i < numRows; i++) {
      Put put = new Put(Bytes.toBytes(i));
      put.setDurability(Durability.SKIP_WAL);
      put.addColumn(family, qual1, Bytes.toBytes(i % 10));
      region.put(put);
      if (i != 0 && i % compactInterval == 0) {
        LOG.debug("iteration = " + i + " ts=" + System.currentTimeMillis());
        region.compact(true);
      }
      if (i % 10 == 5L) {
        expectedCount++;
      }
      if (i != 0 && i % flushAndScanInterval == 0) {
        res.clear();
        InternalScanner scanner = region.getScanner(scan);
        if (toggle) {
          flushThread.flush();
        }
        while (scanner.next(res)) {
          // drain the entire scan into res
        }
        if (!toggle) {
          flushThread.flush();
        }
        assertEquals("toggle=" + toggle + " i=" + i + " ts=" + System.currentTimeMillis(),
            expectedCount, res.size());
        toggle = !toggle;
      }
    }
  } finally {
    try {
      flushThread.done();
      flushThread.join();
      flushThread.checkNoError();
    } catch (InterruptedException ie) {
      LOG.warn("Caught exception when joining with flushThread", ie);
    }
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
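FlushThread is an inner helper class of TestHRegion, so the region field is in scope; the test relies on only three operations: flush() asks the background thread to flush the memstore, done() stops its loop, and checkNoError() surfaces any failure on the test thread. An illustrative sketch of such a helper (ours, not the verbatim inner class):

// Illustrative sketch, assuming a non-static inner class of TestHRegion so that
// 'region' and JUnit's assertNull are in scope.
class FlushThread extends Thread {
  private volatile boolean done = false;
  private volatile Throwable error = null;

  @Override
  public void run() {
    while (!done) {
      synchronized (this) {
        try {
          wait();           // park until flush() or done() notifies us
        } catch (InterruptedException ignored) {
        }
      }
      if (done) {
        break;
      }
      try {
        region.flush(true); // flush the memstore while scanners are open
      } catch (IOException e) {
        error = e;          // surfaced later via checkNoError()
        break;
      }
    }
  }

  void flush() {
    synchronized (this) {
      notify();
    }
  }

  void done() {
    done = true;
    synchronized (this) {
      notify();
    }
  }

  void checkNoError() {
    assertNull("Error during flush", error);
  }
}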
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by Apache.
The class TestHRegion, method testIndexesScanWithOneDeletedRow.
@Test
public void testIndexesScanWithOneDeletedRow() throws IOException {
  byte[] family = Bytes.toBytes("family");
  // Setting up region
  this.region = initHRegion(tableName, method, CONF, family);
  try {
    Put put = new Put(Bytes.toBytes(1L));
    put.addColumn(family, qual1, 1L, Bytes.toBytes(1L));
    region.put(put);
    region.flush(true);
    Delete delete = new Delete(Bytes.toBytes(1L), 1L);
    region.delete(delete);
    put = new Put(Bytes.toBytes(2L));
    put.addColumn(family, qual1, 2L, Bytes.toBytes(2L));
    region.put(put);
    Scan idxScan = new Scan();
    idxScan.addFamily(family);
    idxScan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
        Arrays.<Filter>asList(
            new SingleColumnValueFilter(family, qual1, CompareOp.GREATER_OR_EQUAL,
                new BinaryComparator(Bytes.toBytes(0L))),
            new SingleColumnValueFilter(family, qual1, CompareOp.LESS_OR_EQUAL,
                new BinaryComparator(Bytes.toBytes(3L))))));
    InternalScanner scanner = region.getScanner(idxScan);
    List<Cell> res = new ArrayList<>();
    while (scanner.next(res)) {
      // drain the entire scan into res
    }
    assertEquals(1L, res.size());
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
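The two SingleColumnValueFilters joined under MUST_PASS_ALL form a range predicate: the scan only returns rows whose qual1 value lies in [0, 3]. The pattern generalizes; a small sketch (the helper name rangeFilter is ours, not HBase's):

import java.util.Arrays;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper: match rows whose family:qualifier long value lies in [min, max].
static Filter rangeFilter(byte[] family, byte[] qualifier, long min, long max) {
  return new FilterList(FilterList.Operator.MUST_PASS_ALL, Arrays.<Filter>asList(
      new SingleColumnValueFilter(family, qualifier, CompareOp.GREATER_OR_EQUAL,
          new BinaryComparator(Bytes.toBytes(min))),
      new SingleColumnValueFilter(family, qualifier, CompareOp.LESS_OR_EQUAL,
          new BinaryComparator(Bytes.toBytes(max)))));
}

Note that BinaryComparator compares raw bytes lexicographically, so this only matches numeric order for non-negative longs encoded with Bytes.toBytes, which holds for the test data above.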
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by Apache.
The class TestJoinedScanners, method runScanner.
private void runScanner(Table table, boolean slow) throws Exception {
  long time = System.nanoTime();
  Scan scan = new Scan();
  scan.addColumn(cf_essential, col_name);
  scan.addColumn(cf_joined, col_name);
  SingleColumnValueFilter filter = new SingleColumnValueFilter(cf_essential, col_name,
      CompareFilter.CompareOp.EQUAL, flag_yes);
  filter.setFilterIfMissing(true);
  scan.setFilter(filter);
  scan.setLoadColumnFamiliesOnDemand(!slow);
  ResultScanner result_scanner = table.getScanner(scan);
  Result res;
  long rows_count = 0;
  while ((res = result_scanner.next()) != null) {
    rows_count++;
  }
  double timeSec = (System.nanoTime() - time) / 1000000000.0;
  result_scanner.close();
  LOG.info((slow ? "Slow" : "Joined") + " scanner finished in " + Double.toString(timeSec)
      + " seconds, got " + Long.toString(rows_count / 2) + " rows");
}
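The essential/joined split is the point of this benchmark: in the "joined" run (slow == false), setLoadColumnFamiliesOnDemand(true) lets HBase evaluate the filter against the essential family first and fetch cf_joined only for rows that pass, whereas the "slow" run loads both families for every row. A standalone sketch of the same scan setup (family, qualifier, and value names here are illustrative, not from TestJoinedScanners):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative joined scan: the filter touches only the small "essential" family,
// so the large "heavy" family is fetched on demand for matching rows alone.
Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("essential"), Bytes.toBytes("flag"));
scan.addColumn(Bytes.toBytes("heavy"), Bytes.toBytes("payload"));
SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("essential"),
    Bytes.toBytes("flag"), CompareOp.EQUAL, Bytes.toBytes("Y"));
filter.setFilterIfMissing(true); // skip rows that lack the flag column entirely
scan.setFilter(filter);
scan.setLoadColumnFamiliesOnDemand(true); // enables the joined-scanner code path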
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project hbase by Apache.
The class TestSCVFWithMiniCluster, method setUp.
@BeforeClass
public static void setUp() throws Exception {
  HBaseTestingUtility util = new HBaseTestingUtility();
  util.startMiniCluster(1);
  Admin admin = util.getAdmin();
  destroy(admin, HBASE_TABLE_NAME);
  create(admin, HBASE_TABLE_NAME, FAMILY_A, FAMILY_B);
  admin.close();
  htable = util.getConnection().getTable(HBASE_TABLE_NAME);
  /* Add some values */
  List<Put> puts = new ArrayList<>();
  /* Add a row with 'a:foo' = false */
  Put put = new Put(Bytes.toBytes("1"));
  put.setDurability(Durability.SKIP_WAL);
  put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("false"));
  put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  puts.add(put);
  /* Add a row with 'a:foo' = true */
  put = new Put(Bytes.toBytes("2"));
  put.setDurability(Durability.SKIP_WAL);
  put.addColumn(FAMILY_A, QUALIFIER_FOO, Bytes.toBytes("true"));
  put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  puts.add(put);
  /* Add a row with the 'a:foo' qualifier not set */
  put = new Put(Bytes.toBytes("3"));
  put.setDurability(Durability.SKIP_WAL);
  put.addColumn(FAMILY_A, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_FOO, Bytes.toBytes("_flag_"));
  put.addColumn(FAMILY_B, QUALIFIER_BAR, Bytes.toBytes("_flag_"));
  puts.add(put);
  htable.put(puts);
  /*
   * We want to filter out from the scan all rows that do not have the column 'a:foo' with
   * value 'false'. Only the row with key '1' should be returned by the scan.
   */
  scanFilter = new SingleColumnValueFilter(FAMILY_A, QUALIFIER_FOO, CompareOp.EQUAL,
      new BinaryComparator(Bytes.toBytes("false")));
  ((SingleColumnValueFilter) scanFilter).setFilterIfMissing(true);
}
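Because setFilterIfMissing(true) is set, row '3' (which has no a:foo column at all) is dropped along with row '2' (a:foo = "true"), leaving only row '1'. A sketch of the check a test method could perform with this scanFilter (the assertion code below is illustrative, not the actual test body; it assumes JUnit's assertEquals and assertArrayEquals are statically imported):

// Illustrative check: only the row whose a:foo value is "false" is returned.
Scan scan = new Scan();
scan.setFilter(scanFilter);
try (ResultScanner rs = htable.getScanner(scan)) {
  int count = 0;
  for (Result result : rs) {
    assertArrayEquals(Bytes.toBytes("1"), result.getRow());
    count++;
  }
  assertEquals(1, count);
}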