Example use of org.apache.hadoop.hbase.client.Scan in project hbase by apache:
class TestTags, method testTagsWithAppendAndIncrement.
@Test
public void testTagsWithAppendAndIncrement() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  byte[] f = Bytes.toBytes("f");
  byte[] q = Bytes.toBytes("q");
  byte[] row1 = Bytes.toBytes("r1");
  byte[] row2 = Bytes.toBytes("r2");
  HTableDescriptor desc = new HTableDescriptor(tableName);
  HColumnDescriptor colDesc = new HColumnDescriptor(f);
  desc.addFamily(colDesc);
  TEST_UTIL.getAdmin().createTable(desc);
  Table table = null;
  try {
    table = TEST_UTIL.getConnection().getTable(tableName);
    // Case 1: Increment a cell whose base Put carried a visibility tag.
    // The incremented cell must keep the original tag.
    Put put = new Put(row1);
    byte[] v = Bytes.toBytes(2L);
    put.addColumn(f, q, v);
    put.setAttribute("visibility", Bytes.toBytes("tag1"));
    table.put(put);
    Increment increment = new Increment(row1);
    increment.addColumn(f, q, 1L);
    table.increment(increment);
    TestCoprocessorForTags.checkTagPresence = true;
    ResultScanner scanner = table.getScanner(new Scan());
    Result result = scanner.next();
    // BUGFIX: scanners were previously never closed; close each one so
    // server-side scanner resources (leases) are released promptly.
    scanner.close();
    KeyValue kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
    List<Tag> tags = TestCoprocessorForTags.tags;
    assertEquals(3L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
    assertEquals(1, tags.size());
    assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
    TestCoprocessorForTags.checkTagPresence = false;
    TestCoprocessorForTags.tags = null;
    // Case 2: Increment with its own visibility attribute on a cell that
    // already has a tag; both tags must survive.
    increment = new Increment(row1);
    increment.add(new KeyValue(row1, f, q, 1234L, v));
    increment.setAttribute("visibility", Bytes.toBytes("tag2"));
    table.increment(increment);
    TestCoprocessorForTags.checkTagPresence = true;
    scanner = table.getScanner(new Scan());
    result = scanner.next();
    scanner.close();
    kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
    tags = TestCoprocessorForTags.tags;
    assertEquals(5L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
    assertEquals(2, tags.size());
    // We cannot assume the ordering of tags
    List<String> tagValues = new ArrayList<>();
    for (Tag tag : tags) {
      tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
    }
    assertTrue(tagValues.contains("tag1"));
    assertTrue(tagValues.contains("tag2"));
    TestCoprocessorForTags.checkTagPresence = false;
    TestCoprocessorForTags.tags = null;
    // Case 3: Increment with a visibility attribute on a cell whose base
    // Put had no tag; only the increment's tag should be present.
    put = new Put(row2);
    v = Bytes.toBytes(2L);
    put.addColumn(f, q, v);
    table.put(put);
    increment = new Increment(row2);
    increment.add(new KeyValue(row2, f, q, 1234L, v));
    increment.setAttribute("visibility", Bytes.toBytes("tag2"));
    table.increment(increment);
    TestCoprocessorForTags.checkTagPresence = true;
    scanner = table.getScanner(new Scan().setStartRow(row2));
    result = scanner.next();
    scanner.close();
    kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
    tags = TestCoprocessorForTags.tags;
    assertEquals(4L, Bytes.toLong(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()));
    assertEquals(1, tags.size());
    assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
    TestCoprocessorForTags.checkTagPresence = false;
    TestCoprocessorForTags.tags = null;
    // Test Append
    // Case 4: Append preserves the tag from the base Put.
    byte[] row3 = Bytes.toBytes("r3");
    put = new Put(row3);
    put.addColumn(f, q, Bytes.toBytes("a"));
    put.setAttribute("visibility", Bytes.toBytes("tag1"));
    table.put(put);
    Append append = new Append(row3);
    append.add(f, q, Bytes.toBytes("b"));
    table.append(append);
    TestCoprocessorForTags.checkTagPresence = true;
    scanner = table.getScanner(new Scan().setStartRow(row3));
    result = scanner.next();
    scanner.close();
    kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
    tags = TestCoprocessorForTags.tags;
    assertEquals(1, tags.size());
    assertEquals("tag1", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
    TestCoprocessorForTags.checkTagPresence = false;
    TestCoprocessorForTags.tags = null;
    // Case 5: Append with its own visibility attribute merges with the
    // existing tag; both must be present afterwards.
    append = new Append(row3);
    append.add(new KeyValue(row3, f, q, 1234L, v));
    append.setAttribute("visibility", Bytes.toBytes("tag2"));
    table.append(append);
    TestCoprocessorForTags.checkTagPresence = true;
    scanner = table.getScanner(new Scan().setStartRow(row3));
    result = scanner.next();
    scanner.close();
    kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
    tags = TestCoprocessorForTags.tags;
    assertEquals(2, tags.size());
    // We cannot assume the ordering of tags
    tagValues.clear();
    for (Tag tag : tags) {
      tagValues.add(Bytes.toString(TagUtil.cloneValue(tag)));
    }
    assertTrue(tagValues.contains("tag1"));
    assertTrue(tagValues.contains("tag2"));
    TestCoprocessorForTags.checkTagPresence = false;
    TestCoprocessorForTags.tags = null;
    // Case 6: Append with a visibility attribute onto an untagged cell;
    // only the append's tag should be present.
    byte[] row4 = Bytes.toBytes("r4");
    put = new Put(row4);
    put.addColumn(f, q, Bytes.toBytes("a"));
    table.put(put);
    append = new Append(row4);
    append.add(new KeyValue(row4, f, q, 1234L, v));
    append.setAttribute("visibility", Bytes.toBytes("tag2"));
    table.append(append);
    TestCoprocessorForTags.checkTagPresence = true;
    scanner = table.getScanner(new Scan().setStartRow(row4));
    result = scanner.next();
    scanner.close();
    kv = KeyValueUtil.ensureKeyValue(result.getColumnLatestCell(f, q));
    tags = TestCoprocessorForTags.tags;
    assertEquals(1, tags.size());
    assertEquals("tag2", Bytes.toString(TagUtil.cloneValue(tags.get(0))));
  } finally {
    // Always reset the shared coprocessor state so a failure here cannot
    // poison subsequent tests, and release the table.
    TestCoprocessorForTags.checkTagPresence = false;
    TestCoprocessorForTags.tags = null;
    if (table != null) {
      table.close();
    }
  }
}
Example use of org.apache.hadoop.hbase.client.Scan in project hbase by apache:
class TestTags, method testFlushAndCompactionWithoutTags.
@Test
public void testFlushAndCompactionWithoutTags() throws Exception {
  Table table = null;
  try {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] fam = Bytes.toBytes("info");
    byte[] row = Bytes.toBytes("rowa");
    // column names
    byte[] qual = Bytes.toBytes("qual");
    byte[] row1 = Bytes.toBytes("rowb");
    byte[] row2 = Bytes.toBytes("rowc");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setBlockCacheEnabled(true);
    colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
    desc.addFamily(colDesc);
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(desc);
    table = TEST_UTIL.getConnection().getTable(tableName);
    // Write three rows, flushing after each so we get three store files.
    Put put = new Put(row);
    byte[] value = Bytes.toBytes("value");
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
    table.put(put);
    admin.flush(tableName);
    // We are lacking an API for confirming flush request compaction.
    // Just sleep for a short time. We won't be able to confirm flush
    // completion but the test won't hang now or in the future if
    // default compaction policy causes compaction between flush and
    // when we go to confirm it.
    Thread.sleep(1000);
    Put put1 = new Put(row1);
    byte[] value1 = Bytes.toBytes("1000dfsdf");
    put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
    table.put(put1);
    admin.flush(tableName);
    Thread.sleep(1000);
    Put put2 = new Put(row2);
    byte[] value2 = Bytes.toBytes("1000dfsdf");
    put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
    table.put(put2);
    admin.flush(tableName);
    Thread.sleep(1000);
    // After flush: no cell should carry tags.
    // IMPROVED: try-with-resources replaces the manual try/finally whose
    // null check was dead code (getScanner never returns null here).
    Scan s = new Scan(row);
    try (ResultScanner scanner = table.getScanner(s)) {
      Result[] next = scanner.next(3);
      for (Result result : next) {
        CellScanner cellScanner = result.cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertEquals(0, current.getTagsLength());
      }
    }
    // Major-compact the three files down and verify tags are still absent.
    admin.compact(tableName);
    while (admin.getCompactionState(tableName) != CompactionState.NONE) {
      Thread.sleep(10);
    }
    s = new Scan(row);
    try (ResultScanner scanner = table.getScanner(s)) {
      Result[] next = scanner.next(3);
      for (Result result : next) {
        CellScanner cellScanner = result.cellScanner();
        cellScanner.advance();
        Cell current = cellScanner.current();
        assertEquals(0, current.getTagsLength());
      }
    }
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
Example use of org.apache.hadoop.hbase.client.Scan in project hbase by apache:
class TestStoreFile, method testEmptyStoreFileRestrictKeyRanges.
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
  // Build a mocked store whose column family the scanner is matched against.
  byte[] family = Bytes.toBytes("ty");
  HColumnDescriptor mockColDesc = mock(HColumnDescriptor.class);
  when(mockColDesc.getName()).thenReturn(family);
  Store mockStore = mock(Store.class);
  when(mockStore.getFamily()).thenReturn(mockColDesc);
  StoreFileReader mockReader = mock(StoreFileReader.class);
  StoreFileScanner fileScanner =
      new StoreFileScanner(mockReader, mock(HFileScanner.class), false, false, 0, 0, true);
  // A scan restricted to a CF time range that the (empty) file cannot
  // satisfy must cause the scanner to be rejected.
  Scan scan = new Scan();
  scan.setColumnFamilyTimeRange(family, 0, 1);
  assertFalse(fileScanner.shouldUseScanner(scan, mockStore, 0));
}
Example use of org.apache.hadoop.hbase.client.Scan in project hbase by apache:
class TestStoreScanner, method testDeleteVersionSameTimestamp.
@Test
public void testDeleteVersionSameTimestamp() throws IOException {
  // A Put and a Delete for the same cell at the same timestamp:
  // the delete must win, so the scan yields nothing.
  KeyValue putKv = KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care");
  KeyValue delKv = KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care");
  List<KeyValueScanner> scanners = scanFixture(new KeyValue[] { putKv, delKv });
  Scan scanSpec = new Scan(Bytes.toBytes("R1"));
  try (StoreScanner storeScanner =
      new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) {
    List<Cell> results = new ArrayList<>();
    // next() reports no further rows and surfaces no cells.
    Assert.assertFalse(storeScanner.next(results));
    Assert.assertEquals(0, results.size());
  }
}
Example use of org.apache.hadoop.hbase.client.Scan in project hbase by apache:
class TestStoreScanner, method SKIP_testPeek (currently disabled).
/**
 * TODO this fails, since we don't handle deletions, etc, in peek
 */
public void SKIP_testPeek() throws Exception {
  // Same-timestamp Put + Delete pair; a delete-aware peek would see nothing.
  KeyValue putKv = KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care");
  KeyValue delKv = KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care");
  List<KeyValueScanner> scanners = scanFixture(new KeyValue[] { putKv, delKv });
  Scan scanSpec = new Scan(Bytes.toBytes("R1"));
  try (StoreScanner scan = new StoreScanner(scanSpec, scanInfo, scanType, getCols("a"), scanners)) {
    Assert.assertNull(scan.peek());
  }
}
Aggregations