Use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.
From the class TestTimeRangeMapRed, the method verify:
private void verify(final Table table) throws IOException {
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, COLUMN_NAME);
  scan.setMaxVersions(1);
  ResultScanner scanner = table.getScanner(scan);
  for (Result r : scanner) {
    for (Cell kv : r.listCells()) {
      log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv))
          + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) + "\t" + kv.getTimestamp()
          + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv)));
      org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()),
          Bytes.toBoolean(CellUtil.cloneValue(kv)));
    }
  }
  scanner.close();
}
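For context, here is a minimal, self-contained sketch of the same Scan pattern with an explicit time window, which is what TestTimeRangeMapRed ultimately exercises. The connection setup, table name ("mrtest"), and column names are illustrative assumptions, not taken from the test above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimeRangeScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical table and column names, used only for illustration.
    byte[] family = Bytes.toBytes("text");
    byte[] qualifier = Bytes.toBytes("data");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("mrtest"))) {
      Scan scan = new Scan();
      scan.addColumn(family, qualifier);
      scan.setMaxVersions(1);
      // Only return cells whose timestamp falls within [min, max).
      scan.setTimeRange(0L, System.currentTimeMillis());
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          for (Cell cell : r.listCells()) {
            System.out.println(Bytes.toString(r.getRow()) + " @ " + cell.getTimestamp());
          }
        }
      }
    }
  }
}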
Use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.
From the class TestTableInputFormatScanBase, the method testNumOfSplits:
/**
 * Tests an MR scan using data skew auto-balance
*
* @throws IOException
* @throws ClassNotFoundException
* @throws InterruptedException
*/
public void testNumOfSplits(String ratio, int expectedNumOfSplits)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName = "TestJobForNumOfSplits";
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  Scan scan = new Scan();
  scan.addFamily(INPUT_FAMILYS[0]);
  scan.addFamily(INPUT_FAMILYS[1]);
  c.set("hbase.mapreduce.input.autobalance", "true");
  c.set("hbase.mapreduce.input.autobalance.maxskewratio", ratio);
  c.set(KEY_STARTROW, "");
  c.set(KEY_LASTROW, "");
  Job job = new Job(c, jobName);
  TableMapReduceUtil.initTableMapperJob(TABLE_NAME.getNameAsString(), scan, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  TableInputFormat tif = new TableInputFormat();
  tif.setConf(job.getConfiguration());
  Assert.assertEquals(TABLE_NAME, table.getName());
  List<InputSplit> splits = tif.getSplits(job);
  Assert.assertEquals(expectedNumOfSplits, splits.size());
}
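The test relies on the data-skew auto-balance split logic of TableInputFormat. Below is a hedged sketch of a standalone MR driver that sets the same configuration keys; the table name, column family, mapper, and tuning values are assumptions for illustration only.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;

public class AutoBalanceScanJobSketch {

  // A minimal hypothetical mapper that just echoes the row key (illustrative only).
  public static class RowKeyMapper
      extends TableMapper<ImmutableBytesWritable, ImmutableBytesWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
        throws IOException, InterruptedException {
      context.write(key, key);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Enable the same auto-balance split behaviour the test above asserts on.
    conf.set("hbase.mapreduce.input.autobalance", "true");
    conf.set("hbase.mapreduce.input.autobalance.maxskewratio", "3");
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("contents"));  // illustrative family name
    scan.setCaching(500);                       // fetch rows in larger RPC batches for MR
    scan.setCacheBlocks(false);                 // avoid polluting the block cache from MR
    Job job = Job.getInstance(conf, "scan-with-autobalance");
    TableMapReduceUtil.initTableMapperJob("mytable", scan, RowKeyMapper.class,
        ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
    job.waitForCompletion(true);
  }
}

In a driver like this, initTableMapperJob serializes the Scan into the job configuration, and TableInputFormat later turns it into the input splits whose count the test above checks.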
Use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.
From the class TestWideScanner, the method testWideScanBatching:
@Test
public void testWideScanBatching() throws IOException {
  final int batch = 256;
  try {
    this.r = createNewHRegion(TESTTABLEDESC, null, null);
    int inserted = addWideContent(this.r);
    List<Cell> results = new ArrayList<>();
    Scan scan = new Scan();
    scan.addFamily(A);
    scan.addFamily(B);
    scan.addFamily(C);
    scan.setMaxVersions(100);
    scan.setBatch(batch);
    InternalScanner s = r.getScanner(scan);
    int total = 0;
    int i = 0;
    boolean more;
    do {
      more = s.next(results);
      i++;
      LOG.info("iteration #" + i + ", results.size=" + results.size());
      // assert that the result set is no larger
      assertTrue(results.size() <= batch);
      total += results.size();
      if (results.size() > 0) {
        // assert that all results are from the same row
        byte[] row = CellUtil.cloneRow(results.get(0));
        for (Cell kv : results) {
          assertTrue(Bytes.equals(row, CellUtil.cloneRow(kv)));
        }
      }
      results.clear();
      // trigger ChangedReadersObservers
      Iterator<KeyValueScanner> scanners =
          ((HRegion.RegionScannerImpl) s).storeHeap.getHeap().iterator();
      while (scanners.hasNext()) {
        StoreScanner ss = (StoreScanner) scanners.next();
        ss.updateReaders(new ArrayList<>());
      }
    } while (more);
    // assert that the scanner returned all values
    LOG.info("inserted " + inserted + ", scanned " + total);
    assertEquals(total, inserted);
    s.close();
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.r);
  }
}
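The same batching behaviour can be observed from the client API. Below is a minimal sketch assuming a hypothetical table named "widetable" with wide rows; setBatch caps the number of cells per Result, so one wide row may come back as several partial Results, while setCaching controls how many rows are fetched per RPC.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchedScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("widetable"))) {
      Scan scan = new Scan();
      scan.setMaxVersions(100);
      scan.setBatch(256);   // at most 256 cells per Result (partial rows allowed)
      scan.setCaching(10);  // at most 10 Results per RPC round trip
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result partial : scanner) {
          // Each Result holds at most 256 cells, possibly only a fragment of a row.
          System.out.println(partial.rawCells().length + " cells from row "
              + Bytes.toString(partial.getRow()));
        }
      }
    }
  }
}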
Use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.
From the class TestStoreFile, the method bloomWriteRead:
private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception {
  float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
  Path f = writer.getPath();
  long now = System.currentTimeMillis();
  for (int i = 0; i < 2000; i += 2) {
    String row = String.format(localFormatter, i);
    KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(), "col".getBytes(), now,
        "value".getBytes());
    writer.append(kv);
  }
  writer.close();
  StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
  reader.loadFileInfo();
  reader.loadBloomfilter();
  StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
  // check false positives rate
  int falsePos = 0;
  int falseNeg = 0;
  for (int i = 0; i < 2000; i++) {
    String row = String.format(localFormatter, i);
    TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    columns.add("family:col".getBytes());
    Scan scan = new Scan(row.getBytes(), row.getBytes());
    scan.addColumn("family".getBytes(), "family:col".getBytes());
    Store store = mock(Store.class);
    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
    when(hcd.getName()).thenReturn(Bytes.toBytes("family"));
    when(store.getFamily()).thenReturn(hcd);
    boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
    if (i % 2 == 0) {
      if (!exists) {
        falseNeg++;
      }
    } else {
      if (exists) {
        falsePos++;
      }
    }
  }
  // evict because we are about to delete the file
  reader.close(true);
  fs.delete(f, true);
  assertEquals("False negatives: " + falseNeg, 0, falseNeg);
  int maxFalsePos = (int) (2 * 2000 * err);
  assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than "
      + maxFalsePos + ")", falsePos <= maxFalsePos);
}
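The bloom filter that shouldUseScanner probes here is configured per column family. A minimal sketch of creating a table with a ROW bloom filter follows; the table name, family name, and error rate are illustrative assumptions, not values from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class BloomFilterTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Optional: tune the target false-positive rate that the test above reads back.
    conf.setFloat("io.storefile.bloom.error.rate", 0.01f);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("bloomed"));
      HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("family"));
      // ROW blooms let point Gets and single-row Scans skip store files
      // that cannot contain the requested row key.
      family.setBloomFilterType(BloomType.ROW);
      desc.addFamily(family);
      admin.createTable(desc);
    }
  }
}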
Use of org.apache.hadoop.hbase.client.Scan in project hbase by apache.
From the class TestTags, the method testFlushAndCompactionwithCombinations:
@Test
public void testFlushAndCompactionwithCombinations() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  byte[] fam = Bytes.toBytes("info");
  byte[] row = Bytes.toBytes("rowa");
  // column names
  byte[] qual = Bytes.toBytes("qual");
  byte[] row1 = Bytes.toBytes("rowb");
  byte[] row2 = Bytes.toBytes("rowc");
  byte[] rowd = Bytes.toBytes("rowd");
  byte[] rowe = Bytes.toBytes("rowe");
  Table table = null;
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setBlockCacheEnabled(true);
    colDesc.setDataBlockEncoding(encoding);
    desc.addFamily(colDesc);
    Admin admin = TEST_UTIL.getAdmin();
    admin.createTable(desc);
    try {
      table = TEST_UTIL.getConnection().getTable(tableName);
      Put put = new Put(row);
      byte[] value = Bytes.toBytes("value");
      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      int bigTagLen = Short.MAX_VALUE - 5;
      put.setAttribute("visibility", new byte[bigTagLen]);
      table.put(put);
      Put put1 = new Put(row1);
      byte[] value1 = Bytes.toBytes("1000dfsdf");
      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName);
      // We are lacking an API for confirming flush request compaction.
      // Just sleep for a short time. We won't be able to confirm flush
      // completion but the test won't hang now or in the future if
      // default compaction policy causes compaction between flush and
      // when we go to confirm it.
      Thread.sleep(1000);
      put1 = new Put(row2);
      value1 = Bytes.toBytes("1000dfsdf");
      put1.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value1);
      table.put(put1);
      admin.flush(tableName);
      Thread.sleep(1000);
      Put put2 = new Put(rowd);
      byte[] value2 = Bytes.toBytes("1000dfsdf");
      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      table.put(put2);
      put2 = new Put(rowe);
      value2 = Bytes.toBytes("1000dfsddfdf");
      put2.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value2);
      put.setAttribute("visibility", Bytes.toBytes("ram"));
      table.put(put2);
      admin.flush(tableName);
      Thread.sleep(1000);
      TestCoprocessorForTags.checkTagPresence = true;
      Scan s = new Scan(row);
      s.setCaching(1);
      ResultScanner scanner = table.getScanner(s);
      try {
        Result next = null;
        while ((next = scanner.next()) != null) {
          CellScanner cellScanner = next.cellScanner();
          cellScanner.advance();
          Cell current = cellScanner.current();
          if (CellUtil.matchingRow(current, row)) {
            assertEquals(1, TestCoprocessorForTags.tags.size());
            Tag tag = TestCoprocessorForTags.tags.get(0);
            assertEquals(bigTagLen, tag.getValueLength());
          } else {
            assertEquals(0, TestCoprocessorForTags.tags.size());
          }
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
        TestCoprocessorForTags.checkTagPresence = false;
      }
      while (admin.getCompactionState(tableName) != CompactionState.NONE) {
        Thread.sleep(10);
      }
      TestCoprocessorForTags.checkTagPresence = true;
      scanner = table.getScanner(s);
      try {
        Result next = null;
        while ((next = scanner.next()) != null) {
          CellScanner cellScanner = next.cellScanner();
          cellScanner.advance();
          Cell current = cellScanner.current();
          if (CellUtil.matchingRow(current, row)) {
            assertEquals(1, TestCoprocessorForTags.tags.size());
            Tag tag = TestCoprocessorForTags.tags.get(0);
            assertEquals(bigTagLen, tag.getValueLength());
          } else {
            assertEquals(0, TestCoprocessorForTags.tags.size());
          }
        }
      } finally {
        if (scanner != null) {
          scanner.close();
        }
        TestCoprocessorForTags.checkTagPresence = false;
      }
    } finally {
      if (table != null) {
        table.close();
      }
      // delete the table
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
  }
}
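The single-row scan pattern used twice above (a start row plus a caching of 1) can be written more compactly with try-with-resources. The sketch below assumes a hypothetical table name and start key and omits the tag-checking coprocessor, which is specific to this test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleRowStartScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] startRow = Bytes.toBytes("rowa");  // illustrative start key
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("tags_demo"))) {
      Scan s = new Scan(startRow);
      // caching=1 fetches one row per RPC, mirroring the test's row-by-row checks.
      s.setCaching(1);
      try (ResultScanner scanner = table.getScanner(s)) {
        for (Result next : scanner) {
          CellScanner cells = next.cellScanner();
          while (cells.advance()) {
            Cell current = cells.current();
            System.out.println(Bytes.toString(current.getRowArray(),
                current.getRowOffset(), current.getRowLength()));
          }
        }
      }
    }
  }
}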