Use of org.apache.hadoop.hbase.io.hfile.HFileScanner in project hbase by apache.
From the class TestHalfStoreFileReader, the method doTestOfScanAndReseek.
private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom, CacheConfig cacheConf)
    throws IOException {
  final HalfStoreFileReader halfreader =
      new HalfStoreFileReader(fs, p, cacheConf, bottom, TEST_UTIL.getConfiguration());
  halfreader.loadFileInfo();
  final HFileScanner scanner = halfreader.getScanner(false, false);
  scanner.seekTo();
  Cell curr;
  do {
    curr = scanner.getCell();
    KeyValue reseekKv = getLastOnCol(curr);
    int ret = scanner.reseekTo(reseekKv);
    assertTrue("reseek to returned: " + ret, ret > 0);
  } while (scanner.next());
  int ret = scanner.reseekTo(getLastOnCol(curr));
  assertTrue(ret > 0);
  halfreader.close(true);
}
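The ret > 0 assertions rely on the seek contract documented on HFileScanner in this era of HBase: seekTo(Cell) and reseekTo(Cell) return -1 if the sought key sorts before the first key of the file, 0 on an exact match, and 1 when the scanner is left on the last cell that sorts before the sought key. Because getLastOnCol builds a synthetic "last possible cell on this column" key that never exists in the file, 1 is the expected result. For reference, a minimal standalone scan of a plain HFile follows the same seekTo/getCell/next pattern shown above (a sketch; hfilePath is an assumed existing file, and imports are elided as in the snippets on this page):

  Configuration conf = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(conf);
  HFile.Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf), conf);
  reader.loadFileInfo();
  // cacheBlocks=false, pread=false, matching the test above
  HFileScanner scanner = reader.getScanner(false, false);
  if (scanner.seekTo()) { // positions at the first cell; returns false for an empty file
    do {
      Cell cell = scanner.getCell();
      // ... process cell ...
    } while (scanner.next()); // next() returns false once past the last cell
  }
  reader.close();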
Use of org.apache.hadoop.hbase.io.hfile.HFileScanner in project hbase by apache.
From the class TestHFileOutputFormat2, the method test_WritingTagData.
/**
 * Test that the {@link HFileOutputFormat2} RecordWriter writes tags such as TTL into
 * the HFile.
 */
@Test
public void test_WritingTagData() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version";
  conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  RecordWriter<ImmutableBytesWritable, Cell> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("WritingTagData");
  try {
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat2 hof = new HFileOutputFormat2();
    writer = hof.getRecordWriter(context);
    final byte[] b = Bytes.toBytes("b");
    List<Tag> tags = new ArrayList<>();
    tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670)));
    KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags);
    writer.write(new ImmutableBytesWritable(), kv);
    writer.close(context);
    writer = null;
    FileSystem fs = dir.getFileSystem(conf);
    RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(dir, true);
    while (iterator.hasNext()) {
      LocatedFileStatus keyFileStatus = iterator.next();
      HFile.Reader reader =
          HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), conf);
      HFileScanner scanner = reader.getScanner(false, false, false);
      scanner.seekTo();
      Cell cell = scanner.getCell();
      List<Tag> tagsFromCell =
          TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
      assertTrue(tagsFromCell.size() > 0);
      for (Tag tag : tagsFromCell) {
        assertTrue(tag.getType() == TagType.TTL_TAG_TYPE);
      }
    }
  } finally {
    if (writer != null && context != null)
      writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
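Two details make this test work. First, tags are only persisted to disk when the HFile format version is at least HFile.MIN_FORMAT_VERSION_WITH_TAGS (version 3), which is why the test sets hfile.format.version explicitly. Second, the TTL travels as the tag's raw byte payload. A sketch of decoding that payload back out of a tag, assuming an array-backed tag as produced by ArrayBackedTag above (the value was written as a 4-byte int via Bytes.toBytes(978670)):

  for (Tag tag : tagsFromCell) {
    if (tag.getType() == TagType.TTL_TAG_TYPE && tag.hasArray()) {
      // Recover the int payload written above with Bytes.toBytes(978670).
      int ttl = Bytes.toInt(tag.getValueArray(), tag.getValueOffset());
      assertEquals(978670, ttl);
    }
  }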
Use of org.apache.hadoop.hbase.io.hfile.HFileScanner in project hbase by apache.
From the class TestCacheOnWriteInSchema, the method readStoreFile.
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL);
  HFile.Reader reader = sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null,
          DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n"
            + "isCached: " + isCached + "\n"
            + "Test description: " + testDescription + "\n"
            + "block: " + block + "\n"
            + "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
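The positional arguments of readBlock are easy to misread, so here is the same call with each parameter labelled. The names are assumed from the HFile.Reader API of this era; treat this as an annotated sketch rather than authoritative documentation:

  HFileBlock block = reader.readBlock(
      offset,                 // offset of the block within the file
      -1,                     // onDiskBlockSize: -1 means unknown, read the header to find out
      false,                  // cacheBlock: do not cache this block on read
      true,                   // pread: positional read instead of seek+read
      false,                  // isCompaction: not a compaction read
      true,                   // updateCacheMetrics: count this access in cache statistics
      null,                   // expectedBlockType: null skips the block-type check
      DataBlockEncoding.NONE  // expectedDataBlockEncoding
  );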
Use of org.apache.hadoop.hbase.io.hfile.HFileScanner in project hbase by apache.
From the class TestFSErrorsExposed, the method testHFileScannerThrowsErrors.
/**
* Injects errors into the pread calls of an on-disk file, and makes
* sure those bubble up to the HFile scanner
*/
@Test
public void testHFileScannerThrowsErrors() throws IOException {
  Path hfilePath = new Path(
      new Path(util.getDataTestDir("internalScannerExposesErrors"), "regionname"), "familyname");
  HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
  FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
  FileSystem fs = new HFileSystem(faultyfs);
  CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs)
      .withOutputDir(hfilePath).withFileContext(meta).build();
  TestStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
      BloomType.NONE);
  StoreFileReader reader = sf.createReader();
  HFileScanner scanner = reader.getScanner(false, true);
  FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
  assertNotNull(inStream);
  scanner.seekTo();
  // Do at least one successful read
  assertTrue(scanner.next());
  faultyfs.startFaults();
  try {
    int scanned = 0;
    while (scanner.next()) {
      scanned++;
    }
    fail("Scanner didn't throw after faults injected");
  } catch (IOException ioe) {
    LOG.info("Got expected exception", ioe);
    assertTrue(ioe.getMessage().contains("Fault"));
  }
  // end of test so evictOnClose
  reader.close(true);
}
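FaultyFileSystem and FaultyInputStream are helpers defined inside TestFSErrorsExposed, not public HBase API. The core idea is a pass-through stream whose positional reads start throwing once faults are enabled; a hypothetical minimal version is sketched below (names are illustrative, and the real helper also wraps every stream the filesystem opens and tracks them in the inStreams list used above):

  // Hypothetical minimal fault injector; the real test helper is more elaborate.
  class FaultyInputStream extends FSDataInputStream {
    private volatile boolean faultsStarted = false;

    FaultyInputStream(FSDataInputStream in) throws IOException {
      super(in);
    }

    void startFaults() {
      faultsStarted = true;
    }

    @Override
    public int read(long position, byte[] buffer, int offset, int length) throws IOException {
      if (faultsStarted) {
        throw new IOException("Fault injected");
      }
      return ((FSDataInputStream) in).read(position, buffer, offset, length);
    }
  }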
Use of org.apache.hadoop.hbase.io.hfile.HFileScanner in project hbase by apache.
From the class TestMajorCompaction, the method verifyCounts.
private void verifyCounts(int countRow1, int countRow2) throws Exception {
  int count1 = 0;
  int count2 = 0;
  for (StoreFile f : r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) {
    HFileScanner scanner = f.getReader().getScanner(false, false);
    scanner.seekTo();
    do {
      byte[] row = CellUtil.cloneRow(scanner.getCell());
      if (Bytes.equals(row, STARTROW)) {
        count1++;
      } else if (Bytes.equals(row, secondRowBytes)) {
        count2++;
      }
    } while (scanner.next());
  }
  assertEquals(countRow1, count1);
  assertEquals(countRow2, count2);
}
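One note on the loop above: CellUtil.cloneRow copies the row bytes of every cell only to compare them. When a plain equality check is all that is needed, CellUtil.matchingRow(Cell, byte[]) compares against the cell's backing buffer without the copy. A sketch of the equivalent loop body (this overload exists in the HBase versions these tests target; it was later renamed matchingRows):

  do {
    Cell cell = scanner.getCell();
    if (CellUtil.matchingRow(cell, STARTROW)) {
      count1++;
    } else if (CellUtil.matchingRow(cell, secondRowBytes)) {
      count2++;
    }
  } while (scanner.next());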