Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class HFileTestBase, the method doTest:
@SuppressWarnings("deprecation")
public void doTest(Configuration conf, Path path, Compression.Algorithm compression) throws Exception {
  // Create 10000 random test KVs
  RedundantKVGenerator generator = new RedundantKVGenerator();
  List<KeyValue> testKvs = generator.generateTestKeyValues(10000);
  // Iterate through data block encoding and compression combinations
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext fileContext = new HFileContextBuilder()
    .withBlockSize(4096) // small block
    .withCompression(compression)
    .build();
  // write a new test HFile
  LOG.info("Writing with " + fileContext);
  FSDataOutputStream out = FS.create(path);
  HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
    .withOutputStream(out)
    .withFileContext(fileContext)
    .create();
  try {
    for (KeyValue kv : testKvs) {
      writer.append(kv);
    }
  } finally {
    writer.close();
    out.close();
  }
  // read it back in
  LOG.info("Reading with " + fileContext);
  int i = 0;
  HFileScanner scanner = null;
  HFile.Reader reader = HFile.createReader(FS, path, cacheConf, true, conf);
  try {
    scanner = reader.getScanner(conf, false, false);
    assertTrue("Initial seekTo failed", scanner.seekTo());
    do {
      Cell kv = scanner.getCell();
      assertTrue("Read back an unexpected or invalid KV",
        testKvs.contains(KeyValueUtil.ensureKeyValue(kv)));
      i++;
    } while (scanner.next());
  } finally {
    // Close the scanner first (guarding against a failed getScanner leaving it
    // null) so cleanup cannot be derailed by a NullPointerException.
    if (scanner != null) {
      scanner.close();
    }
    reader.close();
  }
assertEquals("Did not read back as many KVs as written", i, testKvs.size());
// Test random seeks with pread
LOG.info("Random seeking with " + fileContext);
reader = HFile.createReader(FS, path, cacheConf, true, conf);
try {
scanner = reader.getScanner(conf, false, true);
assertTrue("Initial seekTo failed", scanner.seekTo());
for (i = 0; i < 100; i++) {
KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size()));
assertEquals("Unable to find KV as expected: " + kv, 0, scanner.seekTo(kv));
}
} finally {
scanner.close();
reader.close();
}
}
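For reference, a minimal sketch of just the context construction used in doTest. Only the block size and compression are overridden; everything else keeps the HFileContextBuilder defaults. The class and helper names here are hypothetical, and the values are illustrative.

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class SmallBlockContextSketch {
  // Hypothetical helper: the same shape of context doTest builds, with a
  // deliberately small block size so many blocks are written and read back.
  static HFileContext smallBlockContext(Compression.Algorithm compression) {
    return new HFileContextBuilder()
        .withBlockSize(4 * 1024)       // 4 KB blocks, as in doTest
        .withCompression(compression)  // e.g. Compression.Algorithm.GZ or NONE
        .build();
  }
}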
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestSeekToBlockWithEncoders, the method seekToTheKey:
private void seekToTheKey(KeyValue expected, List<KeyValue> kvs, Cell toSeek) throws IOException {
  // create all seekers
  List<DataBlockEncoder.EncodedSeeker> encodedSeekers = new ArrayList<>();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(false)
      .withIncludesTags(false)
      .withCompression(Compression.Algorithm.NONE)
      .build();
    HFileBlockEncodingContext encodingContext =
      encoder.newDataBlockEncodingContext(conf, encoding, HFILEBLOCK_DUMMY_HEADER, meta);
    ByteBuffer encodedBuffer =
      TestDataBlockEncoders.encodeKeyValues(encoding, kvs, encodingContext, this.useOffheapData);
    DataBlockEncoder.EncodedSeeker seeker =
      encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
    encodedSeekers.add(seeker);
  }
  // test it!
  // try a few random seeks
  checkSeekingConsistency(encodedSeekers, toSeek, expected);
}
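The loop above only builds seekers for encodings that actually have an encoder. A small, hypothetical illustration of that filter (the class name and println output are mine, not part of the test):

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

public class EncodingFilterSketch {
  public static void main(String[] args) {
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
      if (encoding.getEncoder() == null) {
        // DataBlockEncoding.NONE has no encoder, so seekToTheKey skips it
        continue;
      }
      System.out.println("Would create an EncodedSeeker for " + encoding);
    }
  }
}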
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestFSErrorsExposed, the method testHFileScannerThrowsErrors:
/**
* Injects errors into the pread calls of an on-disk file, and makes
* sure those bubble up to the HFile scanner
*/
@Test
public void testHFileScannerThrowsErrors() throws IOException {
  Path hfilePath = new Path(
    new Path(util.getDataTestDir("internalScannerExposesErrors"), "regionname"), "familyname");
  HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
  FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
  FileSystem fs = new HFileSystem(faultyfs);
  CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs)
    .withOutputDir(hfilePath)
    .withFileContext(meta)
    .build();
  TestHStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  HStoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
    BloomType.NONE, true);
  sf.initReader();
  StoreFileReader reader = sf.getReader();
  HFileScanner scanner = reader.getScanner(false, true);
  FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
  assertNotNull(inStream);
  scanner.seekTo();
  // Do at least one successful read
  assertTrue(scanner.next());
  faultyfs.startFaults();
  try {
    int scanned = 0;
    while (scanner.next()) {
      scanned++;
    }
    fail("Scanner didn't throw after faults injected");
  } catch (IOException ioe) {
    LOG.info("Got expected exception", ioe);
    assertTrue(ioe.getMessage().contains("Fault"));
  }
  // end of test so evictOnClose
  reader.close(true);
}
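The fault-injection test relies on the expected-exception idiom: scan until the wrapped stream starts failing and require that the IOException reaches the caller instead of being swallowed. A generic, self-contained sketch of that idiom, where the FaultyScan interface and the "Fault" message check are hypothetical stand-ins for the scanner and FaultyInputStream above:

import java.io.IOException;

public class ExpectedFaultSketch {
  interface FaultyScan {
    boolean next() throws IOException;
  }

  // Hypothetical stand-in for the loop above: keep calling next() until the
  // injected fault fires, and fail if it never surfaces as an IOException.
  static void expectFault(FaultyScan scan) {
    try {
      while (scan.next()) {
        // keep scanning; the injected fault should interrupt this loop
      }
      throw new AssertionError("Scanner didn't throw after faults injected");
    } catch (IOException ioe) {
      if (!ioe.getMessage().contains("Fault")) {
        throw new AssertionError("Unexpected exception message", ioe);
      }
    }
  }
}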
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
In the class TestHRegionServerBulkLoad, the method createHFile:
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family, byte[] qualifier,
    byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder()
    .withBlockSize(BLOCKSIZE)
    .withCompression(COMPRESSION)
    .build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
    .withPath(fs, path)
    .withFileContext(context)
    .create();
  long now = EnvironmentEdgeManager.currentTime();
  try {
    // subtract 2 since iterateOnSplits doesn't include boundary keys
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
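A hypothetical caller of createHFile, assuming it sits in the same package as the TestHRegionServerBulkLoad class above; the directory layout, file name, family, qualifier, value, and row count are illustrative. The test's rowkey(i) helper must produce keys in increasing lexicographic order, because HFile.Writer requires cells to be appended in sorted order.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateHFileCallerSketch {
  // Hypothetical usage: write one bulk-loadable HFile under a column-family
  // directory so it can later be bulk loaded into that family.
  static void writeFamilyFile(FileSystem fs, Path familyDir) throws IOException {
    Path hfile = new Path(familyDir, "hfile_0");
    TestHRegionServerBulkLoad.createHFile(fs, hfile,
        Bytes.toBytes("f"),      // column family
        Bytes.toBytes("q"),      // qualifier
        Bytes.toBytes("value"),  // cell value repeated in every row
        1000);                   // number of rows
  }
}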