Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestCompoundBloomFilter, the method writeStoreFile:
private Path writeStoreFile(int t, BloomType bt, List<KeyValue> kvs) throws IOException {
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZES[t]);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
  cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
  StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(TEST_UTIL.getDataTestDir())
      .withBloomType(bt)
      .withFileContext(meta)
      .build();
  assertTrue(w.hasGeneralBloom());
  assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter);
  CompoundBloomFilterWriter cbbf = (CompoundBloomFilterWriter) w.getGeneralBloomWriter();
  int keyCount = 0;
  KeyValue prev = null;
  LOG.debug("Total keys/values to insert: " + kvs.size());
  for (KeyValue kv : kvs) {
    w.append(kv);
    // Validate the key count in the Bloom filter.
    boolean newKey = true;
    if (prev != null) {
      newKey = !(bt == BloomType.ROW ? CellUtil.matchingRows(kv, prev)
          : CellUtil.matchingRowColumn(kv, prev));
    }
    if (newKey) {
      ++keyCount;
    }
    assertEquals(keyCount, cbbf.getKeyCount());
    prev = kv;
  }
  w.close();
  return w.getPath();
}
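The keyCount bookkeeping is the interesting part: a cell only adds a new Bloom key when it differs from the previous cell in its row (for BloomType.ROW) or in its row plus column (for BloomType.ROWCOL). A minimal sketch of that check factored into a helper, assuming the same test classpath; the helper name is illustrative, not part of the test:

private static boolean isNewBloomKey(BloomType bt, KeyValue kv, KeyValue prev) {
  if (prev == null) {
    return true; // the first cell always contributes a new Bloom key
  }
  // ROW blooms key on the row only; ROWCOL keys on row + column.
  return !(bt == BloomType.ROW
      ? CellUtil.matchingRows(kv, prev)
      : CellUtil.matchingRowColumn(kv, prev));
}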
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestFSErrorsExposed, the method testHFileScannerThrowsErrors:
/**
 * Injects errors into the pread calls of an on-disk file, and makes
 * sure those bubble up to the HFile scanner.
 */
@Test
public void testHFileScannerThrowsErrors() throws IOException {
  Path hfilePath = new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"),
      "regionname"), "familyname");
  HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
  FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
  FileSystem fs = new HFileSystem(faultyfs);
  CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs)
      .withOutputDir(hfilePath)
      .withFileContext(meta)
      .build();
  TestStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
      BloomType.NONE);
  StoreFileReader reader = sf.createReader();
  HFileScanner scanner = reader.getScanner(false, true);
  FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
  assertNotNull(inStream);
  scanner.seekTo();
  // Do at least one successful read
  assertTrue(scanner.next());
  faultyfs.startFaults();
  try {
    int scanned = 0;
    while (scanner.next()) {
      scanned++;
    }
    fail("Scanner didn't throw after faults injected");
  } catch (IOException ioe) {
    LOG.info("Got expected exception", ioe);
    assertTrue(ioe.getMessage().contains("Fault"));
  }
  // end of test so evictOnClose
  reader.close(true);
}
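The FaultyFileSystem and FaultyInputStream helpers referenced above are test-local classes not shown on this page. A minimal sketch of the pread-wrapping idea they rely on, built on org.apache.hadoop.fs.FSDataInputStream and PositionedReadable from hadoop-common; the class name and exception message here are illustrative:

// Wraps an already-open stream; once faults start, every positional read throws.
static class FaultyInputStreamSketch extends FSDataInputStream {
  private volatile boolean faultsStarted = false;

  FaultyInputStreamSketch(InputStream in) throws IOException {
    super(in); // 'in' must also implement Seekable and PositionedReadable
  }

  void startFaults() {
    faultsStarted = true;
  }

  @Override
  public int read(long position, byte[] buffer, int offset, int length) throws IOException {
    if (faultsStarted) {
      throw new IOException("Fault injected");
    }
    // Delegate the pread to the wrapped stream while faults are off.
    return ((PositionedReadable) in).read(position, buffer, offset, length);
  }
}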
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestHRegionServerBulkLoad, the method createHFile:
/**
 * Create an HFile with the given number of rows with a specified value.
 */
public static void createHFile(FileSystem fs, Path path, byte[] family, byte[] qualifier,
    byte[] value, int numRows) throws IOException {
  HFileContext context = new HFileContextBuilder()
      .withBlockSize(BLOCKSIZE)
      .withCompression(COMPRESSION)
      .build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    // Write numRows cells, all sharing the same timestamp.
    for (int i = 0; i < numRows; i++) {
      KeyValue kv = new KeyValue(rowkey(i), family, qualifier, now, value);
      writer.append(kv);
    }
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(now));
  } finally {
    writer.close();
  }
}
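For context, a caller-side sketch of how such a helper is typically used in a bulk-load test: write one HFile per column family under a bulk directory laid out as bulk-dir/family/hfile. The directory, file name, and row count below are illustrative, not taken from the test:

FileSystem fs = FileSystem.get(conf);
// Hypothetical bulk-load staging layout: <bulkDir>/<family>/<hfile>
Path bulkDir = util.getDataTestDirOnTestFS("bulk_output");
Path familyDir = new Path(bulkDir, "cf");
createHFile(fs, new Path(familyDir, "hfile_0"),
    Bytes.toBytes("cf"), Bytes.toBytes("qual"), Bytes.toBytes("value"), 1000);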
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestStore, the method testEmptyStoreFile:
/**
 * Test for HBASE-1686.
 * @throws IOException
 */
@Test
public void testEmptyStoreFile() throws IOException {
  init(this.name.getMethodName());
  // Write a store file.
  this.store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null);
  this.store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null);
  flush(1);
  // Now put in place an empty store file. It's a little tricky. Have to
  // do it manually with a hacked-in sequence id.
  StoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = f.getMaxSequenceId();
  Configuration c = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(c);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
      .withOutputDir(storedir)
      .withFileContext(meta)
      .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  this.store.close();
  // Reopen it... should pick up two files
  this.store = new HStore(this.store.getHRegion(), this.store.getFamily(), c);
  Assert.assertEquals(2, this.store.getStorefilesCount());
  result = HBaseTestingUtility.getFromStoreFile(store, get.getRow(), qualifiers);
  Assert.assertEquals(1, result.size());
}
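The core trick here, producing a store file that carries only sequence-id metadata and no cells, can be distilled into a small helper. A sketch under the same test classpath; the helper name and block size are illustrative:

static Path writeEmptyStoreFile(Configuration c, FileSystem fs, Path storedir, long seqId)
    throws IOException {
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
      .withOutputDir(storedir)
      .withFileContext(meta)
      .build();
  // Append only the metadata trailer: sequence id, not a major compaction.
  w.appendMetadata(seqId, false);
  w.close();
  return w.getPath();
}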
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
From the class TestStoreFile, the method testHFileLink:
@Test
public void testHFileLink() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
  // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, testDir);
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
      FSUtils.getTableDir(testDir, hri.getTable()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);
  Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
      HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
  // Try to open store file from link
  StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
  StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf, BloomType.NONE);
  assertTrue(storeFileInfo.isLink());
  // Now confirm that I can read from the link
  int count = 1;
  HFileScanner s = hsf.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
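The read-back steps above generalize into a small counting helper that works for both plain store files and HFileLink paths, since StoreFileInfo resolves links (as the isLink() assertion in the test shows). A sketch assuming the same classpath; the helper name is illustrative:

static int countCells(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf)
    throws IOException {
  StoreFileInfo info = new StoreFileInfo(conf, fs, path);
  StoreFile sf = new StoreFile(fs, info, conf, cacheConf, BloomType.NONE);
  HFileScanner scanner = sf.createReader().getScanner(false, false);
  int count = 0;
  if (scanner.seekTo()) { // seekTo() returns false when the file is empty
    count = 1;
    while (scanner.next()) {
      count++;
    }
  }
  return count;
}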