Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestStoreFile, method testMultipleTimestamps.
/**
 * Test to ensure correctness when using StoreFile with multiple timestamps
 * @throws IOException
 */
@Test
public void testMultipleTimestamps() throws IOException {
  byte[] family = Bytes.toBytes("familyname");
  byte[] qualifier = Bytes.toBytes("qualifier");
  int numRows = 10;
  long[] timestamps = new long[] { 20, 10, 5, 1 };
  Scan scan = new Scan();

  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path storedir = new Path(new Path(testDir, "7e0102"), Bytes.toString(family));
  Path dir = new Path(storedir, "1234567890");
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();

  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withOutputDir(dir)
      .withFileContext(meta)
      .build();
  List<KeyValue> kvList = getKeyValueSet(timestamps, numRows, qualifier, family);
  for (KeyValue kv : kvList) {
    writer.append(kv);
  }
  writer.appendMetadata(0, false);
  writer.close();

  StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
  Store store = mock(Store.class);
  HColumnDescriptor hcd = mock(HColumnDescriptor.class);
  when(hcd.getName()).thenReturn(family);
  when(store.getFamily()).thenReturn(hcd);
  StoreFileReader reader = hsf.createReader();
  StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
  TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  columns.add(qualifier);

  scan.setTimeRange(20, 100);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));

  scan.setTimeRange(1, 2);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));

  scan.setTimeRange(8, 10);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));

  // let's make sure it still works with column family time ranges
  scan.setColumnFamilyTimeRange(family, 7, 50);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));

  // This test relies on the timestamp range optimization
  scan = new Scan();
  scan.setTimeRange(27, 50);
  assertTrue(!scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));

  // should still use the scanner because we override the family time range
  scan = new Scan();
  scan.setTimeRange(27, 50);
  scan.setColumnFamilyTimeRange(family, 7, 50);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
}
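
The helper getKeyValueSet is defined elsewhere in TestStoreFile and is not shown on this page. For orientation only, a sketch of such a helper might look like the following; the method name makeKeyValues and the row/value layout are assumptions for illustration, not the project's actual implementation.

// Hypothetical stand-in for getKeyValueSet (requires java.util.ArrayList/List,
// org.apache.hadoop.hbase.KeyValue, org.apache.hadoop.hbase.util.Bytes):
// one KeyValue per row/timestamp combination under the given family and qualifier.
private static List<KeyValue> makeKeyValues(long[] timestamps, int numRows,
    byte[] qualifier, byte[] family) {
  List<KeyValue> kvList = new ArrayList<>();
  for (int i = 0; i < numRows; i++) {
    byte[] row = Bytes.toBytes("row" + i);
    for (long ts : timestamps) {
      // KeyValue(row, family, qualifier, timestamp, value)
      kvList.add(new KeyValue(row, family, qualifier, ts, Bytes.toBytes("value-" + ts)));
    }
  }
  return kvList;
}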
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestStoreFile, method testDeleteFamilyBloomFilter.
@Test
public void testDeleteFamilyBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
  float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder()
      .withBlockSize(BLOCKSIZE_SMALL)
      .withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES)
      .build();

  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(f)
      .withMaxKeyCount(2000)
      .withFileContext(meta)
      .build();

  // add delete family
  long now = System.currentTimeMillis();
  for (int i = 0; i < 2000; i += 2) {
    String row = String.format(localFormatter, i);
    KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(), "col".getBytes(), now,
        KeyValue.Type.DeleteFamily, "value".getBytes());
    writer.append(kv);
  }
  writer.close();

  StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
  reader.loadFileInfo();
  reader.loadBloomfilter();

  // check false positive rate
  int falsePos = 0;
  int falseNeg = 0;
  for (int i = 0; i < 2000; i++) {
    String row = String.format(localFormatter, i);
    byte[] rowKey = Bytes.toBytes(row);
    boolean exists = reader.passesDeleteFamilyBloomFilter(rowKey, 0, rowKey.length);
    if (i % 2 == 0) {
      if (!exists) {
        falseNeg++;
      }
    } else {
      if (exists) {
        falsePos++;
      }
    }
  }
  assertEquals(1000, reader.getDeleteFamilyCnt());

  // evict because we are about to delete the file
  reader.close(true);
  fs.delete(f, true);
  assertEquals("False negatives: " + falseNeg, 0, falseNeg);
  int maxFalsePos = (int) (2 * 2000 * err);
  assertTrue("Too many false positives: " + falsePos + " (err=" + err + ", expected no more than "
      + maxFalsePos + ")", falsePos <= maxFalsePos);
}
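
The bound in the final assertion follows from the configuration at the top of the test. Half of the 2000 probed rows were never written, so with an error rate of 0.01 roughly 10 false positives are expected, while the test allows the looser budget of 2 * 2000 * err. A small sketch of that arithmetic, for illustration only (not part of the test):

// err as configured via IO_STOREFILE_BLOOM_ERROR_RATE above
double err = 0.01;
int probesNotInFile = 1000;                       // odd-numbered rows, never written
double expectedFalsePos = probesNotInFile * err;  // ~10 expected false positives
int allowedFalsePos = (int) (2 * 2000 * err);     // 40, the slack used by the assertion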
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestStoreFile, method testDataBlockEncodingMetaData.
/**
 * Check if data block encoding information is saved correctly in HFile's
 * file info.
 */
@Test
public void testDataBlockEncodingMetaData() throws IOException {
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path dir = new Path(new Path(testDir, "7e0102"), "familyname");
  Path path = new Path(dir, "1234567890");

  DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
  HFileDataBlockEncoder dataBlockEncoder = new HFileDataBlockEncoderImpl(dataBlockEncoderAlgo);
  cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder()
      .withBlockSize(BLOCKSIZE_SMALL)
      .withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES)
      .withDataBlockEncoding(dataBlockEncoderAlgo)
      .build();

  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(path)
      .withMaxKeyCount(2000)
      .withFileContext(meta)
      .build();
  writer.close();

  StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
  StoreFileReader reader = storeFile.createReader();

  Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
  byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
  assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
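
The value stored under the DATA_BLOCK_ENCODING file-info key is the encoding's name in bytes, so it can be turned back into the enum constant. A minimal sketch of that round trip (an illustration, not part of the test):

// Turn the file-info bytes back into the enum constant; this is effectively
// what the assertion above verifies for FAST_DIFF.
byte[] stored = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
DataBlockEncoding storedEncoding = DataBlockEncoding.valueOf(Bytes.toString(stored));
assertEquals(DataBlockEncoding.FAST_DIFF, storedEncoding);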
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestStoreFile, method testBasicHalfMapFile.
/**
 * Write a file and then assert that we can read from top and bottom halves
 * using two HalfMapFiles.
 * @throws Exception
 */
@Test
public void testBasicHalfMapFile() throws Exception {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testBasicHalfMapFileTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      new Path(testDir, hri.getTable().getNameAsString()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);

  Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf, BloomType.NONE);
  checkHalfHFile(regionFs, sf);
}
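
Here the test only sets a 2 KB block size on the HFileContext and relies on defaults for everything else. For orientation, a sketch of a more fully specified context is shown below; the extra options are common HFileContextBuilder settings included as an illustration, not what this test actually configures.

// A more explicit HFileContext; only withBlockSize(2 * 1024) matches the test.
HFileContext fullMeta = new HFileContextBuilder()
    .withBlockSize(2 * 1024)
    .withCompression(Compression.Algorithm.NONE)  // no block compression
    .withIncludesMvcc(true)                       // keep MVCC read points
    .withIncludesTags(false)                      // no cell tags
    .build();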
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache.
The class TestStoreFile, method testBloomFilter.
@Test
public void testBloomFilter() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
  conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);

  // write the file
  Path f = new Path(ROOT_DIR, getName());
  HFileContext meta = new HFileContextBuilder()
      .withBlockSize(BLOCKSIZE_SMALL)
      .withChecksumType(CKTYPE)
      .withBytesPerCheckSum(CKBYTES)
      .build();

  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(f)
      .withBloomType(BloomType.ROW)
      .withMaxKeyCount(2000)
      .withFileContext(meta)
      .build();
  bloomWriteRead(writer, fs);
}
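
bloomWriteRead is a shared helper in TestStoreFile that is not shown on this page. Its write half presumably mirrors the loop in testDeleteFamilyBloomFilter above: write every other row, then probe all 2000 rows and count misses and false hits against the ROW bloom filter. A rough sketch of that write half, based on the pattern above rather than the helper's actual code:

// Write only the even-numbered rows so the odd-numbered probes are known misses
// for the ROW bloom filter.
long now = System.currentTimeMillis();
for (int i = 0; i < 2000; i += 2) {
  String row = String.format(localFormatter, i);
  KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"),
      Bytes.toBytes("col"), now, Bytes.toBytes("value"));
  writer.append(kv);
}
writer.close();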