Use of org.apache.hadoop.hbase.io.hfile.HFileInfo in project hbase by apache.
The class TestHalfStoreFileReader, method doTestOfSeekBefore.
private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell seekBefore,
    CacheConfig cacheConfig) throws IOException {
  ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, p).build();
  HFileInfo fileInfo = new HFileInfo(context, TEST_UTIL.getConfiguration());
  final HalfStoreFileReader halfreader = new HalfStoreFileReader(context, fileInfo, cacheConfig,
      bottom, new AtomicInteger(0), TEST_UTIL.getConfiguration());
  // Meta and index blocks must be initialized before the reader is used.
  fileInfo.initMetaAndIndex(halfreader.getHFileReader());
  halfreader.loadFileInfo();
  final HFileScanner scanner = halfreader.getScanner(false, false);
  scanner.seekBefore(seekBefore);
  return scanner.getCell();
}
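The helper above only opens the half reader and performs a single seekBefore; a minimal caller sketch might look as follows (the split row, seek row, and assertion are hypothetical, and the surrounding fixture is assumed to have already written an HFile at p):

Reference bottom = Reference.createBottomReference(Bytes.toBytes("row050"));
Cell seekBefore = KeyValueUtil.createFirstOnRow(Bytes.toBytes("row010"));
Cell found = doTestOfSeekBefore(p, FileSystem.get(TEST_UTIL.getConfiguration()), bottom,
    seekBefore, new CacheConfig(TEST_UTIL.getConfiguration()));
assertNotNull(found);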
Use of org.apache.hadoop.hbase.io.hfile.HFileInfo in project hbase by apache.
The class TestRowPrefixBloomFilter, method testRowPrefixBloomFilterWithGet.
@Test
public void testRowPrefixBloomFilterWithGet() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  int expKeys = fixedLengthExpKeys;
  // write the file
  Path f = new Path(testDir, name.getMethodName());
  writeStoreFile(f, bt, expKeys);
  ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  StoreFileReader reader = new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
  fileInfo.initMetaAndIndex(reader.getHFileReader());
  reader.loadFileInfo();
  reader.loadBloomfilter();
  StoreFileScanner scanner = getStoreFileScanner(reader);
  HStore store = mock(HStore.class);
  when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
  // Get with valid row style
  // prefix row in bloom
  String prefixRow = String.format(prefixFormatter, prefixRowCount - 2);
  String row = generateRowWithSuffix(prefixRow, 0);
  Scan scan = new Scan(new Get(Bytes.toBytes(row)));
  boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertTrue(exists);
  // prefix row not in bloom
  prefixRow = String.format(prefixFormatter, prefixRowCount - 1);
  row = generateRowWithSuffix(prefixRow, 0);
  scan = new Scan(new Get(Bytes.toBytes(row)));
  exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertFalse(exists);
  // Get with invalid row style
  // ROWPREFIX: the length of row is less than prefixLength
  // row in bloom
  row = String.format(invalidFormatter, prefixRowCount + 2);
  scan = new Scan(new Get(Bytes.toBytes(row)));
  exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertTrue(exists);
  // row not in bloom
  row = String.format(invalidFormatter, prefixRowCount + 1);
  scan = new Scan(new Get(Bytes.toBytes(row)));
  exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertFalse(exists);
  // evict because we are about to delete the file
  reader.close(true);
  fs.delete(f, true);
}
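The getStoreFileScanner(reader) call used above and in the next test is a small helper of the test class that is not shown here; it presumably wraps StoreFileReader.getStoreFileScanner along these lines (the exact argument values are an assumption):

private static StoreFileScanner getStoreFileScanner(StoreFileReader reader) {
  // cacheBlocks=false, pread=false, isCompaction=false, readPt=0, scannerOrder=0,
  // canOptimizeForNonNullColumn=false
  return reader.getStoreFileScanner(false, false, false, 0, 0, false);
}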
Use of org.apache.hadoop.hbase.io.hfile.HFileInfo in project hbase by apache.
The class TestRowPrefixBloomFilter, method testRowPrefixBloomFilterWithScan.
@Test
public void testRowPrefixBloomFilterWithScan() throws Exception {
  FileSystem fs = FileSystem.getLocal(conf);
  int expKeys = fixedLengthExpKeys;
  // write the file
  Path f = new Path(testDir, name.getMethodName());
  writeStoreFile(f, bt, expKeys);
  ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  StoreFileReader reader = new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
  fileInfo.initMetaAndIndex(reader.getHFileReader());
  reader.loadFileInfo();
  reader.loadBloomfilter();
  StoreFileScanner scanner = getStoreFileScanner(reader);
  HStore store = mock(HStore.class);
  when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
  // Scan with valid row style. startRow and stopRow have a common prefix.
  // And the length of the common prefix is no less than prefixLength.
  // prefix row in bloom
  String prefixRow = String.format(prefixFormatter, prefixRowCount - 2);
  String startRow = generateRowWithSuffix(prefixRow, 0);
  String stopRow = generateRowWithSuffix(prefixRow, 1);
  Scan scan = new Scan().withStartRow(Bytes.toBytes(startRow)).withStopRow(Bytes.toBytes(stopRow));
  boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertTrue(exists);
  // prefix row not in bloom
  prefixRow = String.format(prefixFormatter, prefixRowCount - 1);
  startRow = generateRowWithSuffix(prefixRow, 0);
  stopRow = generateRowWithSuffix(prefixRow, 1);
  scan = new Scan().withStartRow(Bytes.toBytes(startRow)).withStopRow(Bytes.toBytes(stopRow));
  exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertFalse(exists);
  // There is no common prefix between startRow and stopRow.
  prefixRow = String.format(prefixFormatter, prefixRowCount - 2);
  startRow = generateRowWithSuffix(prefixRow, 0);
  scan = new Scan().withStartRow(Bytes.toBytes(startRow));
  exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertTrue(exists);
  // startRow and stopRow have a common prefix.
  // But the length of the common prefix is less than prefixLength.
  String prefixStartRow = String.format(prefixFormatter, prefixRowCount - 2);
  String prefixStopRow = String.format(prefixFormatter, prefixRowCount - 1);
  startRow = generateRowWithSuffix(prefixStartRow, 0);
  stopRow = generateRowWithSuffix(prefixStopRow, 0);
  scan = new Scan().withStartRow(Bytes.toBytes(startRow)).withStopRow(Bytes.toBytes(stopRow));
  exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
  assertTrue(exists);
  // evict because we are about to delete the file
  reader.close(true);
  fs.delete(f, true);
}
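Both bloom-filter tests also rely on the fixture fields prefixFormatter and invalidFormatter and on the generateRowWithSuffix helper, none of which are shown above. A plausible sketch, where the concrete format widths and the "#" separator are assumptions, is:

// Fixed-width prefix so every valid row shares a prefix of exactly prefixLength bytes.
private static final String prefixFormatter = "%10d";
// Shorter than prefixLength, i.e. an "invalid" row for the ROWPREFIX_FIXED_LENGTH bloom filter.
private static final String invalidFormatter = "%5d";

private String generateRowWithSuffix(String prefixRow, int suffix) {
  return prefixRow + "#" + suffix;
}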
Use of org.apache.hadoop.hbase.io.hfile.HFileInfo in project hbase by apache.
The class BulkLoadHFilesTool, method copyHFileHalf.
/**
* Copy half of an HFile into a new HFile.
*/
private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
    Reference reference, ColumnFamilyDescriptor familyDescriptor) throws IOException {
  FileSystem fs = inFile.getFileSystem(conf);
  CacheConfig cacheConf = CacheConfig.DISABLED;
  HalfStoreFileReader halfReader = null;
  StoreFileWriter halfWriter = null;
  try {
    ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, inFile).build();
    HFileInfo hfile = new HFileInfo(context, conf);
    halfReader = new HalfStoreFileReader(context, hfile, cacheConf, reference,
        new AtomicInteger(0), conf);
    hfile.initMetaAndIndex(halfReader.getHFileReader());
    Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();
    int blocksize = familyDescriptor.getBlocksize();
    Algorithm compression = familyDescriptor.getCompressionType();
    BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
    HFileContext hFileContext = new HFileContextBuilder().withCompression(compression)
        .withChecksumType(StoreUtils.getChecksumType(conf))
        .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize)
        .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true)
        .build();
    halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
        .withBloomType(bloomFilterType).withFileContext(hFileContext).build();
    HFileScanner scanner = halfReader.getScanner(false, false, false);
    scanner.seekTo();
    // Copy every cell from the referenced half of the input file into the new file.
    do {
      halfWriter.append(scanner.getCell());
    } while (scanner.next());
    // Carry over the relevant file-info metadata entries.
    for (Map.Entry<byte[], byte[]> entry : fileInfo.entrySet()) {
      if (shouldCopyHFileMetaKey(entry.getKey())) {
        halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
      }
    }
  } finally {
    if (halfReader != null) {
      try {
        halfReader.close(cacheConf.shouldEvictOnClose());
      } catch (IOException e) {
        LOG.warn("failed to close hfile reader for " + inFile, e);
      }
    }
    if (halfWriter != null) {
      halfWriter.close();
    }
  }
}
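copyHFileHalf is typically invoked once per half when a bulk-load HFile straddles a region boundary. A sketch of such a caller, where the method name splitStoreFile and its exact signature are assumptions, might be:

static void splitStoreFile(Configuration conf, Path inFile, ColumnFamilyDescriptor familyDesc,
    byte[] splitKey, Path bottomOut, Path topOut) throws IOException {
  // The bottom half holds keys strictly below splitKey, the top half holds the rest.
  Reference bottomReference = Reference.createBottomReference(splitKey);
  Reference topReference = Reference.createTopReference(splitKey);
  copyHFileHalf(conf, inFile, bottomOut, bottomReference, familyDesc);
  copyHFileHalf(conf, inFile, topOut, topReference, familyDesc);
}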
Use of org.apache.hadoop.hbase.io.hfile.HFileInfo in project hbase by apache.
The class TestHStoreFile, method testReseek.
/**
* Test for HBASE-8012
*/
@Test
public void testReseek() throws Exception {
  // write the file
  Path f = new Path(ROOT_DIR, name.getMethodName());
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f)
      .withFileContext(meta).build();
  writeStoreFile(writer);
  writer.close();
  ReaderContext context = new ReaderContextBuilder().withFileSystemAndPath(fs, f).build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  StoreFileReader reader = new StoreFileReader(context, fileInfo, cacheConf, new AtomicInteger(0), conf);
  fileInfo.initMetaAndIndex(reader.getHFileReader());
  // Now do reseek with empty KV to position to the beginning of the file
  KeyValue k = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
  StoreFileScanner s = getStoreFileScanner(reader, false, false);
  s.reseek(k);
  assertNotNull("Initial reseek should position at the beginning of the file", s.peek());
}
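writeStoreFile(writer) is a fixture helper that populates the store file before it is reopened; a minimal hypothetical version (the row, family, and qualifier values are assumptions, not the project's actual helper) would be:

private void writeStoreFile(StoreFileWriter writer) throws IOException {
  byte[] fam = Bytes.toBytes("f");
  byte[] qual = Bytes.toBytes("q");
  long now = EnvironmentEdgeManager.currentTime();
  // One cell per single-character row so the reseek in the test has data to land on.
  for (char c = 'a'; c <= 'z'; c++) {
    byte[] row = Bytes.toBytes(String.valueOf(c));
    writer.append(new KeyValue(row, fam, qual, now, row));
  }
}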