Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
The class TestHStore, method addStoreFile.
private void addStoreFile() throws IOException {
  HStoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId().orElse(0L);
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
    .withOutputDir(storedir).withFileContext(fileContext).build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file:" + w.getPath());
}
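HFileContextBuilder is where the low-level HFile write settings live: block size, compression codec, data block encoding, checksums, and whether cells carry MVCC read points or tags. The test above sets only the block size; below is a minimal sketch of a more fully configured context. Every chosen value is an illustrative assumption, not taken from the test:

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

final class HFileContextExample {
  // Illustrative configuration only; the specific values are assumptions.
  static HFileContext newContext() {
    return new HFileContextBuilder()
      .withBlockSize(64 * 1024)                        // uncompressed data block size
      .withCompression(Compression.Algorithm.GZ)       // per-block compression codec
      .withDataBlockEncoding(DataBlockEncoding.PREFIX) // key encoding inside data blocks
      .withIncludesMvcc(true)                          // keep MVCC read points with cells
      .withIncludesTags(false)                         // this file carries no cell tags
      .build();
  }
}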
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
The class TestHStore, method testEmptyStoreFile.
/**
 * Test for hbase-1686.
 */
@Test
public void testEmptyStoreFile() throws IOException {
  init(this.name.getMethodName());
  // Write a store file.
  this.store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), null);
  this.store.add(new KeyValue(row, family, qf2, 1, (byte[]) null), null);
  flush(1);
  // Now put an empty store file in place. It's a little tricky: we have to
  // do it manually, with a hacked-in sequence id.
  HStoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = f.getMaxSequenceId();
  Configuration c = HBaseConfiguration.create();
  FileSystem fs = FileSystem.get(c);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
    .withOutputDir(storedir).withFileContext(meta).build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  this.store.close();
  // Reopen it... should pick up two files.
  this.store = new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c, false);
  assertEquals(2, this.store.getStorefilesCount());
  result = HBaseTestingUtil.getFromStoreFile(store, get.getRow(), qualifiers);
  assertEquals(1, result.size());
}
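The file written above contains no cells, only metadata: appendMetadata(seqid + 1, false) stamps it with a max sequence id one above the flushed file's and marks it as not the product of a major compaction. A minimal sketch of reading that stamp back, via a hypothetical readMaxSeqId helper (the closeStoreFile call assumes the HBase 2.x HStoreFile API):

// Hypothetical helper: re-open a store file and return the max-sequence-id
// metadata that appendMetadata() wrote, releasing the reader afterwards.
private static long readMaxSeqId(FileSystem fs, Path path, Configuration conf) throws IOException {
  HStoreFile sf = new HStoreFile(fs, path, conf, new CacheConfig(conf), BloomType.NONE, true);
  sf.initReader();
  try {
    return sf.getMaxSequenceId();
  } finally {
    sf.closeStoreFile(true); // true = evict cached blocks on close
  }
}

Used against the test above, assertEquals(seqid + 1, readMaxSeqId(fs, w.getPath(), c)) would confirm the hacked-in sequence id survived the round trip.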
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
The class TestHStoreFile, method testMultipleTimestamps.
/**
 * Test to ensure correctness when using StoreFile with multiple timestamps.
 */
@Test
public void testMultipleTimestamps() throws IOException {
  byte[] family = Bytes.toBytes("familyname");
  byte[] qualifier = Bytes.toBytes("qualifier");
  int numRows = 10;
  long[] timestamps = new long[] { 20, 10, 5, 1 };
  Scan scan = new Scan();
  // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
  Path storedir = new Path(new Path(testDir, "7e0102"), Bytes.toString(family));
  Path dir = new Path(storedir, "1234567890");
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
    .withOutputDir(dir).withFileContext(meta).build();
  List<KeyValue> kvList = getKeyValueSet(timestamps, numRows, qualifier, family);
  for (KeyValue kv : kvList) {
    writer.append(kv);
  }
  writer.appendMetadata(0, false);
  writer.close();
  HStoreFile hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true);
  HStore store = mock(HStore.class);
  when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of(family));
  hsf.initReader();
  StoreFileReader reader = hsf.getReader();
  StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
  TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  columns.add(qualifier);
  scan.setTimeRange(20, 100);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
  scan.setTimeRange(1, 2);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
  scan.setTimeRange(8, 10);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
  // Let's make sure it still works with column family time ranges.
  scan.setColumnFamilyTimeRange(family, 7, 50);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
  // This test relies on the timestamp-range optimization.
  scan = new Scan();
  scan.setTimeRange(27, 50);
  assertFalse(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
  // Should still use the scanner because we override the family time range.
  scan = new Scan();
  scan.setTimeRange(27, 50);
  scan.setColumnFamilyTimeRange(family, 7, 50);
  assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
}
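The file records the min/max timestamps of the cells it holds, which is what lets shouldUseScanner prune it when the scan's time range (or the overriding column-family time range) does not overlap [1, 20]. getKeyValueSet is a helper in the test class; a plausible reconstruction is sketched below, one KeyValue per row per timestamp with the row bytes doubling as the value. The exact body is an assumption inferred from how the test uses it:

// Sketch of the helper assumed above: numRows rows, one cell per timestamp.
private List<KeyValue> getKeyValueSet(long[] timestamps, int numRows, byte[] qualifier, byte[] family) {
  List<KeyValue> kvList = new ArrayList<>();
  for (int i = 1; i <= numRows; i++) {
    byte[] b = Bytes.toBytes(i); // row key, reused as the cell value
    for (long timestamp : timestamps) {
      kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
    }
  }
  return kvList;
}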
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
The class TestHStoreFile, method testBasicHalfAndHFileLinkMapFile.
/**
 * Write a file and then assert that we can read from top and bottom halves using two
 * HalfMapFiles, as well as one HalfMapFile and one HFileLink file.
 */
@Test
public void testBasicHalfAndHFileLinkMapFile() throws Exception {
  final RegionInfo hri =
    RegionInfoBuilder.newBuilder(TableName.valueOf("testBasicHalfAndHFileLinkMapFile")).build();
  // The hfile locations that an HFileLink refers to must be consistent with the table
  // directory created by CommonFSUtils, so create the region directory under
  // CommonFSUtils.getTableDir here.
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
    CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), hri.getTable()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
    .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
  writeStoreFile(writer);
  Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  HStoreFile sf = new HStoreFile(this.fs, sfPath, conf, cacheConf, BloomType.NONE, true);
  checkHalfHFile(regionFs, sf);
}
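HFileLink resolves its target from the canonical on-disk layout, ${hbase.rootdir}/data/&lt;namespace&gt;/&lt;table&gt;/&lt;region&gt;/&lt;family&gt;/&lt;hfile&gt;, which is why this test creates the region under CommonFSUtils.getTableDir rather than under an arbitrary test path. A minimal sketch of the path derivation:

// Illustrative only: deriving the canonical table directory HFileLink expects.
static Path canonicalTableDir(Configuration conf, TableName table) throws IOException {
  Path rootDir = CommonFSUtils.getRootDir(conf); // ${hbase.rootdir}
  // Resolves to ${hbase.rootdir}/data/<namespace>/<table>.
  return CommonFSUtils.getTableDir(rootDir, table);
}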
Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.
The class TestHStoreFile, method testStoreFileReference.
@Test
public void testStoreFileReference() throws Exception {
  final RegionInfo hri =
    RegionInfoBuilder.newBuilder(TableName.valueOf("testStoreFileReference")).build();
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
    new Path(testDir, hri.getTable().getNameAsString()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
    .withFilePath(regionFs.createTempName()).withFileContext(meta).build();
  writeStoreFile(writer);
  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  writer.close();
  HStoreFile file = new HStoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE, true);
  file.initReader();
  StoreFileReader r = file.getReader();
  assertNotNull(r);
  StoreFileScanner scanner = new StoreFileScanner(r, mock(HFileScanner.class), false, false, 0, 0, false);
  // Verify that the refCount is increased after instantiating the scanner.
  assertTrue("Verify file is being referenced", file.isReferencedInReads());
  scanner.close();
  // Verify that the refCount is decreased after closing the scanner.
  assertFalse("Verify file is not being referenced", file.isReferencedInReads());
}
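The reference count guards a store file against being archived while reads are in flight, so scanners must always be closed. A minimal sketch of the try/finally shape that keeps that accounting balanced, reusing the fixtures from the test above (getStoreFileScanner is the test class's own helper):

// Hold the reference only for the duration of the read; the finally block
// guarantees the refCount is released even if an assertion throws.
StoreFileScanner s = getStoreFileScanner(file.getReader(), false, false);
try {
  assertTrue(file.isReferencedInReads());
} finally {
  s.close();
}
assertFalse(file.isReferencedInReads());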