Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache:
the class TestStoreFile, method testReference.
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
@Test
public void testReference() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      new Path(testDir, hri.getTable().getNameAsString()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it.
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);
  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE);
  StoreFileReader reader = hsf.createReader();
  // Split on a row, not in middle of row. Midkey returned by reader
  // may be in middle of row. Create new one with empty column and
  // timestamp.
  Cell kv = reader.midkey();
  byte[] midRow = CellUtil.cloneRow(kv);
  kv = reader.getLastKey();
  byte[] finalRow = CellUtil.cloneRow(kv);
  hsf.closeReader(true);
  // Make a reference
  HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
  Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE);
  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey());
    kv = KeyValueUtil.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
          midRow, 0, midRow.length));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(),
      finalRow, 0, finalRow.length));
}
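Every snippet on this page constructs its HFileContext the same way, setting only the block size. As a point of comparison, here is a minimal sketch of a more fully configured context; the compression, encoding, and tag settings are illustrative assumptions, not values taken from TestStoreFile:

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

// Illustrative settings only; the tests above use just withBlockSize(8 * 1024).
HFileContext richMeta = new HFileContextBuilder()
    .withBlockSize(64 * 1024)                           // 64 KB data blocks
    .withCompression(Compression.Algorithm.GZ)          // gzip block compression
    .withDataBlockEncoding(DataBlockEncoding.FAST_DIFF) // delta-encode keys in blocks
    .withIncludesTags(false)                            // no cell tags in this file
    .build();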
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache:
the class TestStoreFile, method testReferenceToHFileLink.
/**
 * This test creates an hfile and then the dir structures and files to verify that references
 * to hfilelinks (created by snapshot clones) can be properly interpreted.
 */
@Test
public void testReferenceToHFileLink() throws IOException {
  // Force temp data into hbase/target/test-data instead of /tmp/hbase-xxxx/.
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, testDir);
  // Use legal but unusual table-name chars to verify the regex handles them.
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
      FSUtils.getTableDir(testDir, hri.getTable()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
  StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);
  Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  // Create a link to the store file: <root>/clone/region/<cf>/<hfile>-<region>-<table>
  HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
  HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs,
      FSUtils.getTableDir(testDir, hri.getTable()), hriClone);
  Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
  // Create splits of the link:
  // <root>/clone/splitA/<cf>/<reftohfilelink>,
  // <root>/clone/splitB/<cf>/<reftohfilelink>
  HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
  HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
  StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
  f.createReader();
  // top
  Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true);
  // bottom
  Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);
  f.closeReader(true);
  // OK, test the thing.
  FSUtils.logFileSystemState(fs, testDir, LOG);
  // There is a case where a file with the hfilelink pattern is actually a daughter
  // reference to an hfile link; StoreFile contains the code that handles this case.
  // Try to open a store file from the link.
  StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf, BloomType.NONE);
  // Now confirm that I can read from the reference to the link.
  int count = 1;
  HFileScanner s = hsfA.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  // Some rows should have been read.
  assertTrue(count > 0);
  // Try to open a store file from the other link.
  StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE);
  // Now confirm that I can read from the reference to the link.
  HFileScanner sB = hsfB.createReader().getScanner(false, false);
  sB.seekTo();
  // count++ because seekTo() already positions the scanner on the first row.
  count++;
  while (sB.next()) {
    count++;
  }
  // The two halves together must account for every row written.
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
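The link created above gets its name from HFileLink.createHFileLinkName, and the daughter-reference case needs special handling precisely because such names follow a recognizable pattern. A minimal sketch of the naming and detection helpers, with made-up table, region, and hfile names, assuming the HFileLink API of the same HBase version as these tests:

// Hypothetical names for illustration; the region and hfile strings are fake.
String linkName = HFileLink.createHFileLinkName(
    TableName.valueOf("sourceTable"), "f18eca4debc5494d", "acc30a090ce04dcc");
// The generated name encodes table, region, and hfile; StoreFile relies on
// this pattern when deciding whether a path is a link (or a reference to one).
boolean isLink = HFileLink.isHFileLink(new Path("/clone/region/cf/" + linkName));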
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache:
the class TestRegionObserverInterface, method createHFile.
private static void createHFile(Configuration conf, FileSystem fs, Path path, byte[] family,
    byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
      .withPath(fs, path)
      .withFileContext(context)
      .create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv = new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
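A hedged sketch of how a test might invoke this helper; the target path and the family/qualifier names are assumptions for illustration:

Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
// Hypothetical target path; any writable location works for the sketch.
Path hfilePath = new Path("/tmp/TestRegionObserverInterface/test.hfile");
createHFile(conf, fs, hfilePath, Bytes.toBytes("f"), Bytes.toBytes("q"));
// The file now holds nine cells with rows "1".."9", ready for bulk load.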
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache:
the class TestPartitionedMobCompactor, method createMobDelFile.
/**
 * Create multiple partition delete files.
 */
private void createMobDelFile(Path basePath, int startKey) throws IOException {
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  Date today = new Date();
  byte[] startRow = Bytes.toBytes(startKey);
  MobFileName mobFileName = MobFileName.create(startRow, MobUtils.formatDate(today), delSuffix);
  StoreFileWriter mobFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withFileContext(meta)
      .withFilePath(new Path(basePath, mobFileName.getFileName()))
      .build();
  long now = System.currentTimeMillis();
  try {
    byte[] key = Bytes.add(Bytes.toBytes(KEYS[startKey]), Bytes.toBytes(0));
    byte[] dummyData = new byte[5000];
    new Random().nextBytes(dummyData);
    mobFileWriter.append(new KeyValue(key, Bytes.toBytes(family), Bytes.toBytes(qf), now, Type.Delete, dummyData));
    key = Bytes.add(Bytes.toBytes(KEYS[startKey]), Bytes.toBytes(2));
    mobFileWriter.append(new KeyValue(key, Bytes.toBytes(family), Bytes.toBytes(qf), now, Type.Delete, dummyData));
    key = Bytes.add(Bytes.toBytes(KEYS[startKey]), Bytes.toBytes(4));
    mobFileWriter.append(new KeyValue(key, Bytes.toBytes(family), Bytes.toBytes(qf), now, Type.Delete, dummyData));
  } finally {
    mobFileWriter.close();
  }
}
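The Type.Delete argument is what turns each appended cell into a delete marker, so a later compaction treats it as a tombstone rather than a value. A minimal sketch of the same KeyValue constructor in isolation (row, family, and qualifier are assumed names):

// A delete-marker cell: the KeyValue.Type byte distinguishes it from a Put.
KeyValue del = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
    Bytes.toBytes("q"), System.currentTimeMillis(), KeyValue.Type.Delete);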
Use of org.apache.hadoop.hbase.io.hfile.HFileContext in project hbase by apache:
the class TestCachedMobFile, method testOpenClose.
@Test
public void testOpenClose() throws Exception {
  String caseName = getName();
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer, caseName);
  CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
  Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
  cachedMobFile.open();
  Assert.assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount());
  cachedMobFile.open();
  Assert.assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile.getReferenceCount());
  cachedMobFile.close();
  Assert.assertEquals(EXPECTED_REFERENCE_ONE, cachedMobFile.getReferenceCount());
  cachedMobFile.close();
  Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount());
}
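The assertions above pin down CachedMobFile's reference-counting contract: every open() increments the count, every close() decrements it, and the underlying reader is only released once the count reaches zero. A sketch of the usage pattern that contract implies (the body comment stands in for real reads):

cachedMobFile.open();    // reference count 0 -> 1; the file is readable
try {
  // ... read cells through the mob file while the reference is held ...
} finally {
  cachedMobFile.close(); // reference count back to 0; the reader may be closed
}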