Example 1 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestStore, method addStoreFile:

private void addStoreFile() throws IOException {
    // Write the new file into the same directory as an existing store file.
    StoreFile f = this.store.getStorefiles().iterator().next();
    Path storedir = f.getPath().getParent();
    long seqid = this.store.getMaxSequenceId();
    Configuration c = TEST_UTIL.getConfiguration();
    FileSystem fs = FileSystem.get(c);
    HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
    StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs).withOutputDir(storedir).withFileContext(fileContext).build();
    // Stamp the file with a sequence id one above the store's current maximum.
    w.appendMetadata(seqid + 1, false);
    w.close();
    LOG.info("Added store file: " + w.getPath());
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) CompactionConfiguration(org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext)
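
The tests on this page set only a handful of context options. For orientation, here is a minimal sketch of a more fully configured HFileContext; every value below is illustrative rather than taken from the tests, and omitting any option leaves the builder's default in place:

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.ChecksumType;

// Illustrative values only; each option falls back to a builder default if omitted.
HFileContext context = new HFileContextBuilder()
    .withBlockSize(64 * 1024)                      // target uncompressed data block size
    .withCompression(Compression.Algorithm.NONE)   // block compression codec
    .withDataBlockEncoding(DataBlockEncoding.NONE) // on-disk cell encoding
    .withChecksumType(ChecksumType.CRC32)          // checksum algorithm
    .withBytesPerCheckSum(16 * 1024)               // bytes covered by each checksum chunk
    .withIncludesMvcc(true)                        // keep memstore timestamps in the file
    .withIncludesTags(false)                       // no cell tags in this file
    .build();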

Example 2 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestStoreFile, method writeStoreFile:

private StoreFileWriter writeStoreFile(Configuration conf, CacheConfig cacheConf, Path path, int numBlocks) throws IOException {
    // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
    int numKVs = 5 * numBlocks;
    List<KeyValue> kvs = new ArrayList<>(numKVs);
    byte[] b = Bytes.toBytes("x");
    int totalSize = 0;
    for (int i = numKVs; i > 0; i--) {
        KeyValue kv = new KeyValue(b, b, b, i, b);
        kvs.add(kv);
        // kv has memstoreTS 0, which takes 1 byte to store.
        totalSize += kv.getLength() + 1;
    }
    int blockSize = totalSize / numBlocks;
    HFileContext meta = new HFileContextBuilder().withBlockSize(blockSize).withChecksumType(CKTYPE).withBytesPerCheckSum(CKBYTES).build();
    // Make a store file and write data to it.
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(path).withMaxKeyCount(2000).withFileContext(meta).build();
    // We'll write N-1 KVs to ensure we don't write an extra block
    kvs.remove(kvs.size() - 1);
    for (KeyValue kv : kvs) {
        writer.append(kv);
    }
    writer.appendMetadata(0, false);
    writer.close();
    return writer;
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) ArrayList(java.util.ArrayList) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext)
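
A hypothetical caller of this helper might look like the following; the file name and block count are illustrative, while conf, cacheConf, fs, and ROOT_DIR stand for the fixtures TestStoreFile uses elsewhere on this page:

// Hypothetical usage of the writeStoreFile helper above.
Path path = new Path(ROOT_DIR, "tenBlockFile");                      // illustrative name
StoreFileWriter writer = writeStoreFile(conf, cacheConf, path, 10);  // aim for ~10 data blocks
// The helper closes the writer, so reopen the finished file to read it back.
StoreFileReader reader = new StoreFileReader(fs, writer.getPath(), cacheConf, conf);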

Example 3 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestStoreFile, method testReseek:

/**
   * Test for HBASE-8012
   */
@Test
public void testReseek() throws Exception {
    // write the file
    Path f = new Path(ROOT_DIR, getName());
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    // Make a store file and write data to it.
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(f).withFileContext(meta).build();
    writeStoreFile(writer);
    writer.close();
    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
    // Now do reseek with empty KV to position to the beginning of the file
    KeyValue k = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY);
    StoreFileScanner s = getStoreFileScanner(reader, false, false);
    s.reseek(k);
    assertNotNull("Initial reseek should position at the beginning of the file", s.peek());
}
Also used : Path(org.apache.hadoop.fs.Path) KeyValue(org.apache.hadoop.hbase.KeyValue) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) Test(org.junit.Test)
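
reseek is not limited to the start of the file. Here is a minimal sketch of seeking to a specific row under the same setup; the row name is hypothetical, and getStoreFileScanner is the test's own helper:

// Position the scanner at the first cell of a given row (or the next row after it).
KeyValue seekKey = KeyValueUtil.createFirstOnRow(Bytes.toBytes("row-42")); // hypothetical row
StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
if (scanner.reseek(seekKey)) {
    Cell current = scanner.peek(); // first cell at or after "row-42"
}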

Example 4 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestStoreFile, method testReference:

/**
   * Test that our mechanism of writing store files in one region to reference
   * store files in other regions works.
   * @throws IOException
   */
@Test
public void testReference() throws IOException {
    final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, new Path(testDir, hri.getTable().getNameAsString()), hri);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    // Make a store file and write data to it.
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs).withFilePath(regionFs.createTempName()).withFileContext(meta).build();
    writeStoreFile(writer);
    Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
    StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE);
    StoreFileReader reader = hsf.createReader();
    // Split on a row boundary, not in the middle of a row: the midkey returned
    // by the reader may fall mid-row, so build a new key with an empty column
    // and timestamp.
    Cell kv = reader.midkey();
    byte[] midRow = CellUtil.cloneRow(kv);
    kv = reader.getLastKey();
    byte[] finalRow = CellUtil.cloneRow(kv);
    hsf.closeReader(true);
    // Make a reference
    HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
    Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
    StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE);
    // Now confirm that I can read from the reference and that it only gets
    // keys from top half of the file.
    HFileScanner s = refHsf.createReader().getScanner(false, false);
    for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next(); ) {
        ByteBuffer bb = ByteBuffer.wrap(((KeyValue) s.getKey()).getKey());
        kv = KeyValueUtil.createKeyValueFromKey(bb);
        if (first) {
            assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), midRow, 0, midRow.length));
            first = false;
        }
    }
    assertTrue(Bytes.equals(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), finalRow, 0, finalRow.length));
}
Also used : Path(org.apache.hadoop.fs.Path) HFileScanner(org.apache.hadoop.hbase.io.hfile.HFileScanner) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) ByteBuffer(java.nio.ByteBuffer) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
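
The final boolean passed to splitStoreFile (the test's own helper) selects which daughter reference gets written: true yields a reference to the top half of the parent file (keys at or above the split row), false to the bottom half, as Example 5 below also shows. A hedged sketch of the bottom-half counterpart, reusing the names above:

// Hypothetical counterpart to the top-half reference created in testReference:
// the trailing false selects the bottom half of the parent store file.
Path bottomPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, false);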

Example 5 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestStoreFile, method testReferenceToHFileLink:

/**
   * This test creates an hfile and then the dir structures and files to verify that references
   * to hfilelinks (created by snapshot clones) can be properly interpreted.
   */
@Test
public void testReferenceToHFileLink() throws IOException {
    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
    Configuration testConf = new Configuration(this.conf);
    FSUtils.setRootDir(testConf, testDir);
    // adding legal table name chars to verify regex handles it.
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hri);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
    StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs).withFilePath(regionFs.createTempName()).withFileContext(meta).build();
    writeStoreFile(writer);
    Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
    // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
    HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
    HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hriClone);
    Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
    HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
    Path linkFilePath = new Path(dstPath, HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
    // create splits of the link.
    // <root>/clone/splitA/<cf>/<reftohfilelink>,
    // <root>/clone/splitB/<cf>/<reftohfilelink>
    HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
    HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
    StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
    f.createReader();
    // top
    Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true);
    // bottom
    Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);
    f.closeReader(true);
    // OK test the thing
    FSUtils.logFileSystemState(fs, testDir, LOG);
    // There is a case where a file matching the hfilelink pattern is actually a
    // daughter reference to an hfile link; code in StoreFile handles that case.
    // Try to open store file from link
    StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf, BloomType.NONE);
    // Now confirm that I can read from the ref to link
    int count = 1;
    HFileScanner s = hsfA.createReader().getScanner(false, false);
    s.seekTo();
    while (s.next()) {
        count++;
    }
    // we should have read at least one row through the reference
    assertTrue(count > 0);
    // Try to open store file from link
    StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf, BloomType.NONE);
    // Now confirm that I can read from the ref to link
    HFileScanner sB = hsfB.createReader().getScanner(false, false);
    sB.seekTo();
    // count the cell seekTo() positioned on; the loop below only counts next() calls
    count++;
    while (sB.next()) {
        count++;
    }
    // together the two halves must account for every row in the original file
    assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HFileScanner(org.apache.hadoop.hbase.io.hfile.HFileScanner) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) Test(org.junit.Test)

Aggregations

HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 89 uses
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 82 uses
Path (org.apache.hadoop.fs.Path): 52 uses
Test (org.junit.Test): 48 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 39 uses
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 27 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 26 uses
Cell (org.apache.hadoop.hbase.Cell): 17 uses
HFile (org.apache.hadoop.hbase.io.hfile.HFile): 16 uses
ByteBuffer (java.nio.ByteBuffer): 15 uses
Configuration (org.apache.hadoop.conf.Configuration): 14 uses
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner): 12 uses
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 12 uses
DataOutputStream (java.io.DataOutputStream): 6 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 6 uses
DataBlockEncoding (org.apache.hadoop.hbase.io.encoding.DataBlockEncoding): 6 uses
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 5 uses
IOException (java.io.IOException): 5 uses
ArrayList (java.util.ArrayList): 5 uses