Search in sources :

Example 46 with StoreFile

use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

In class TestStripeCompactionPolicy, the method testMergeExpiredFiles:

@SuppressWarnings("unchecked")
@Test
public void testMergeExpiredFiles() throws Exception {
    // Pin "now" just past the default TTL so files can be crafted on either side of expiry.
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    long now = defaultTtl + 2;
    clock.setValue(now);
    EnvironmentEdgeManager.injectEdge(clock);
    try {
        StoreFile expiredFile = createFile();
        StoreFile notExpiredFile = createFile();
        // Max timestamps straddle the expiry boundary (now - defaultTtl).
        when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
        when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
        List<StoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
        List<StoreFile> notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
        List<StoreFile> mixed = Lists.newArrayList(expiredFile, notExpiredFile);
        StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create(), defaultSplitSize, defaultSplitCount, defaultInitialCount, true);
        // All stripes expired: the whole range [0, 2] is eligible for merging.
        StripeCompactionPolicy.StripeInformationProvider provider = createStripesWithFiles(expired, expired, expired);
        verifyWholeStripesCompaction(policy, provider, 0, 2, null, 1, Long.MAX_VALUE, false);
        // Nothing expired: no compaction should be selected at all.
        provider = createStripesWithFiles(notExpired, notExpired, notExpired);
        assertNull(policy.selectCompaction(provider, al(), false));
        // A single expired stripe is merged with its neighbor.
        provider = createStripesWithFiles(notExpired, expired, notExpired);
        verifyWholeStripesCompaction(policy, provider, 1, 2, null, 1, Long.MAX_VALUE, false);
        // With several candidate runs, the longest run of expired stripes wins.
        provider = createStripesWithFiles(notExpired, expired, notExpired, expired, expired, notExpired);
        verifyWholeStripesCompaction(policy, provider, 3, 4, null, 1, Long.MAX_VALUE, false);
        // A stripe that merely contains some expired files (mixed) is not treated as expired.
        provider = createStripesWithFiles(expired, expired, notExpired, expired, mixed);
        verifyWholeStripesCompaction(policy, provider, 0, 1, null, 1, Long.MAX_VALUE, false);
    } finally {
        // Restore the real clock for other tests.
        EnvironmentEdgeManager.reset();
    }
}
Also used : StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) ManualEnvironmentEdge(org.apache.hadoop.hbase.util.ManualEnvironmentEdge) StripeInformationProvider(org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider) Test(org.junit.Test)

Example 47 with StoreFile

use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

In class TestStripeCompactionPolicy, the method testMergeExpiredStripes:

@SuppressWarnings("unchecked")
@Test
public void testMergeExpiredStripes() throws Exception {
    // HBASE-11397
    // Fix the clock just past the default TTL so expiry is deterministic.
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    long now = defaultTtl + 2;
    clock.setValue(now);
    EnvironmentEdgeManager.injectEdge(clock);
    try {
        StoreFile expiredFile = createFile();
        StoreFile notExpiredFile = createFile();
        // One file just past, one just inside, the TTL window.
        when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
        when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
        List<StoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
        List<StoreFile> notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
        StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create(), defaultSplitSize, defaultSplitCount, defaultInitialCount, true);
        // Three fully-expired stripes collapse into a single merge of [0, 2].
        StripeCompactionPolicy.StripeInformationProvider provider = createStripesWithFiles(expired, expired, expired);
        // NOTE(review): "verifyMergeCompatcion" looks like a typo for "verifyMergeCompaction",
        // but the helper is declared elsewhere in this class under that name, so it is kept.
        verifyMergeCompatcion(policy, provider, 0, 2);
        // Only the adjacent expired stripes [3, 4] are merged; isolated ones are left alone.
        provider = createStripesWithFiles(notExpired, expired, notExpired, expired, expired, notExpired);
        verifyMergeCompatcion(policy, provider, 3, 4);
    } finally {
        // Restore the real clock for other tests.
        EnvironmentEdgeManager.reset();
    }
}
Also used : StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) ManualEnvironmentEdge(org.apache.hadoop.hbase.util.ManualEnvironmentEdge) StripeInformationProvider(org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider) Test(org.junit.Test)

Example 48 with StoreFile

use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

In class TestMobFile, the method testReadKeyValue:

@Test
public void testReadKeyValue() throws Exception {
    Path testDir = TEST_UTIL.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(testDir).withFileContext(meta).build();
    String caseName = getName();
    // writeStoreFile populates rows "aa".."zz" — assumed from the start/end keys below; TODO confirm.
    MobTestUtil.writeStoreFile(writer, caseName);
    MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE));
    byte[] family = Bytes.toBytes(caseName);
    byte[] qualify = Bytes.toBytes(caseName);
    byte[] startKey = Bytes.toBytes("aa");
    byte[] endKey = Bytes.toBytes("zz");
    // Exact-match seeks: start key, end key, and a random in-range key.
    assertExactSeek(mobFile, startKey, family, qualify);
    assertExactSeek(mobFile, endKey, family, qualify);
    byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
    assertExactSeek(mobFile, randomKey, family, qualify);
    // A seek key smaller than the start key ("a1" < "aa") lands on the first cell.
    byte[] lowerKey = Bytes.toBytes("a1");
    KeyValue expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
    KeyValue seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
    Cell cell = mobFile.readCell(seekKey, false);
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // A seek key bigger than the end key ("z{" sorts after "zz") finds nothing.
    byte[] upperKey = Bytes.toBytes("z{");
    seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
    cell = mobFile.readCell(seekKey, false);
    assertNull(cell);
}

/**
 * Seeks {@code mobFile} with a key-only version of the cell identified by {@code row} and
 * asserts that the cell read back matches it exactly.
 */
private void assertExactSeek(MobFile mobFile, byte[] row, byte[] family, byte[] qualify) throws Exception {
    KeyValue expectedKey = new KeyValue(row, family, qualify, Long.MAX_VALUE, Type.Put, row);
    Cell cell = mobFile.readCell(expectedKey.createKeyOnly(false), false);
    MobTestUtil.assertCellEquals(expectedKey, cell);
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) KeyValue(org.apache.hadoop.hbase.KeyValue) FileSystem(org.apache.hadoop.fs.FileSystem) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) Cell(org.apache.hadoop.hbase.Cell) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) Test(org.junit.Test)

Example 49 with StoreFile

use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

In class TestMobFile, the method testGetScanner:

@Test
public void testGetScanner() throws Exception {
    // Write a small store file into the test directory and wrap it as a MOB file.
    Path testDir = TEST_UTIL.getDataTestDir();
    FileSystem fileSystem = testDir.getFileSystem(conf);
    HFileContext fileContext = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fileSystem)
        .withOutputDir(testDir)
        .withFileContext(fileContext)
        .build();
    MobTestUtil.writeStoreFile(writer, getName());
    StoreFile storeFile = new StoreFile(fileSystem, writer.getPath(), conf, cacheConf, BloomType.NONE);
    MobFile mobFile = new MobFile(storeFile);
    // The scanner must exist and be the store-file-backed implementation.
    assertNotNull(mobFile.getScanner());
    assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) FileSystem(org.apache.hadoop.fs.FileSystem) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) StoreFileScanner(org.apache.hadoop.hbase.regionserver.StoreFileScanner) Test(org.junit.Test)

Example 50 with StoreFile

use of org.apache.hadoop.hbase.regionserver.StoreFile in project hbase by apache.

In class FIFOCompactionPolicy, the method getExpiredStores:

/**
 * Returns the subset of {@code files} whose newest cell is older than the store's TTL,
 * excluding files already being compacted.
 *
 * @param files           candidate store files
 * @param filesCompacting files currently under compaction (may be null); these are skipped
 * @return expired store files eligible for removal
 */
private Collection<StoreFile> getExpiredStores(Collection<StoreFile> files, Collection<StoreFile> filesCompacting) {
    long currentTime = EnvironmentEdgeManager.currentTime();
    // TTL is per-store, not per-file: hoist the lookup out of the loop.
    long maxTtl = storeConfigInfo.getStoreFileTtl();
    Collection<StoreFile> expiredStores = new ArrayList<>();
    for (StoreFile sf : files) {
        // MIN_VERSIONS enforcement happens in HStore#removeUnneededFiles, not here.
        // NOTE(review): getMaxTimestamp() is kept boxed to preserve the null guard below —
        // cannot confirm from here whether the reader can actually report no timestamp.
        Long maxTs = sf.getReader().getMaxTimestamp();
        // Skip when the timestamp is unknown, TTL is disabled, or the file is still fresh.
        if (maxTs == null || maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
            continue;
        }
        if (filesCompacting == null || !filesCompacting.contains(sf)) {
            expiredStores.add(sf);
        }
    }
    return expiredStores;
}
Also used : ArrayList(java.util.ArrayList) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile)

Aggregations

StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile)52 ArrayList (java.util.ArrayList)22 Path (org.apache.hadoop.fs.Path)15 Test (org.junit.Test)13 IOException (java.io.IOException)10 Store (org.apache.hadoop.hbase.regionserver.Store)6 StripeInformationProvider (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider)6 StoreFileReader (org.apache.hadoop.hbase.regionserver.StoreFileReader)5 ImmutableList (com.google.common.collect.ImmutableList)4 Configuration (org.apache.hadoop.conf.Configuration)4 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)4 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)4 Put (org.apache.hadoop.hbase.client.Put)4 StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner)4 FileStatus (org.apache.hadoop.fs.FileStatus)3 Cell (org.apache.hadoop.hbase.Cell)3 CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)3 StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter)3 ConcatenatedLists (org.apache.hadoop.hbase.util.ConcatenatedLists)3 FileNotFoundException (java.io.FileNotFoundException)2