Search in sources :

Example 11 with HStoreFile

Use of org.apache.hadoop.hbase.regionserver.HStoreFile in the Apache HBase project.

From the class TestStripeCompactionPolicy, the method testSingleStripeDropDeletes:

@Test
public void testSingleStripeDropDeletes() throws Exception {
    // Test depends on this not being set to pass.  Default breaks test.  TODO: Revisit.
    Configuration config = HBaseConfiguration.create();
    config.unset("hbase.hstore.compaction.min.size");
    StripeCompactionPolicy policy = createPolicy(config);
    // With no L0 files, deletes may be dropped by a single-stripe compaction.
    Long[][] stripeSizes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
    StripeInformationProvider stripeInfo = createStripesWithSizes(0, 0, stripeSizes);
    verifySingleStripeCompaction(policy, stripeInfo, 0, true);
    // With L0 files present, deletes must be retained.
    stripeInfo = createStripesWithSizes(2, 2, stripeSizes);
    verifySingleStripeCompaction(policy, stripeInfo, 0, false);
    // Unless there are enough L0 files to trigger an L0 compaction that includes the stripe.
    stripeInfo = createStripesWithSizes(6, 2, stripeSizes);
    ConcatenatedLists<HStoreFile> expectedFiles = new ConcatenatedLists<>();
    expectedFiles.addSublist(stripeInfo.getLevel0Files());
    expectedFiles.addSublist(stripeInfo.getStripes().get(0));
    verifyCompaction(policy, stripeInfo, expectedFiles, stripeInfo.getStartRow(0),
        stripeInfo.getEndRow(0), stripeInfo.getStripeBoundaries());
    // If no stripe can be compacted in full, L0 is chosen instead.
    stripeInfo = createStripesWithSizes(6, 2,
        new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
    verifyCompaction(policy, stripeInfo, stripeInfo.getLevel0Files(), null, null,
        stripeInfo.getStripeBoundaries());
    // Even with an empty L0: when only part of a stripe is selected,
    // deletes must not be dropped.
    stripeSizes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } };
    stripeInfo = createStripesWithSizes(0, 0, stripeSizes);
    List<HStoreFile> firstStripe = stripeInfo.getStripes().get(0);
    // All files of the first stripe except the first one.
    List<HStoreFile> partialSelection = new ArrayList<>(firstStripe.subList(1, firstStripe.size()));
    verifyCompaction(policy, stripeInfo, partialSelection, false, 1, null,
        stripeInfo.getStartRow(0), stripeInfo.getEndRow(0), true);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) OptionalLong(java.util.OptionalLong) ArrayList(java.util.ArrayList) StripeInformationProvider(org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) ConcatenatedLists(org.apache.hadoop.hbase.util.ConcatenatedLists) Test(org.junit.Test)

Example 12 with HStoreFile

Use of org.apache.hadoop.hbase.regionserver.HStoreFile in the Apache HBase project.

From the class TestStripeCompactionPolicy, the method testCheckExpiredL0Compaction:

@Test
public void testCheckExpiredL0Compaction() throws Exception {
    // Require a very high L0 file count so the ordinary L0 path never triggers.
    final int minL0 = 100;
    Configuration config = HBaseConfiguration.create();
    config.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, minL0);
    config.setInt(MIN_FILES_KEY, 4);
    // Pin the clock so TTL expiry is deterministic.
    long now = defaultTtl + 2;
    ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
    clock.setValue(now);
    EnvironmentEdgeManager.injectEdge(clock);
    HStoreFile expiredFile = createFile(10);
    HStoreFile notExpiredFile = createFile(10);
    when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
    when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
    List<HStoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
    List<HStoreFile> mixed = Lists.newArrayList(expiredFile, notExpiredFile);
    StripeCompactionPolicy policy =
        createPolicy(config, defaultSplitSize, defaultSplitCount, defaultInitialCount, true);
    // A mix of expired and live L0 files does not need compaction.
    StripeCompactionPolicy.StripeInformationProvider si =
        createStripesWithFiles(null, new ArrayList<>(), mixed);
    assertFalse(policy.needsCompactions(si, al()));
    // Still no compaction even when the mixed set is large (just below minL0).
    List<HStoreFile> largeMixed = new ArrayList<>();
    for (int i = 0; i < minL0 - 1; i++) {
        if (i % 2 == 0) {
            largeMixed.add(notExpiredFile);
        } else {
            largeMixed.add(expiredFile);
        }
    }
    si = createStripesWithFiles(null, new ArrayList<>(), largeMixed);
    assertFalse(policy.needsCompactions(si, al()));
    // When every L0 file is expired, compaction is needed via the expired-L0 path only.
    si = createStripesWithFiles(null, new ArrayList<>(), expired);
    assertFalse(policy.needsSingleStripeCompaction(si));
    assertFalse(policy.hasExpiredStripes(si));
    assertTrue(policy.allL0FilesExpired(si));
    assertTrue(policy.needsCompactions(si, al()));
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArrayList(java.util.ArrayList) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) ManualEnvironmentEdge(org.apache.hadoop.hbase.util.ManualEnvironmentEdge) StripeInformationProvider(org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider) Test(org.junit.Test)

Example 13 with HStoreFile

Use of org.apache.hadoop.hbase.regionserver.HStoreFile in the Apache HBase project.

From the class TestStripeCompactionPolicy, the method testWithReferences:

@Test
public void testWithReferences() throws Exception {
    StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
    StripeCompactor compactor = mock(StripeCompactor.class);
    // One reference file plus one ordinary file in the store.
    HStoreFile referenceFile = createFile();
    when(referenceFile.isReference()).thenReturn(true);
    Collection<HStoreFile> storeFiles = al(referenceFile, createFile());
    StripeInformationProvider si = mock(StripeInformationProvider.class);
    when(si.getStorefiles()).thenReturn(storeFiles);
    assertTrue(policy.needsCompactions(si, al()));
    StripeCompactionPolicy.StripeCompactionRequest request =
        policy.selectCompaction(si, al(), false);
    // UnmodifiableCollection does not implement equals so we need to change it here to a
    // collection that implements it.
    assertEquals(si.getStorefiles(), new ArrayList<>(request.getRequest().getFiles()));
    request.execute(compactor, NoLimitThroughputController.INSTANCE, null);
    verify(compactor, only()).compact(eq(request.getRequest()), anyInt(), anyLong(),
        aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), any(), any());
}
Also used : HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) StripeInformationProvider(org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider) Test(org.junit.Test)

Example 14 with HStoreFile

Use of org.apache.hadoop.hbase.regionserver.HStoreFile in the Apache HBase project.

From the class TestFIFOCompactionPolicy, the method testFIFOCompactionPolicyExpiredEmptyHFiles:

/**
 * Unit test for HBASE-21504: under the FIFO compaction policy, a major compaction of a
 * store whose cells are all past their TTL must still produce exactly one (empty) HFile.
 */
@Test
public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
    TableName tableName = TableName.valueOf("testFIFOCompactionPolicyExpiredEmptyHFiles");
    // TTL of 1 second, FIFO compaction, splits disabled so the single region keeps all files.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
            FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
            DisabledRegionSplitPolicy.class.getName())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
        .build();
    Table table = TEST_UTIL.createTable(desc, null);
    // Timestamp 10 seconds in the past: already expired relative to the 1-second TTL.
    long ts = EnvironmentEdgeManager.currentTime() - 10 * 1000;
    Put put = new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
    table.put(put);
    // HFile-0
    TEST_UTIL.getAdmin().flush(tableName);
    put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    final int testWaitTimeoutMs = 20000;
    // HFile-1
    TEST_UTIL.getAdmin().flush(tableName);
    HStore store = Preconditions.checkNotNull(getStoreWithName(tableName));
    Assert.assertEquals(2, store.getStorefilesCount());
    majorCompactAndVerifySingleEmptyFile(tableName, store, testWaitTimeoutMs);
    put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
    table.put(put);
    // HFile-2
    TEST_UTIL.getAdmin().flush(tableName);
    Assert.assertEquals(2, store.getStorefilesCount());
    majorCompactAndVerifySingleEmptyFile(tableName, store, testWaitTimeoutMs);
}

/**
 * Triggers a major compaction and verifies that the store collapses to exactly one
 * HFile containing zero entries (all cells expired). Extracted from the duplicated
 * compact/wait/assert sequence in testFIFOCompactionPolicyExpiredEmptyHFiles.
 */
private void majorCompactAndVerifySingleEmptyFile(TableName tableName, HStore store,
        int timeoutMs) throws Exception {
    TEST_UTIL.getAdmin().majorCompact(tableName);
    // Compaction is asynchronous; wait until the store reports a single file.
    TEST_UTIL.waitFor(timeoutMs, (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);
    Assert.assertEquals(1, store.getStorefilesCount());
    HStoreFile sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
    Assert.assertEquals(0, sf.getReader().getEntries());
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) DisabledRegionSplitPolicy(org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) Waiter(org.apache.hadoop.hbase.Waiter) HStore(org.apache.hadoop.hbase.regionserver.HStore) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) ExpectedException(org.junit.rules.ExpectedException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) Test(org.junit.Test)

Example 15 with HStoreFile

Use of org.apache.hadoop.hbase.regionserver.HStoreFile in the Apache HBase project.

From the class GaussianFileListGenerator, the method iterator:

@Override
public Iterator<List<HStoreFile>> iterator() {
    // Yields MAX_FILE_GEN_ITERS lists, each of NUM_FILES_GEN mock store files whose
    // sizes are drawn from a Gaussian distribution.
    return new Iterator<List<HStoreFile>>() {

        // Gaussian source seeded from the shared test RNG.
        private GaussianRandomGenerator gen =
            new GaussianRandomGenerator(new MersenneTwister(random.nextInt()));

        // Number of file lists handed out so far.
        private int count = 0;

        @Override
        public boolean hasNext() {
            return count < MAX_FILE_GEN_ITERS;
        }

        @Override
        public List<HStoreFile> next() {
            // Honor the Iterator contract: fail loudly once the sequence is exhausted
            // (previously this kept returning lists forever).
            if (!hasNext()) {
                throw new java.util.NoSuchElementException();
            }
            count += 1;
            ArrayList<HStoreFile> files = new ArrayList<>(NUM_FILES_GEN);
            for (int i = 0; i < NUM_FILES_GEN; i++) {
                // Size drawn from N(32, 32), clamped at 0 and rounded up.
                files.add(createMockStoreFile((int) Math.ceil(Math.max(0, gen.nextNormalizedDouble() * 32 + 32))));
            }
            return files;
        }

        @Override
        public void remove() {
            // This iterator is read-only. The previous silent no-op violated the
            // Iterator contract, which requires UnsupportedOperationException here.
            throw new UnsupportedOperationException();
        }
    };
}
Also used : GaussianRandomGenerator(org.apache.commons.math3.random.GaussianRandomGenerator) Iterator(java.util.Iterator) ArrayList(java.util.ArrayList) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) MersenneTwister(org.apache.commons.math3.random.MersenneTwister)

Aggregations

HStoreFile (org.apache.hadoop.hbase.regionserver.HStoreFile)44 ArrayList (java.util.ArrayList)18 Test (org.junit.Test)16 Path (org.apache.hadoop.fs.Path)11 Configuration (org.apache.hadoop.conf.Configuration)8 HStore (org.apache.hadoop.hbase.regionserver.HStore)8 StripeInformationProvider (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider)8 IOException (java.io.IOException)6 OptionalLong (java.util.OptionalLong)6 TableName (org.apache.hadoop.hbase.TableName)5 Put (org.apache.hadoop.hbase.client.Put)5 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)5 FileSystem (org.apache.hadoop.fs.FileSystem)4 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)4 StoreFileReader (org.apache.hadoop.hbase.regionserver.StoreFileReader)4 ImmutableList (org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList)4 InterruptedIOException (java.io.InterruptedIOException)3 ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)3 ManualEnvironmentEdge (org.apache.hadoop.hbase.util.ManualEnvironmentEdge)3 FileNotFoundException (java.io.FileNotFoundException)2