Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by Apache.
The class TestStripeCompactionPolicy, method testSingleStripeDropDeletes.
@Test
public void testSingleStripeDropDeletes() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // The test depends on this key being unset; the default value breaks it. TODO: Revisit.
  conf.unset("hbase.hstore.compaction.min.size");
  StripeCompactionPolicy policy = createPolicy(conf);
  // Verify that deletes can be dropped if there are no L0 files.
  Long[][] stripes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  StripeInformationProvider si = createStripesWithSizes(0, 0, stripes);
  verifySingleStripeCompaction(policy, si, 0, true);
  // But they cannot be dropped if there are L0 files...
  si = createStripesWithSizes(2, 2, stripes);
  verifySingleStripeCompaction(policy, si, 0, false);
  // ...unless there are enough of them to trigger an L0 compaction.
  si = createStripesWithSizes(6, 2, stripes);
  ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
  sfs.addSublist(si.getLevel0Files());
  sfs.addSublist(si.getStripes().get(0));
  verifyCompaction(policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries());
  // If we cannot actually compact all files in some stripe, L0 is chosen.
  si = createStripesWithSizes(6, 2,
    new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
  verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
  // Even if L0 has no files: when not all files of a stripe are selected,
  // deletes must not be dropped.
  stripes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  si = createStripesWithSizes(0, 0, stripes);
  List<HStoreFile> compactFile = new ArrayList<>();
  Iterator<HStoreFile> iter = si.getStripes().get(0).listIterator(1);
  while (iter.hasNext()) {
    compactFile.add(iter.next());
  }
  verifyCompaction(policy, si, compactFile, false, 1, null, si.getStartRow(0), si.getEndRow(0), true);
}
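The helpers used above (createPolicy, createStripesWithSizes, verifySingleStripeCompaction) live elsewhere in TestStripeCompactionPolicy and are not shown on this page. As a rough, hypothetical sketch of what createStripesWithSizes might do — assuming its arguments are the L0 file count, the L0 file size, and then per-stripe file sizes — it builds mocked store files and exposes them through a mocked StripeInformationProvider:

  // Hypothetical sketch (not the actual HBase test helper): build mocked store
  // files for L0 and for each stripe, then expose them through a mocked
  // StripeInformationProvider. The real interface's return types may be more
  // specific (e.g. ImmutableList), so doReturn is used to sidestep generics.
  private static StripeInformationProvider createStripesWithSizes(
      int l0Count, long l0Size, Long[][] stripeSizes) throws Exception {
    List<HStoreFile> l0Files = new ArrayList<>();
    for (int i = 0; i < l0Count; i++) {
      l0Files.add(createFile(l0Size)); // createFile: a mocked HStoreFile of a given size
    }
    List<List<HStoreFile>> stripes = new ArrayList<>();
    for (Long[] sizes : stripeSizes) {
      List<HStoreFile> stripe = new ArrayList<>();
      for (Long size : sizes) {
        stripe.add(createFile(size));
      }
      stripes.add(stripe);
    }
    StripeInformationProvider si = mock(StripeInformationProvider.class);
    doReturn(l0Files).when(si).getLevel0Files();
    doReturn(stripes).when(si).getStripes();
    return si;
  }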
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by Apache.
The class TestStripeCompactionPolicy, method testCheckExpiredL0Compaction.
@Test
public void testCheckExpiredL0Compaction() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  int minL0 = 100;
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, minL0);
  conf.setInt(MIN_FILES_KEY, 4);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  long now = defaultTtl + 2;
  edge.setValue(now);
  EnvironmentEdgeManager.injectEdge(edge);
  HStoreFile expiredFile = createFile(10), notExpiredFile = createFile(10);
  when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
  when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
  List<HStoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
  List<HStoreFile> mixed = Lists.newArrayList(expiredFile, notExpiredFile);
  StripeCompactionPolicy policy =
    createPolicy(conf, defaultSplitSize, defaultSplitCount, defaultInitialCount, true);
  // An L0 with a mix of expired and live files does not need compaction...
  StripeCompactionPolicy.StripeInformationProvider si =
    createStripesWithFiles(null, new ArrayList<>(), mixed);
  assertFalse(policy.needsCompactions(si, al()));
  // ...not even with many files, as long as the L0 file minimum is not reached.
  List<HStoreFile> largeMixed = new ArrayList<>();
  for (int i = 0; i < minL0 - 1; i++) {
    largeMixed.add(i % 2 == 0 ? notExpiredFile : expiredFile);
  }
  si = createStripesWithFiles(null, new ArrayList<>(), largeMixed);
  assertFalse(policy.needsCompactions(si, al()));
  // Only when every L0 file is expired does the policy ask for a compaction.
  si = createStripesWithFiles(null, new ArrayList<>(), expired);
  assertFalse(policy.needsSingleStripeCompaction(si));
  assertFalse(policy.hasExpiredStripes(si));
  assertTrue(policy.allL0FilesExpired(si));
  assertTrue(policy.needsCompactions(si, al()));
}
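createFile is also defined off-page. A plausible Mockito-based sketch is shown below (the real HBase test helper uses a dedicated mock class and may differ); the important detail is that getReader() returns a mock, which is what makes the chained stubbing when(expiredFile.getReader().getMaxTimestamp()) in the test legal:

  // Hypothetical sketch of the off-page createFile helper: a Mockito-mocked
  // HStoreFile whose reader reports the given size.
  private static HStoreFile createFile(long size) {
    HStoreFile sf = mock(HStoreFile.class);
    StoreFileReader reader = mock(StoreFileReader.class);
    when(reader.length()).thenReturn(size);
    when(sf.getReader()).thenReturn(reader);
    when(sf.isReference()).thenReturn(false);
    return sf;
  }

  // A no-arg overload presumably delegates with a default size.
  private static HStoreFile createFile() {
    return createFile(0);
  }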
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by Apache.
The class TestStripeCompactionPolicy, method testWithReferences.
@Test
public void testWithReferences() throws Exception {
  StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
  StripeCompactor sc = mock(StripeCompactor.class);
  HStoreFile ref = createFile();
  when(ref.isReference()).thenReturn(true);
  StripeInformationProvider si = mock(StripeInformationProvider.class);
  Collection<HStoreFile> sfs = al(ref, createFile());
  when(si.getStorefiles()).thenReturn(sfs);
  assertTrue(policy.needsCompactions(si, al()));
  StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
  // UnmodifiableCollection does not implement equals, so copy the request's
  // files into a collection that does before comparing.
  assertEquals(si.getStorefiles(), new ArrayList<>(scr.getRequest().getFiles()));
  scr.execute(sc, NoLimitThroughputController.INSTANCE, null);
  verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(), aryEq(OPEN_KEY),
    aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY), any(), any());
}
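al() is a small off-page convenience helper; presumably a varargs ArrayList builder along these lines (hypothetical sketch, using java.util.Arrays):

  // Hypothetical sketch of the off-page al() helper: a varargs ArrayList builder.
  @SafeVarargs
  private static <T> ArrayList<T> al(T... items) {
    return new ArrayList<>(Arrays.asList(items));
  }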
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by Apache.
The class TestFIFOCompactionPolicy, method testFIFOCompactionPolicyExpiredEmptyHFiles.
/**
 * Unit test for HBASE-21504
 */
@Test
public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
  TableName tableName = TableName.valueOf("testFIFOCompactionPolicyExpiredEmptyHFiles");
  TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
    .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      FIFOCompactionPolicy.class.getName())
    .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      DisabledRegionSplitPolicy.class.getName())
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
    .build();
  Table table = TEST_UTIL.createTable(desc, null);
  long ts = EnvironmentEdgeManager.currentTime() - 10 * 1000;
  Put put = new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
  table.put(put);
  // HFile-0
  TEST_UTIL.getAdmin().flush(tableName);
  put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
  table.put(put);
  final int testWaitTimeoutMs = 20000;
  // HFile-1
  TEST_UTIL.getAdmin().flush(tableName);
  HStore store = Preconditions.checkNotNull(getStoreWithName(tableName));
  Assert.assertEquals(2, store.getStorefilesCount());
  TEST_UTIL.getAdmin().majorCompact(tableName);
  TEST_UTIL.waitFor(testWaitTimeoutMs,
    (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);
  Assert.assertEquals(1, store.getStorefilesCount());
  HStoreFile sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
  Assert.assertEquals(0, sf.getReader().getEntries());
  put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
  table.put(put);
  // HFile-2
  TEST_UTIL.getAdmin().flush(tableName);
  Assert.assertEquals(2, store.getStorefilesCount());
  TEST_UTIL.getAdmin().majorCompact(tableName);
  TEST_UTIL.waitFor(testWaitTimeoutMs,
    (Waiter.Predicate<Exception>) () -> store.getStorefilesCount() == 1);
  Assert.assertEquals(1, store.getStorefilesCount());
  sf = Preconditions.checkNotNull(store.getStorefiles().iterator().next());
  Assert.assertEquals(0, sf.getReader().getEntries());
}
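getStoreWithName is another off-page helper. A hypothetical sketch, assuming the standard HBaseTestingUtility mini-cluster setup, would walk the region servers and return the first store of the test table (the real helper in TestFIFOCompactionPolicy may differ in detail):

  // Hypothetical sketch of the off-page getStoreWithName helper: locate the
  // single store of the test table on the mini-cluster.
  private static HStore getStoreWithName(TableName tableName) {
    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    for (JVMClusterUtil.RegionServerThread rst : cluster.getRegionServerThreads()) {
      for (HRegion region : rst.getRegionServer().getRegions(tableName)) {
        return region.getStores().get(0);
      }
    }
    return null;
  }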
Use of org.apache.hadoop.hbase.regionserver.HStoreFile in project hbase by Apache.
The class GaussianFileListGenerator, method iterator.
@Override
public Iterator<List<HStoreFile>> iterator() {
  return new Iterator<List<HStoreFile>>() {
    private GaussianRandomGenerator gen =
      new GaussianRandomGenerator(new MersenneTwister(random.nextInt()));
    private int count = 0;

    @Override
    public boolean hasNext() {
      return count < MAX_FILE_GEN_ITERS;
    }

    @Override
    public List<HStoreFile> next() {
      count += 1;
      ArrayList<HStoreFile> files = new ArrayList<>(NUM_FILES_GEN);
      for (int i = 0; i < NUM_FILES_GEN; i++) {
        // Mock file sizes are normally distributed around 32 with a standard
        // deviation of 32, clamped at zero.
        files.add(createMockStoreFile(
          (int) Math.ceil(Math.max(0, gen.nextNormalizedDouble() * 32 + 32))));
      }
      return files;
    }

    @Override
    public void remove() {
    }
  };
}
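Because the class exposes Iterable-style iteration over List<HStoreFile>, a perf harness can consume it with a plain for-each loop. A minimal, illustrative usage — assuming a usable no-arg constructor and that createMockStoreFile stubs a reader length, neither of which is shown on this page:

  // Illustrative only: iterate the generator and inspect the mocked file lists.
  for (List<HStoreFile> files : new GaussianFileListGenerator()) {
    long totalSize = 0;
    for (HStoreFile f : files) {
      totalSize += f.getReader().length();
    }
    System.out.println(files.size() + " mock files, total size " + totalSize);
  }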