Use of org.apache.hadoop.hbase.util.ConcatenatedLists in project hbase by apache.
From the class TestStripeCompactionPolicy, method createStripesWithFiles:
/**
 * This method does all the work: it builds a mocked StripeInformationProvider from the
 * given stripe boundaries, per-stripe file lists, and level-0 (L0) files.
 */
private static StripeInformationProvider createStripesWithFiles(List<byte[]> boundaries,
    List<List<StoreFile>> stripeFiles, List<StoreFile> l0Files) throws Exception {
  ArrayList<ImmutableList<StoreFile>> stripes = new ArrayList<>();
  ArrayList<byte[]> boundariesList = new ArrayList<>();
  StripeInformationProvider si = mock(StripeInformationProvider.class);
  if (!stripeFiles.isEmpty()) {
    assert stripeFiles.size() == (boundaries.size() + 1);
    boundariesList.add(OPEN_KEY);
    for (int i = 0; i <= boundaries.size(); ++i) {
      byte[] startKey = ((i == 0) ? OPEN_KEY : boundaries.get(i - 1));
      byte[] endKey = ((i == boundaries.size()) ? OPEN_KEY : boundaries.get(i));
      boundariesList.add(endKey);
      for (StoreFile sf : stripeFiles.get(i)) {
        setFileStripe(sf, startKey, endKey);
      }
      stripes.add(ImmutableList.copyOf(stripeFiles.get(i)));
      when(si.getStartRow(eq(i))).thenReturn(startKey);
      when(si.getEndRow(eq(i))).thenReturn(endKey);
    }
  }
  ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<>();
  sfs.addAllSublists(stripes);
  sfs.addSublist(l0Files);
  when(si.getStorefiles()).thenReturn(sfs);
  when(si.getStripes()).thenReturn(stripes);
  when(si.getStripeBoundaries()).thenReturn(boundariesList);
  when(si.getStripeCount()).thenReturn(stripes.size());
  when(si.getLevel0Files()).thenReturn(l0Files);
  return si;
}
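For context, a minimal sketch of how a test might drive this helper. This is not the actual test code: createMockFile is a hypothetical stand-in for the test class's own mocked-StoreFile factory, and the JUnit assertions only illustrate what the returned mock answers.

// Two stripes split at row "b", plus one L0 file.
List<byte[]> boundaries = Arrays.asList(Bytes.toBytes("b"));
List<List<StoreFile>> stripeFiles = Arrays.asList(
    Arrays.asList(createMockFile(10L), createMockFile(5L)), // files of stripe [OPEN_KEY, "b")
    Arrays.asList(createMockFile(8L)));                     // files of stripe ["b", OPEN_KEY)
List<StoreFile> l0Files = Arrays.asList(createMockFile(2L)); // one level-0 file
StripeInformationProvider si = createStripesWithFiles(boundaries, stripeFiles, l0Files);
// The mock now answers queries consistently:
assertEquals(2, si.getStripeCount());
assertEquals(4, si.getStorefiles().size()); // 3 stripe files + 1 L0 file, via ConcatenatedLists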
Use of org.apache.hadoop.hbase.util.ConcatenatedLists in project hbase by apache.
From the class StripeCompactionPolicy, method selectSingleStripeCompaction:
protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformationProvider si,
    boolean includeL0, boolean canDropDeletesWithoutL0, boolean isOffpeak) throws IOException {
  ArrayList<ImmutableList<HStoreFile>> stripes = si.getStripes();
  int bqIndex = -1;
  List<HStoreFile> bqSelection = null;
  int stripeCount = stripes.size();
  long bqTotalSize = -1;
  for (int i = 0; i < stripeCount; ++i) {
    // If we want to compact L0 to drop deletes, we only want whole-stripe compactions.
    // So, pass includeL0 as 2nd parameter to indicate that.
    List<HStoreFile> selection = selectSimpleCompaction(stripes.get(i),
        !canDropDeletesWithoutL0 && includeL0, isOffpeak, false);
    if (selection.isEmpty()) {
      continue;
    }
    long size = 0;
    for (HStoreFile sf : selection) {
      size += sf.getReader().length();
    }
    if (bqSelection == null || selection.size() > bqSelection.size()
        || (selection.size() == bqSelection.size() && size < bqTotalSize)) {
      bqSelection = selection;
      bqIndex = i;
      bqTotalSize = size;
    }
  }
  if (bqSelection == null) {
    LOG.debug("No good compaction is possible in any stripe");
    return null;
  }
  List<HStoreFile> filesToCompact = new ArrayList<>(bqSelection);
  // See if we can, and need to, split this stripe.
  int targetCount = 1;
  long targetKvs = Long.MAX_VALUE;
  boolean hasAllFiles = filesToCompact.size() == stripes.get(bqIndex).size();
  String splitString = "";
  if (hasAllFiles && bqTotalSize >= config.getSplitSize()) {
    if (includeL0) {
      // So, if we might split, don't compact the stripe with L0.
      return null;
    }
    Pair<Long, Integer> kvsAndCount = estimateTargetKvs(filesToCompact, config.getSplitCount());
    targetKvs = kvsAndCount.getFirst();
    targetCount = kvsAndCount.getSecond();
    splitString = "; the stripe will be split into at most " + targetCount + " stripes with "
        + targetKvs + " target KVs";
  }
  LOG.debug("Found compaction in a stripe with end key [" + Bytes.toString(si.getEndRow(bqIndex))
      + "], with " + filesToCompact.size() + " files of total size " + bqTotalSize + splitString);
  // See if we can drop deletes.
  StripeCompactionRequest req;
  if (includeL0) {
    assert hasAllFiles;
    List<HStoreFile> l0Files = si.getLevel0Files();
    LOG.debug("Adding " + l0Files.size() + " files to compaction to be able to drop deletes");
    ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
    sfs.addSublist(filesToCompact);
    sfs.addSublist(l0Files);
    req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries());
  } else {
    req = new SplitStripeCompactionRequest(filesToCompact, si.getStartRow(bqIndex),
        si.getEndRow(bqIndex), targetCount, targetKvs);
  }
  if (hasAllFiles && (canDropDeletesWithoutL0 || includeL0)) {
    req.setMajorRange(si.getStartRow(bqIndex), si.getEndRow(bqIndex));
  }
  req.getRequest().setOffPeak(isOffpeak);
  return req;
}
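The loop above keeps the "bq" ("best quality") candidate: among the per-stripe selections it prefers the one with the most files, breaking ties by the smaller total byte size, so the policy does the most compaction work for the least I/O. A standalone restatement of that predicate, as a sketch rather than HBase API:

// Returns true if a new candidate (newCount files, newSize bytes) should replace the
// current best (bestCount, bestSize); bestCount < 0 means no candidate has been kept yet.
static boolean isBetterSelection(int newCount, long newSize, int bestCount, long bestSize) {
  if (bestCount < 0) {
    return true;                 // first non-empty selection always wins
  }
  if (newCount != bestCount) {
    return newCount > bestCount; // prefer compacting more files
  }
  return newSize < bestSize;     // tie-break: prefer the smaller total size
}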
Use of org.apache.hadoop.hbase.util.ConcatenatedLists in project hbase by apache.
From the class TestStripeCompactionPolicy, method testSingleStripeDropDeletes:
@Test
public void testSingleStripeDropDeletes() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // Test depends on this not being set to pass. Default breaks test. TODO: Revisit.
  conf.unset("hbase.hstore.compaction.min.size");
  StripeCompactionPolicy policy = createPolicy(conf);
  // Verify the deletes can be dropped if there are no L0 files.
  Long[][] stripes = new Long[][] { new Long[] { 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  StripeInformationProvider si = createStripesWithSizes(0, 0, stripes);
  verifySingleStripeCompaction(policy, si, 0, true);
  // But they cannot be dropped if there are L0 files.
  si = createStripesWithSizes(2, 2, stripes);
  verifySingleStripeCompaction(policy, si, 0, false);
  // Unless there are enough L0 files to cause an L0 compaction.
  si = createStripesWithSizes(6, 2, stripes);
  ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
  sfs.addSublist(si.getLevel0Files());
  sfs.addSublist(si.getStripes().get(0));
  verifyCompaction(policy, si, sfs, si.getStartRow(0), si.getEndRow(0), si.getStripeBoundaries());
  // If we cannot actually compact all files in some stripe, L0 is chosen.
  si = createStripesWithSizes(6, 2,
      new Long[][] { new Long[] { 10L, 1L, 1L, 1L, 1L }, new Long[] { 12L } });
  verifyCompaction(policy, si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
  // Even when L0 has no files: if not all files of a stripe are selected, deletes must
  // not be dropped.
  stripes = new Long[][] { new Long[] { 100L, 3L, 2L, 2L, 2L }, new Long[] { 6L } };
  si = createStripesWithSizes(0, 0, stripes);
  List<HStoreFile> compactFile = new ArrayList<>();
  Iterator<HStoreFile> iter = si.getStripes().get(0).listIterator(1);
  while (iter.hasNext()) {
    compactFile.add(iter.next());
  }
  verifyCompaction(policy, si, compactFile, false, 1, null, si.getStartRow(0), si.getEndRow(0),
      true);
}
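All of the snippets above lean on ConcatenatedLists, a lightweight Collection view that chains several backing lists without copying their contents; iteration walks the sublists in the order they were added. A minimal self-contained sketch of those semantics (the demo class and its strings are ours, the ConcatenatedLists calls are the same ones used above):

import java.util.Arrays;
import org.apache.hadoop.hbase.util.ConcatenatedLists;

public class ConcatenatedListsDemo {
  public static void main(String[] args) {
    ConcatenatedLists<String> all = new ConcatenatedLists<>();
    // addAllSublists appends each inner list as its own sublist.
    all.addAllSublists(Arrays.asList(
        Arrays.asList("stripe0-a", "stripe0-b"),
        Arrays.asList("stripe1-a")));
    // addSublist appends one more backing list, here standing in for L0 files.
    all.addSublist(Arrays.asList("l0-a"));
    System.out.println(all.size()); // 4: no elements were copied, only the lists chained
    for (String s : all) {
      System.out.println(s); // visits sublists in insertion order
    }
  }
}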