
Example 1 with ImmutableList

Use of org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList in the Apache HBase project.

From the class StripeCompactionPolicy, method selectSingleStripeCompaction:

protected StripeCompactionRequest selectSingleStripeCompaction(StripeInformationProvider si, boolean includeL0, boolean canDropDeletesWithoutL0, boolean isOffpeak) throws IOException {
    ArrayList<ImmutableList<HStoreFile>> stripes = si.getStripes();
    // Track the best candidate ("bq") selection across stripes: prefer the selection
    // with the most files, breaking ties by the smaller total file size.
    int bqIndex = -1;
    List<HStoreFile> bqSelection = null;
    int stripeCount = stripes.size();
    long bqTotalSize = -1;
    for (int i = 0; i < stripeCount; ++i) {
        // If we want to compact L0 to drop deletes, we only want whole-stripe compactions.
        // So, pass includeL0 as 2nd parameter to indicate that.
        List<HStoreFile> selection = selectSimpleCompaction(stripes.get(i), !canDropDeletesWithoutL0 && includeL0, isOffpeak, false);
        if (selection.isEmpty())
            continue;
        long size = 0;
        for (HStoreFile sf : selection) {
            size += sf.getReader().length();
        }
        if (bqSelection == null || selection.size() > bqSelection.size() || (selection.size() == bqSelection.size() && size < bqTotalSize)) {
            bqSelection = selection;
            bqIndex = i;
            bqTotalSize = size;
        }
    }
    if (bqSelection == null) {
        LOG.debug("No good compaction is possible in any stripe");
        return null;
    }
    List<HStoreFile> filesToCompact = new ArrayList<>(bqSelection);
    // See if we can, and need to, split this stripe.
    int targetCount = 1;
    long targetKvs = Long.MAX_VALUE;
    boolean hasAllFiles = filesToCompact.size() == stripes.get(bqIndex).size();
    String splitString = "";
    if (hasAllFiles && bqTotalSize >= config.getSplitSize()) {
        if (includeL0) {
            // So, if we might split, don't compact the stripe with L0.
            return null;
        }
        Pair<Long, Integer> kvsAndCount = estimateTargetKvs(filesToCompact, config.getSplitCount());
        targetKvs = kvsAndCount.getFirst();
        targetCount = kvsAndCount.getSecond();
        splitString = "; the stripe will be split into at most " + targetCount + " stripes with " + targetKvs + " target KVs";
    }
    LOG.debug("Found compaction in a stripe with end key [" + Bytes.toString(si.getEndRow(bqIndex)) + "], with " + filesToCompact.size() + " files of total size " + bqTotalSize + splitString);
    // See if we can drop deletes.
    StripeCompactionRequest req;
    if (includeL0) {
        assert hasAllFiles;
        List<HStoreFile> l0Files = si.getLevel0Files();
        LOG.debug("Adding " + l0Files.size() + " files to compaction to be able to drop deletes");
        ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
        sfs.addSublist(filesToCompact);
        sfs.addSublist(l0Files);
        req = new BoundaryStripeCompactionRequest(sfs, si.getStripeBoundaries());
    } else {
        req = new SplitStripeCompactionRequest(filesToCompact, si.getStartRow(bqIndex), si.getEndRow(bqIndex), targetCount, targetKvs);
    }
    if (hasAllFiles && (canDropDeletesWithoutL0 || includeL0)) {
        req.setMajorRange(si.getStartRow(bqIndex), si.getEndRow(bqIndex));
    }
    req.getRequest().setOffPeak(isOffpeak);
    return req;
}
Also used: ImmutableList (org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList), ArrayList (java.util.ArrayList), HStoreFile (org.apache.hadoop.hbase.regionserver.HStoreFile), ConcatenatedLists (org.apache.hadoop.hbase.util.ConcatenatedLists)
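
The per-stripe file lists handled above are ImmutableList snapshots. The following standalone sketch is not taken from the HBase sources; it uses String in place of HStoreFile, and the class and file names are made up for illustration. It shows two ImmutableList properties relevant to those per-stripe lists: copyOf takes a defensive snapshot of a mutable source, and mutation attempts on the snapshot fail fast.

import java.util.ArrayList;
import java.util.List;

import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;

public class ImmutableListSketch {
    public static void main(String[] args) {
        // Mutable source list, standing in for a stripe's store files.
        List<String> files = new ArrayList<>();
        files.add("hfile-1");
        files.add("hfile-2");

        // ImmutableList.copyOf takes a defensive snapshot of the source list.
        ImmutableList<String> stripe = ImmutableList.copyOf(files);

        // Later changes to the source list are not visible through the snapshot.
        files.add("hfile-3");
        System.out.println(stripe.size()); // prints 2

        // Mutation attempts on the snapshot fail fast.
        try {
            stripe.add("hfile-4");
        } catch (UnsupportedOperationException expected) {
            System.out.println("snapshot is immutable");
        }
    }
}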

Example 2 with ImmutableList

Use of org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList in the Apache HBase project.

From the class TestStripeCompactionPolicy, method createStripesWithFiles:

/**
 * This method actually does all the work.
 */
private static StripeInformationProvider createStripesWithFiles(List<byte[]> boundaries, List<List<HStoreFile>> stripeFiles, List<HStoreFile> l0Files) throws Exception {
    ArrayList<ImmutableList<HStoreFile>> stripes = new ArrayList<>();
    ArrayList<byte[]> boundariesList = new ArrayList<>();
    StripeInformationProvider si = mock(StripeInformationProvider.class);
    if (!stripeFiles.isEmpty()) {
        assert stripeFiles.size() == (boundaries.size() + 1);
        boundariesList.add(OPEN_KEY);
        for (int i = 0; i <= boundaries.size(); ++i) {
            byte[] startKey = ((i == 0) ? OPEN_KEY : boundaries.get(i - 1));
            byte[] endKey = ((i == boundaries.size()) ? OPEN_KEY : boundaries.get(i));
            boundariesList.add(endKey);
            for (HStoreFile sf : stripeFiles.get(i)) {
                setFileStripe(sf, startKey, endKey);
            }
            stripes.add(ImmutableList.copyOf(stripeFiles.get(i)));
            when(si.getStartRow(eq(i))).thenReturn(startKey);
            when(si.getEndRow(eq(i))).thenReturn(endKey);
        }
    }
    ConcatenatedLists<HStoreFile> sfs = new ConcatenatedLists<>();
    sfs.addAllSublists(stripes);
    sfs.addSublist(l0Files);
    when(si.getStorefiles()).thenReturn(sfs);
    when(si.getStripes()).thenReturn(stripes);
    when(si.getStripeBoundaries()).thenReturn(boundariesList);
    when(si.getStripeCount()).thenReturn(stripes.size());
    when(si.getLevel0Files()).thenReturn(l0Files);
    return si;
}
Also used: ImmutableList (org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList), ArrayList (java.util.ArrayList), StripeInformationProvider (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider), HStoreFile (org.apache.hadoop.hbase.regionserver.HStoreFile), ConcatenatedLists (org.apache.hadoop.hbase.util.ConcatenatedLists)
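
The helper above builds the StripeInformationProvider as a Mockito mock rather than a real stripe store. The sketch below is a minimal, self-contained illustration of the same stubbing pattern (mock(), when(...).thenReturn(...), and the eq() matcher for per-index stubs); the RowProvider interface and its methods are hypothetical stand-ins, not part of HBase.

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Arrays;
import java.util.List;

public class MockStubbingSketch {
    // Hypothetical provider, standing in for StripeInformationProvider.
    interface RowProvider {
        byte[] getStartRow(int stripeIndex);
        List<byte[]> getBoundaries();
    }

    public static void main(String[] args) {
        RowProvider provider = mock(RowProvider.class);
        byte[] first = { 'a' };
        byte[] second = { 'b' };

        // Stub per-index answers, as createStripesWithFiles does for getStartRow/getEndRow.
        when(provider.getStartRow(eq(0))).thenReturn(first);
        when(provider.getStartRow(eq(1))).thenReturn(second);
        // Stub a no-argument getter, as done for getStripeBoundaries and getLevel0Files.
        when(provider.getBoundaries()).thenReturn(Arrays.asList(first, second));

        System.out.println(provider.getStartRow(1) == second); // prints true
    }
}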

Aggregations

ArrayList (java.util.ArrayList): 2 uses
HStoreFile (org.apache.hadoop.hbase.regionserver.HStoreFile): 2 uses
ConcatenatedLists (org.apache.hadoop.hbase.util.ConcatenatedLists): 2 uses
ImmutableList (org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList): 2 uses
StripeInformationProvider (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider): 1 use