Search in sources:

Example 1 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class CompactSplitThread, method requestCompactionInternal:

private List<CompactionRequest> requestCompactionInternal(final Region r, final String why, int p, List<Pair<CompactionRequest, Store>> requests, boolean selectNow, User user) throws IOException {
    // not a special compaction request, so make our own list
    List<CompactionRequest> ret = null;
    if (requests == null) {
        ret = selectNow ? new ArrayList<>(r.getStores().size()) : null;
        for (Store s : r.getStores()) {
            CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user);
            if (selectNow)
                ret.add(cr);
        }
    } else {
        // only system requests have selectNow == false
        Preconditions.checkArgument(selectNow);
        ret = new ArrayList<>(requests.size());
        for (Pair<CompactionRequest, Store> pair : requests) {
            ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst(), user));
        }
    }
    return ret;
}
Also used: ArrayList (java.util.ArrayList), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest)
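The selectNow flag separates the two entry points: user or admin requests select store files up front, while system-triggered requests defer selection to the compaction thread. A minimal sketch of plausible wrappers around this method (names and signatures modeled on CompactSplitThread, but illustrative rather than verbatim):

public synchronized List<CompactionRequest> requestCompaction(final Region r, final String why) throws IOException {
    // User/admin path: select store files immediately (selectNow == true).
    return requestCompactionInternal(r, why, Store.NO_PRIORITY, null, true, null);
}

public synchronized void requestSystemCompaction(final Region r, final String why) throws IOException {
    // System path: defer file selection to the compaction thread
    // (selectNow == false), so no request list is returned.
    requestCompactionInternal(r, why, Store.NO_PRIORITY, null, false, null);
}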

Example 2 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class TestDefaultCompactSelection, method testCompactionEmptyHFile:

@Test
public void testCompactionEmptyHFile() throws IOException {
    // Set TTL
    ScanInfo oldScanInfo = store.getScanInfo();
    ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getConfiguration(), oldScanInfo.getFamily(),
        oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600 /* new TTL */,
        oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
        oldScanInfo.getComparator());
    store.setScanInfo(newScanInfo);
    // Do not compact empty store file
    List<StoreFile> candidates = sfCreate(0);
    for (StoreFile file : candidates) {
        if (file instanceof MockStoreFile) {
            MockStoreFile mockFile = (MockStoreFile) file;
            mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
            mockFile.setEntries(0);
        }
    }
    // Test Default compactions
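    // Arguments to selectCompaction, in order: candidate files, files already
    // compacting, isUserCompaction, mayUseOffPeak, forceMajor.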
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, false, false);
    Assert.assertTrue(result.getFiles().isEmpty());
    store.setScanInfo(oldScanInfo);
}
Also used: RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest), Test (org.junit.Test)

Example 3 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class TestMajorCompaction, method testUserMajorCompactionRequest:

/**
 * Test for HBASE-5920: a user-requested major compaction should always run,
 * even when the store has too many files.
 */
@Test
public void testUserMajorCompactionRequest() throws IOException {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
        createStoreFile(r);
    }
    store.triggerMajorCompaction();
    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null).getRequest();
    assertNotNull("Expected to receive a compaction request", request);
    assertTrue("User-requested major compaction should always occur, even if there are too many store files", request.isMajor());
}
Also used: CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest), Test (org.junit.Test)
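The PRIORITY_USER request exercised here is what the client-side Admin API produces. A minimal client sketch (the configuration and table name are illustrative; the classes come from org.apache.hadoop.hbase.client):

try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
    // Asynchronous: the server queues the major compaction and returns.
    admin.majorCompact(TableName.valueOf("my_table"));
}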

Example 4 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hadoop by apache.

In class TestHBaseStorageFlowRunCompaction, method getFlowScannerForTestingCompaction:

private FlowScanner getFlowScannerForTestingCompaction() {
    // create a FlowScanner object with the sole purpose of invoking a
    // process summation
    CompactionRequest request = new CompactionRequest();
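    // The two flags are (isMajor, isAllFiles): a major compaction that
    // covers all store files.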
    request.setIsMajor(true, true);
    // okay to pass in nulls for the constructor arguments
    // because all we want to do is invoke the process summation
    FlowScanner fs = new FlowScanner(null, null, (request.isMajor() ? FlowScannerOperation.MAJOR_COMPACTION : FlowScannerOperation.MINOR_COMPACTION));
    assertNotNull(fs);
    return fs;
}
Also used: CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest)

Example 5 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class HStore, method requestCompaction:

@Override
public CompactionContext requestCompaction(int priority, final CompactionRequest baseRequest, User user) throws IOException {
    // don't even select for compaction if writes are disabled
    if (!this.areWritesEnabled()) {
        return null;
    }
    // Before we do compaction, try to get rid of unneeded files to simplify things.
    removeUnneededFiles();
    final CompactionContext compaction = storeEngine.createCompaction();
    CompactionRequest request = null;
    this.lock.readLock().lock();
    try {
        synchronized (filesCompacting) {
            // First, see if coprocessor would want to override selection.
            if (this.getCoprocessorHost() != null) {
                final List<StoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
                boolean override = false;
                override = getCoprocessorHost().preCompactSelection(this, candidatesForCoproc, baseRequest, user);
                if (override) {
                    // Coprocessor is overriding normal file selection.
                    compaction.forceSelect(new CompactionRequest(candidatesForCoproc));
                }
            }
            // Normal case - coprocessor is not overriding file selection.
            if (!compaction.hasSelection()) {
                boolean isUserCompaction = priority == Store.PRIORITY_USER;
                boolean mayUseOffPeak = offPeakHours.isOffPeakHour() && offPeakCompactionTracker.compareAndSet(false, true);
                try {
                    compaction.select(this.filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
                } catch (IOException e) {
                    if (mayUseOffPeak) {
                        offPeakCompactionTracker.set(false);
                    }
                    throw e;
                }
                assert compaction.hasSelection();
                if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
                    // Compaction policy doesn't want to take advantage of off-peak.
                    offPeakCompactionTracker.set(false);
                }
            }
            if (this.getCoprocessorHost() != null) {
                this.getCoprocessorHost().postCompactSelection(this, ImmutableList.copyOf(compaction.getRequest().getFiles()), baseRequest, user);
            }
            // Selected files; see if we have a compaction with some custom base request.
            if (baseRequest != null) {
                // Update the request with what the system thinks the request should be;
                // it's up to the request if it wants to listen.
                compaction.forceSelect(baseRequest.combineWith(compaction.getRequest()));
            }
            // Finally, we have the resulting files list. Check if we have any files at all.
            request = compaction.getRequest();
            final Collection<StoreFile> selectedFiles = request.getFiles();
            if (selectedFiles.isEmpty()) {
                return null;
            }
            addToCompactingFiles(selectedFiles);
            // If we're enqueuing a major, clear the force flag.
            this.forceMajor = this.forceMajor && !request.isMajor();
            // Set common request properties.
            // Set priority, either override value supplied by caller or from store.
            request.setPriority((priority != Store.NO_PRIORITY) ? priority : getCompactPriority());
            request.setDescription(getRegionInfo().getRegionNameAsString(), getColumnFamilyName());
        }
    } finally {
        this.lock.readLock().unlock();
    }
    LOG.debug(getRegionInfo().getEncodedName() + " - " + getColumnFamilyName() + ": Initiating " + (request.isMajor() ? "major" : "minor") + " compaction" + (request.isAllFiles() ? " (all files)" : ""));
    this.region.reportCompactionRequestStart(request.isMajor());
    return compaction;
}
Also used: CompactionContext (org.apache.hadoop.hbase.regionserver.compactions.CompactionContext), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest)
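The preCompactSelection/postCompactSelection calls above are where coprocessors hook into file selection. A minimal observer sketch against the HBase 1.x RegionObserver API (signatures differ across versions, and the bypass behavior is an assumption to verify, so treat this as illustrative):

public class SelectionAuditObserver extends BaseRegionObserver {
    @Override
    public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store, List<StoreFile> candidates, CompactionRequest request) throws IOException {
        // Inspect or prune the candidate files before the policy runs; in the
        // 1.x API, calling c.bypass() here is (as this sketch assumes) what
        // makes the host report an override and take the forceSelect path
        // seen in HStore.requestCompaction above.
    }
}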

Aggregations

CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest): 9 usages
Test (org.junit.Test): 4 usages
ArrayList (java.util.ArrayList): 2 usages
CompactionContext (org.apache.hadoop.hbase.regionserver.compactions.CompactionContext): 2 usages
RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy): 2 usages
IOException (java.io.IOException): 1 usage
InterruptedIOException (java.io.InterruptedIOException): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 1 usage
StripeCompactor (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor): 1 usage
NoLimitThroughputController (org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController): 1 usage
ThroughputController (org.apache.hadoop.hbase.regionserver.throttle.ThroughputController): 1 usage
User (org.apache.hadoop.hbase.security.User): 1 usage