
Example 6 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class HStore, the method compact:

@Override
public List<StoreFile> compact(CompactionContext compaction, ThroughputController throughputController, User user) throws IOException {
    assert compaction != null;
    List<StoreFile> sfs = null;
    CompactionRequest cr = compaction.getRequest();
    try {
        // Do all sanity checking in here if we have a valid CompactionRequest
        // because we need to clean up after it on the way out in a finally
        // block below
        long compactionStartTime = EnvironmentEdgeManager.currentTime();
        assert compaction.hasSelection();
        Collection<StoreFile> filesToCompact = cr.getFiles();
        assert !filesToCompact.isEmpty();
        synchronized (filesCompacting) {
            // sanity check: we're compacting files that this store knows about
            // TODO: change this to LOG.error() after more debugging
            Preconditions.checkArgument(filesCompacting.containsAll(filesToCompact));
        }
        // Ready to go. Have list of files to compact.
        LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in " + this + " of " + this.getRegionInfo().getRegionNameAsString() + " into tmpdir=" + fs.getTempDir() + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1));
        // Commence the compaction.
        List<Path> newFiles = compaction.compact(throughputController, user);
        long outputBytes = 0L;
        // TODO: get rid of this!
        if (!this.conf.getBoolean("hbase.hstore.compaction.complete", true)) {
            LOG.warn("hbase.hstore.compaction.complete is set to false");
            sfs = new ArrayList<>(newFiles.size());
            final boolean evictOnClose = cacheConf != null ? cacheConf.shouldEvictOnClose() : true;
            for (Path newFile : newFiles) {
                // Create storefile around what we wrote with a reader on it.
                StoreFile sf = createStoreFileAndReader(newFile);
                sf.closeReader(evictOnClose);
                sfs.add(sf);
            }
            return sfs;
        }
        // Do the steps necessary to complete the compaction.
        sfs = moveCompatedFilesIntoPlace(cr, newFiles, user);
        writeCompactionWalRecord(filesToCompact, sfs);
        replaceStoreFiles(filesToCompact, sfs);
        if (cr.isMajor()) {
            majorCompactedCellsCount += getCompactionProgress().totalCompactingKVs;
            majorCompactedCellsSize += getCompactionProgress().totalCompactedSize;
        } else {
            compactedCellsCount += getCompactionProgress().totalCompactingKVs;
            compactedCellsSize += getCompactionProgress().totalCompactedSize;
        }
        for (StoreFile sf : sfs) {
            outputBytes += sf.getReader().length();
        }
        // At this point the store will use new files for all new scanners.
        // update store size.
        completeCompaction(filesToCompact);
        long now = EnvironmentEdgeManager.currentTime();
        if (region.getRegionServerServices() != null && region.getRegionServerServices().getMetrics() != null) {
            region.getRegionServerServices().getMetrics().updateCompaction(cr.isMajor(),
                now - compactionStartTime, cr.getFiles().size(), newFiles.size(), cr.getSize(), outputBytes);
        }
        logCompactionEndMessage(cr, sfs, now, compactionStartTime);
        return sfs;
    } finally {
        finishCompactionRequest(cr);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest)
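
HStore.compact follows a select/compact/commit shape, with the bookkeeping cleanup guaranteed by the finally block. Below is a minimal standalone sketch of that pattern; the Compaction interface and the file-name strings are hypothetical stand-ins for illustration, not HBase types.

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class CompactSketch {

    // Hypothetical stand-in for HBase's CompactionContext, for illustration only.
    interface Compaction {
        List<String> selectedFiles();
        List<String> compact() throws IOException;
    }

    static List<String> runCompaction(Compaction compaction) throws IOException {
        long start = System.currentTimeMillis();
        try {
            List<String> inputs = compaction.selectedFiles();
            if (inputs.isEmpty()) {
                throw new IllegalStateException("no files selected for compaction");
            }
            // Commence the compaction; real code would then commit the outputs atomically.
            List<String> outputs = compaction.compact();
            System.out.println("Compacted " + inputs.size() + " file(s) into " + outputs.size()
                + " in " + (System.currentTimeMillis() - start) + "ms");
            return outputs;
        } finally {
            // Mirrors finishCompactionRequest(cr): cleanup runs whether the
            // compaction returned normally or threw.
            releaseFiles(compaction);
        }
    }

    static void releaseFiles(Compaction compaction) {
        // In HStore this would remove the request's files from filesCompacting.
    }

    public static void main(String[] args) throws IOException {
        Compaction c = new Compaction() {
            public List<String> selectedFiles() { return Arrays.asList("a.hfile", "b.hfile"); }
            public List<String> compact() { return Collections.singletonList("tmp/out.hfile"); }
        };
        runCompaction(c);
    }
}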

Example 7 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class TestCompactionPolicy, the method compactEquals:

void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak, long... expected) throws IOException {
    store.forceMajor = forcemajor;
    // Test Default compactions
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine.getCompactionPolicy())
        .selectCompaction(candidates, new ArrayList<>(), false, isOffPeak, forcemajor);
    List<StoreFile> actual = new ArrayList<>(result.getFiles());
    if (isOffPeak && !forcemajor) {
        Assert.assertTrue(result.isOffPeak());
    }
    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
    store.forceMajor = false;
}
Also used: ArrayList (java.util.ArrayList), RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest)
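
The policy exercised here, RatioBasedCompactionPolicy, selects files by a size-ratio rule: a file qualifies when its size is at most the sum of the files after it times a configurable ratio, and the off-peak ratio is larger so that bigger files qualify during quiet hours (the stock defaults are roughly 1.2 peak and 5.0 off-peak). Below is a rough standalone sketch of that rule, simplified to ignore the real policy's min/max file-count bounds and other constraints.

import java.util.ArrayList;
import java.util.List;

public class RatioSelectSketch {

    // A file qualifies if its size is no more than ratio * (sum of the files after it).
    // Once a file qualifies, it and everything after it are selected.
    static List<Long> select(long[] sizes, double ratio) {
        List<Long> selected = new ArrayList<>();
        for (int i = 0; i < sizes.length; i++) {
            long sumOfLater = 0;
            for (int j = i + 1; j < sizes.length; j++) {
                sumOfLater += sizes[j];
            }
            if (sizes[i] <= ratio * sumOfLater) {
                for (int j = i; j < sizes.length; j++) {
                    selected.add(sizes[j]);
                }
                break;
            }
        }
        return selected;
    }

    public static void main(String[] args) {
        long[] sizes = { 1000, 100, 90, 80 };
        // Peak-hours ratio admits only the small tail; the 1000-byte file is too big.
        System.out.println(select(sizes, 1.2));  // [100, 90, 80]
        // A higher off-peak ratio pulls the large file in as well.
        System.out.println(select(sizes, 5.0));  // [1000, 100, 90, 80]
    }
}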

Example 8 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class TestMajorCompaction, the method testNonUserMajorCompactionRequest:

/**
   * Test for HBASE-5920 - Test user requested major compactions always occurring
   */
@Test
public void testNonUserMajorCompactionRequest() throws Exception {
    Store store = r.getStore(COLUMN_FAMILY);
    createStoreFile(r);
    for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
        createStoreFile(r);
    }
    store.triggerMajorCompaction();
    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null).getRequest();
    assertNotNull("Expected to receive a compaction request", request);
    assertEquals("System-requested major compaction should not occur if there are too many store files", false, request.isMajor());
}
Also used: CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest), Test (org.junit.Test)
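
The assertion above captures a downgrade rule: even when a major compaction has been triggered, selecting more files than the per-compaction limit makes the request proceed as a minor compaction. Here is a tiny sketch of that decision; maxFilesToCompact stands in for the hbase.hstore.compaction.max setting, and the method is illustrative rather than HBase's actual selection code.

public class MajorDowngradeSketch {

    // Even if a major compaction was requested, compacting more files than the
    // configured limit in one pass is refused, so the request is downgraded to minor.
    static boolean isMajor(int candidateFileCount, int maxFilesToCompact, boolean majorRequested) {
        return majorRequested && candidateFileCount <= maxFilesToCompact;
    }

    public static void main(String[] args) {
        int maxFilesToCompact = 10;
        System.out.println(isMajor(11, maxFilesToCompact, true));  // false: too many files
        System.out.println(isMajor(5, maxFilesToCompact, true));   // true
    }
}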

Example 9 with CompactionRequest

Use of org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest in project hbase by apache.

In class TestStripeStoreEngine, the method testCompactionContextForceSelect:

@Test
public void testCompactionContextForceSelect() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int targetCount = 2;
    conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, targetCount);
    conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 2);
    conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, TestStoreEngine.class.getName());
    TestStoreEngine se = createEngine(conf);
    StripeCompactor mockCompactor = mock(StripeCompactor.class);
    se.setCompactorOverride(mockCompactor);
    when(mockCompactor.compact(any(CompactionRequest.class), anyInt(), anyLong(), any(byte[].class),
        any(byte[].class), any(byte[].class), any(byte[].class), any(ThroughputController.class),
        any(User.class))).thenReturn(new ArrayList<>());
    // Produce 3 L0 files.
    StoreFile sf = createFile();
    ArrayList<StoreFile> compactUs = al(sf, createFile(), createFile());
    se.getStoreFileManager().loadFiles(compactUs);
    // Create a compaction that would want to split the stripe.
    CompactionContext compaction = se.createCompaction();
    compaction.select(al(), false, false, false);
    assertEquals(3, compaction.getRequest().getFiles().size());
    // Override the file list. Granted, overriding this compaction in this manner will
    // break things in real world, but we only want to verify the override.
    compactUs.remove(sf);
    CompactionRequest req = new CompactionRequest(compactUs);
    compaction.forceSelect(req);
    assertEquals(2, compaction.getRequest().getFiles().size());
    assertFalse(compaction.getRequest().getFiles().contains(sf));
    // Make sure the correct method is called on the compactor.
    compaction.compact(NoLimitThroughputController.INSTANCE, null);
    verify(mockCompactor, times(1)).compact(compaction.getRequest(), targetCount, 0L,
        StripeStoreFileManager.OPEN_KEY, StripeStoreFileManager.OPEN_KEY, null, null,
        NoLimitThroughputController.INSTANCE, null);
}
Also used: User (org.apache.hadoop.hbase.security.User), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), CompactionContext (org.apache.hadoop.hbase.regionserver.compactions.CompactionContext), ThroughputController (org.apache.hadoop.hbase.regionserver.throttle.ThroughputController), NoLimitThroughputController (org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest), StripeCompactor (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor), Test (org.junit.Test)
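
The test leans on Mockito's stub-then-verify pattern: the real StripeCompactor is swapped for a mock, the engine is driven normally, and the mock records the arguments it received. Below is a self-contained sketch of the same pattern against a hypothetical Compactor interface rather than HBase's types.

import static org.mockito.Mockito.*;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class MockVerifySketch {

    // Hypothetical interface standing in for HBase's StripeCompactor.
    interface Compactor {
        List<String> compact(List<String> files, int targetCount);
    }

    public static void main(String[] args) {
        Compactor mockCompactor = mock(Compactor.class);
        // Stub: any invocation returns an empty output list, like the test above.
        when(mockCompactor.compact(anyList(), anyInt()))
            .thenReturn(Collections.emptyList());

        List<String> files = Arrays.asList("f1", "f2");
        mockCompactor.compact(files, 2);

        // Verify: the mock saw exactly one call with exactly these arguments.
        verify(mockCompactor, times(1)).compact(files, 2);
    }
}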

Aggregations

CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest): 9
Test (org.junit.Test): 4
ArrayList (java.util.ArrayList): 2
CompactionContext (org.apache.hadoop.hbase.regionserver.compactions.CompactionContext): 2
RatioBasedCompactionPolicy (org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy): 2
IOException (java.io.IOException): 1
InterruptedIOException (java.io.InterruptedIOException): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
Path (org.apache.hadoop.fs.Path): 1
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 1
StripeCompactor (org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor): 1
NoLimitThroughputController (org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController): 1
ThroughputController (org.apache.hadoop.hbase.regionserver.throttle.ThroughputController): 1
User (org.apache.hadoop.hbase.security.User): 1