Example 1 with CompactionContext

use of org.apache.hadoop.hbase.regionserver.compactions.CompactionContext in project hbase by apache.

the class CompactSplitThread method requestCompactionInternal.

/**
   * @param r region store belongs to
   * @param s Store to request compaction on
   * @param why Why compaction requested -- used in debug messages
   * @param priority override the default priority (NO_PRIORITY == decide)
   * @param request custom compaction request. Can be <tt>null</tt> in which case a simple
   *          compaction will be used.
   */
private synchronized CompactionRequest requestCompactionInternal(final Region r, final Store s,
        final String why, int priority, CompactionRequest request, boolean selectNow, User user)
        throws IOException {
    if (this.server.isStopped() || (r.getTableDesc() != null && !r.getTableDesc().isCompactionEnabled())) {
        return null;
    }
    CompactionContext compaction = null;
    if (selectNow) {
        compaction = selectCompaction(r, s, priority, request, user);
        // message logged inside
        if (compaction == null)
            return null;
    }
    // We assume that most compactions are small. So, put system compactions into small
    // pool; we will do selection there, and move to large pool if necessary.
    ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize())) ? longCompactions : shortCompactions;
    pool.execute(new CompactionRunner(s, r, compaction, pool, user));
    if (LOG.isDebugEnabled()) {
        String type = (pool == shortCompactions) ? "Small " : "Large ";
        LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system") + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
    }
    return selectNow ? compaction.getRequest() : null;
}
Also used : CompactionContext(org.apache.hadoop.hbase.regionserver.compactions.CompactionContext) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
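For orientation, here is a minimal, self-contained sketch of the small/large pool routing that requestCompactionInternal performs. The class, field, and parameter names below are hypothetical stand-ins, not HBase APIs; in HBase the size cutoff behind Store.throttleCompaction is derived from compaction configuration (the hbase.regionserver.thread.compaction.throttle setting).

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

public class PoolRoutingSketch {
    // Stand-ins for CompactSplitThread's two executors.
    private final ThreadPoolExecutor longCompactions =
            (ThreadPoolExecutor) Executors.newFixedThreadPool(1);
    private final ThreadPoolExecutor shortCompactions =
            (ThreadPoolExecutor) Executors.newFixedThreadPool(4);

    // Mirrors the ternary above: only a selected-now request whose total size
    // exceeds the throttle point goes to the long (large) pool; everything
    // else, including system compactions selected later, starts small.
    ThreadPoolExecutor choosePool(boolean selectNow, long requestSize, long throttlePoint) {
        return (selectNow && requestSize > throttlePoint) ? longCompactions : shortCompactions;
    }
}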

Example 2 with CompactionContext

use of org.apache.hadoop.hbase.regionserver.compactions.CompactionContext in project hbase by apache.

the class HRegion method compactStore.

/**
   * This is a helper function that compacts the given store.
   * It is used by utilities and testing.
   *
   * @throws IOException e
   */
@VisibleForTesting
void compactStore(byte[] family, ThroughputController throughputController) throws IOException {
    Store s = getStore(family);
    CompactionContext compaction = s.requestCompaction();
    if (compaction != null) {
        compact(compaction, s, throughputController, null);
    }
}
Also used : CompactionContext(org.apache.hadoop.hbase.regionserver.compactions.CompactionContext) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
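The pattern here recurs throughout these examples: request a compaction and only act when selection actually produced something, since Store.requestCompaction() returns null when no files qualify. A compilable sketch of that guard, with hypothetical stand-in types rather than the real HBase interfaces:

interface CompactionLike { }

interface StoreLike {
    // Returns null when the policy selects no files, as Store.requestCompaction() does.
    CompactionLike requestCompaction();
    void compact(CompactionLike compaction);
}

class CompactIfSelected {
    static void run(StoreLike store) {
        CompactionLike compaction = store.requestCompaction();
        if (compaction != null) {  // nothing to do when selection came back empty
            store.compact(compaction);
        }
    }
}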

Example 3 with CompactionContext

use of org.apache.hadoop.hbase.regionserver.compactions.CompactionContext in project hbase by apache.

the class HStore method requestCompaction.

@Override
public CompactionContext requestCompaction(int priority, final CompactionRequest baseRequest, User user) throws IOException {
    // don't even select for compaction if writes are disabled
    if (!this.areWritesEnabled()) {
        return null;
    }
    // Before we do compaction, try to get rid of unneeded files to simplify things.
    removeUnneededFiles();
    final CompactionContext compaction = storeEngine.createCompaction();
    CompactionRequest request = null;
    this.lock.readLock().lock();
    try {
        synchronized (filesCompacting) {
            // First, see if coprocessor would want to override selection.
            if (this.getCoprocessorHost() != null) {
                final List<StoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
                boolean override = getCoprocessorHost().preCompactSelection(this, candidatesForCoproc, baseRequest, user);
                if (override) {
                    // Coprocessor is overriding normal file selection.
                    compaction.forceSelect(new CompactionRequest(candidatesForCoproc));
                }
            }
            // Normal case - coprocessor is not overriding file selection.
            if (!compaction.hasSelection()) {
                boolean isUserCompaction = priority == Store.PRIORITY_USER;
                boolean mayUseOffPeak = offPeakHours.isOffPeakHour() && offPeakCompactionTracker.compareAndSet(false, true);
                try {
                    compaction.select(this.filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
                } catch (IOException e) {
                    if (mayUseOffPeak) {
                        offPeakCompactionTracker.set(false);
                    }
                    throw e;
                }
                assert compaction.hasSelection();
                if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
                    // Compaction policy doesn't want to take advantage of off-peak.
                    offPeakCompactionTracker.set(false);
                }
            }
            if (this.getCoprocessorHost() != null) {
                this.getCoprocessorHost().postCompactSelection(this, ImmutableList.copyOf(compaction.getRequest().getFiles()), baseRequest, user);
            }
            // Selected files; see if we have a compaction with some custom base request.
            if (baseRequest != null) {
                // Update the request with what the system thinks the request should be;
                // it's up to the request if it wants to listen.
                compaction.forceSelect(baseRequest.combineWith(compaction.getRequest()));
            }
            // Finally, we have the resulting files list. Check if we have any files at all.
            request = compaction.getRequest();
            final Collection<StoreFile> selectedFiles = request.getFiles();
            if (selectedFiles.isEmpty()) {
                return null;
            }
            addToCompactingFiles(selectedFiles);
            // If we're enqueuing a major, clear the force flag.
            this.forceMajor = this.forceMajor && !request.isMajor();
            // Set common request properties.
            // Set priority, either override value supplied by caller or from store.
            request.setPriority((priority != Store.NO_PRIORITY) ? priority : getCompactPriority());
            request.setDescription(getRegionInfo().getRegionNameAsString(), getColumnFamilyName());
        }
    } finally {
        this.lock.readLock().unlock();
    }
    LOG.debug(getRegionInfo().getEncodedName() + " - " + getColumnFamilyName() + ": Initiating " + (request.isMajor() ? "major" : "minor") + " compaction" + (request.isAllFiles() ? " (all files)" : ""));
    this.region.reportCompactionRequestStart(request.isMajor());
    return compaction;
}
Also used : CompactionContext(org.apache.hadoop.hbase.regionserver.compactions.CompactionContext) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) CompactionRequest(org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest)
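The off-peak bookkeeping in this method is worth isolating: offPeakCompactionTracker is a shared AtomicBoolean, and compareAndSet ensures at most one store claims the off-peak slot at a time; the slot must be released again if selection throws or the chosen request turns out not to be off-peak. A minimal sketch of that gate using only the JDK (class and method names are hypothetical):

import java.util.concurrent.atomic.AtomicBoolean;

class OffPeakGate {
    // Shared across stores; true while some store holds the off-peak slot.
    private final AtomicBoolean tracker = new AtomicBoolean(false);

    boolean tryClaim(boolean isOffPeakHour) {
        // Succeeds for at most one caller at a time, and only during off-peak hours.
        return isOffPeakHour && tracker.compareAndSet(false, true);
    }

    void release() {
        // Called when selection fails or the resulting request is not off-peak.
        tracker.set(false);
    }
}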

Example 4 with CompactionContext

use of org.apache.hadoop.hbase.regionserver.compactions.CompactionContext in project hbase by apache.

the class HRegion method compact.

@Override
public void compact(final boolean majorCompaction) throws IOException {
    if (majorCompaction) {
        triggerMajorCompaction();
    }
    for (Store s : getStores()) {
        CompactionContext compaction = s.requestCompaction();
        if (compaction != null) {
            ThroughputController controller = null;
            if (rsServices != null) {
                controller = CompactionThroughputControllerFactory.create(rsServices, conf);
            }
            if (controller == null) {
                controller = NoLimitThroughputController.INSTANCE;
            }
            compact(compaction, s, controller, null);
        }
    }
}
Also used : CompactionContext(org.apache.hadoop.hbase.regionserver.compactions.CompactionContext) ThroughputController(org.apache.hadoop.hbase.regionserver.throttle.ThroughputController) NoLimitThroughputController(org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController)
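The controller selection above is a configured-or-fallback pattern: ask the factory for a throughput controller, and if the environment provides none, fall back to a no-limit pass-through. A self-contained sketch with hypothetical stand-in types, not HBase's ThroughputController interface:

@FunctionalInterface
interface ThrottleLike {
    // Returns how many bytes the caller may actually write.
    long control(long bytesRequested);
}

class ThrottleSelection {
    static final ThrottleLike NO_LIMIT = bytes -> bytes;  // pass-through, no throttling

    static ThrottleLike choose(ThrottleLike configured) {
        // Mirrors the null checks in compact(boolean): prefer the configured
        // controller, otherwise fall back to the no-limit instance.
        return (configured != null) ? configured : NO_LIMIT;
    }
}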

Example 5 with CompactionContext

use of org.apache.hadoop.hbase.regionserver.compactions.CompactionContext in project hbase by apache.

the class TestHMobStore method testMOBStoreEncryption.

@Test
public void testMOBStoreEncryption() throws Exception {
    final Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    SecureRandom rng = new SecureRandom();
    byte[] keyBytes = new byte[AES.KEY_LENGTH];
    rng.nextBytes(keyBytes);
    String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    Key cfKey = new SecretKeySpec(keyBytes, algorithm);
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMobEnabled(true);
    hcd.setMobThreshold(100);
    hcd.setMaxVersions(4);
    hcd.setEncryptionType(algorithm);
    hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), cfKey));
    init(name.getMethodName(), conf, hcd, false);
    this.store.add(new KeyValue(row, family, qf1, 1, value), null);
    this.store.add(new KeyValue(row, family, qf2, 1, value), null);
    this.store.add(new KeyValue(row, family, qf3, 1, value), null);
    flush(1);
    this.store.add(new KeyValue(row, family, qf4, 1, value), null);
    this.store.add(new KeyValue(row, family, qf5, 1, value), null);
    this.store.add(new KeyValue(row, family, qf6, 1, value), null);
    flush(2);
    Collection<StoreFile> storefiles = this.store.getStorefiles();
    checkMobHFileEncrytption(storefiles);
    // Scan the values
    Scan scan = new Scan(get);
    InternalScanner scanner = (InternalScanner) store.getScanner(scan, scan.getFamilyMap().get(store.getFamily().getName()), 0);
    List<Cell> results = new ArrayList<>();
    scanner.next(results);
    Collections.sort(results, CellComparator.COMPARATOR);
    scanner.close();
    Assert.assertEquals(expected.size(), results.size());
    for (int i = 0; i < results.size(); i++) {
        Assert.assertEquals(expected.get(i), results.get(i));
    }
    // Trigger major compaction
    this.store.triggerMajorCompaction();
    CompactionContext requestCompaction = this.store.requestCompaction(1, null);
    this.store.compact(requestCompaction, NoLimitThroughputController.INSTANCE, null);
    Assert.assertEquals(1, this.store.getStorefiles().size());
    // Check encryption after compaction
    checkMobHFileEncrytption(this.store.getStorefiles());
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) SecureRandom(java.security.SecureRandom) KeyProviderForTesting(org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting) CompactionContext(org.apache.hadoop.hbase.regionserver.compactions.CompactionContext) SecretKeySpec(javax.crypto.spec.SecretKeySpec) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Key(java.security.Key) Test(org.junit.Test)
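The encryption setup in this test is mostly plain JCE before it ever touches HBase: generate random key material, wrap it in a SecretKeySpec, then hand the wrapped key to the column descriptor. The JDK portion in isolation (the 16-byte length here is an assumed AES-128 key size; the test itself uses HBase's AES.KEY_LENGTH constant):

import java.security.Key;
import java.security.SecureRandom;
import javax.crypto.spec.SecretKeySpec;

class CfKeySketch {
    static Key newAesKey() {
        byte[] keyBytes = new byte[16];          // assumed AES-128 key length
        new SecureRandom().nextBytes(keyBytes);  // cryptographically strong randomness
        return new SecretKeySpec(keyBytes, "AES");
    }
}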

Aggregations

CompactionContext (org.apache.hadoop.hbase.regionserver.compactions.CompactionContext) 7
Test (org.junit.Test) 3
Configuration (org.apache.hadoop.conf.Configuration) 2
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) 2
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 2
CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest) 2
NoLimitThroughputController (org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController) 2
ThroughputController (org.apache.hadoop.hbase.regionserver.throttle.ThroughputController) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
IOException (java.io.IOException) 1
InterruptedIOException (java.io.InterruptedIOException) 1
Key (java.security.Key) 1
SecureRandom (java.security.SecureRandom) 1
ArrayList (java.util.ArrayList) 1
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor) 1
SecretKeySpec (javax.crypto.spec.SecretKeySpec) 1
Cell (org.apache.hadoop.hbase.Cell) 1
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 1
KeyValue (org.apache.hadoop.hbase.KeyValue) 1
TableName (org.apache.hadoop.hbase.TableName) 1