Example 1 with RegionServerSpaceQuotaManager

Use of org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager in project hbase by apache.

From the class RSRpcServices, the method getSpaceQuotaSnapshots:

@Override
public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller, GetSpaceQuotaSnapshotsRequest request) throws ServiceException {
    try {
        final RegionServerSpaceQuotaManager manager = server.getRegionServerSpaceQuotaManager();
        final GetSpaceQuotaSnapshotsResponse.Builder builder = GetSpaceQuotaSnapshotsResponse.newBuilder();
        if (manager != null) {
            final Map<TableName, SpaceQuotaSnapshot> snapshots = manager.copyQuotaSnapshots();
            for (Entry<TableName, SpaceQuotaSnapshot> snapshot : snapshots.entrySet()) {
                builder.addSnapshots(TableQuotaSnapshot.newBuilder().setTableName(ProtobufUtil.toProtoTableName(snapshot.getKey())).setSnapshot(SpaceQuotaSnapshot.toProtoSnapshot(snapshot.getValue())).build());
            }
        }
        return builder.build();
    } catch (Exception e) {
        throw new ServiceException(e);
    }
}
Also used: SpaceQuotaSnapshot(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot) TableName(org.apache.hadoop.hbase.TableName) RegionServerSpaceQuotaManager(org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException) IOException(java.io.IOException) LeaseStillHeldException(org.apache.hadoop.hbase.regionserver.LeaseManager.LeaseStillHeldException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) UnknownProtocolException(org.apache.hadoop.hbase.exceptions.UnknownProtocolException) UncheckedIOException(java.io.UncheckedIOException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) FileNotFoundException(java.io.FileNotFoundException) BindException(java.net.BindException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) GetSpaceQuotaSnapshotsResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse)
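
For comparison with the server-side handler above, here is how a client might read these per-region-server snapshots. This is a minimal sketch assuming a recent HBase 2.x client, where Admin exposes getRegionServers() and getRegionServerSpaceQuotaSnapshots(ServerName); those method names and the SpaceQuotaSnapshotView accessors may vary between versions.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotView;

public class PrintSpaceQuotaSnapshots {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask every live region server for its space quota snapshots; each
            // request is answered by the getSpaceQuotaSnapshots RPC handler above,
            // which copies them out of RegionServerSpaceQuotaManager.
            for (ServerName server : admin.getRegionServers()) {
                Map<TableName, ? extends SpaceQuotaSnapshotView> snapshots =
                        admin.getRegionServerSpaceQuotaSnapshots(server);
                snapshots.forEach((table, snap) -> System.out.println(
                        server + " " + table
                        + " usage=" + snap.getUsage()
                        + " limit=" + snap.getLimit()
                        + " inViolation=" + snap.getQuotaStatus().isInViolation()));
            }
        }
    }
}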

Example 2 with RegionServerSpaceQuotaManager

Use of org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager in project hbase by apache.

From the class HRegionServer, the method initializeThreads:

private void initializeThreads() {
    // Cache flushing thread.
    this.cacheFlusher = new MemStoreFlusher(conf, this);
    // Compaction thread
    this.compactSplitThread = new CompactSplit(this);
    // Background thread to check for compactions; needed if a region has not gotten
    // updates in a while. It takes care of not checking too frequently, on a
    // store-by-store basis.
    this.compactionChecker = new CompactionChecker(this, this.compactionCheckFrequency, this);
    this.periodicFlusher = new PeriodicMemStoreFlusher(this.flushCheckFrequency, this);
    this.leaseManager = new LeaseManager(this.threadWakeFrequency);
    final boolean isSlowLogTableEnabled = conf.getBoolean(HConstants.SLOW_LOG_SYS_TABLE_ENABLED_KEY, HConstants.DEFAULT_SLOW_LOG_SYS_TABLE_ENABLED_KEY);
    if (isSlowLogTableEnabled) {
        // default chore duration: 10 min
        final int duration = conf.getInt("hbase.slowlog.systable.chore.duration", 10 * 60 * 1000);
        slowLogTableOpsChore = new SlowLogTableOpsChore(this, duration, this.namedQueueRecorder);
    }
    if (this.nonceManager != null) {
        // Create the scheduled chore that cleans up nonces.
        nonceManagerChore = this.nonceManager.createCleanupScheduledChore(this);
    }
    // Setup the Quota Manager
    rsQuotaManager = new RegionServerRpcQuotaManager(this);
    rsSpaceQuotaManager = new RegionServerSpaceQuotaManager(this);
    if (QuotaUtil.isQuotaEnabled(conf)) {
        this.fsUtilizationChore = new FileSystemUtilizationChore(this);
    }
    boolean onlyMetaRefresh = false;
    int storefileRefreshPeriod = conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, StorefileRefresherChore.DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD);
    if (storefileRefreshPeriod == 0) {
        storefileRefreshPeriod = conf.getInt(StorefileRefresherChore.REGIONSERVER_META_STOREFILE_REFRESH_PERIOD, StorefileRefresherChore.DEFAULT_REGIONSERVER_STOREFILE_REFRESH_PERIOD);
        onlyMetaRefresh = true;
    }
    if (storefileRefreshPeriod > 0) {
        this.storefileRefresher = new StorefileRefresherChore(storefileRefreshPeriod, onlyMetaRefresh, this, this);
    }
    int brokenStoreFileCleanerPeriod = conf.getInt(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_PERIOD, BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_PERIOD);
    int brokenStoreFileCleanerDelay = conf.getInt(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY, BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY);
    double brokenStoreFileCleanerDelayJitter = conf.getDouble(BrokenStoreFileCleaner.BROKEN_STOREFILE_CLEANER_DELAY_JITTER, BrokenStoreFileCleaner.DEFAULT_BROKEN_STOREFILE_CLEANER_DELAY_JITTER);
    double jitterRate = (RandomUtils.nextDouble() - 0.5D) * brokenStoreFileCleanerDelayJitter;
    long jitterValue = Math.round(brokenStoreFileCleanerDelay * jitterRate);
    this.brokenStoreFileCleaner = new BrokenStoreFileCleaner((int) (brokenStoreFileCleanerDelay + jitterValue), brokenStoreFileCleanerPeriod, this, conf, this);
    registerConfigurationObservers();
}
Also used: RegionServerRpcQuotaManager(org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager) FileSystemUtilizationChore(org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore) RegionServerSpaceQuotaManager(org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager) SlowLogTableOpsChore(org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore)
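
Most of the background work wired up in initializeThreads follows HBase's ScheduledChore pattern: a named periodic task owned by a Stoppable (the region server) and executed by the server's ChoreService. Note also the start-delay jitter computed for BrokenStoreFileCleaner: with configured delay D, jitter fraction J, and random value u (intended to be uniform in [0, 1)), the effective initial delay is D + round(D * (u - 0.5) * J), spreading starts over roughly D ± D*J/2 so that restarted servers do not all run the cleaner at once. Below is a minimal, hypothetical chore in the same style; HeartbeatLogChore is an invented name, not an HBase class.

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Hypothetical chore for illustration only; not part of HBase.
public class HeartbeatLogChore extends ScheduledChore {

    public HeartbeatLogChore(Stoppable stopper, int periodMillis) {
        // name, owner that stops the chore when the server shuts down, period in ms
        super("HeartbeatLogChore", stopper, periodMillis);
    }

    @Override
    protected void chore() {
        // Invoked by the server's ChoreService once per period, the same way
        // CompactionChecker and PeriodicMemStoreFlusher run above.
        System.out.println("region server alive at " + System.currentTimeMillis());
    }
}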

Example 3 with RegionServerSpaceQuotaManager

Use of org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager in project hbase by apache.

From the class CompactSplit, the method requestCompactionInternal:

// set protected for test
protected void requestCompactionInternal(HRegion region, HStore store, String why, int priority, boolean selectNow, CompactionLifeCycleTracker tracker, CompactionCompleteTracker completeTracker, User user) throws IOException {
    if (this.server.isStopped() || (region.getTableDescriptor() != null && !region.getTableDescriptor().isCompactionEnabled())) {
        return;
    }
    RegionServerSpaceQuotaManager spaceQuotaManager = this.server.getRegionServerSpaceQuotaManager();
    if (user != null && !Superusers.isSuperUser(user) && spaceQuotaManager != null && spaceQuotaManager.areCompactionsDisabled(region.getTableDescriptor().getTableName())) {
        // Enter here only when:
        // it's a user-generated request, the user is not a superuser, quotas are
        // enabled, and an active space quota violation policy disables compactions
        // for this table.
        String reason = "Ignoring compaction request for " + region + " as an active space quota violation policy disallows compactions.";
        tracker.notExecuted(store, reason);
        completeTracker.completed(store);
        LOG.debug(reason);
        return;
    }
    CompactionContext compaction;
    if (selectNow) {
        Optional<CompactionContext> c = selectCompaction(region, store, priority, tracker, completeTracker, user);
        if (!c.isPresent()) {
            // message logged inside
            return;
        }
        compaction = c.get();
    } else {
        compaction = null;
    }
    ThreadPoolExecutor pool;
    if (selectNow) {
        // compaction is guaranteed non-null here: when selectNow is true and no
        // compaction was selected, we already returned above.
        pool = store.throttleCompaction(compaction.getRequest().getSize()) ? longCompactions : shortCompactions;
    } else {
        // We assume that most compactions are small. So, put system compactions into small
        // pool; we will do selection there, and move to large pool if necessary.
        pool = shortCompactions;
    }
    pool.execute(new CompactionRunner(store, region, compaction, tracker, completeTracker, pool, user));
    if (LOG.isDebugEnabled()) {
        LOG.debug("Add compact mark for store {}, priority={}, current under compaction " + "store size is {}", getStoreNameForUnderCompaction(store), priority, underCompactionStores.size());
    }
    underCompactionStores.add(getStoreNameForUnderCompaction(store));
    region.incrementCompactionsQueuedCount();
    if (LOG.isDebugEnabled()) {
        String type = (pool == shortCompactions) ? "Small " : "Large ";
        LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system") + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
    }
}
Also used: RegionServerSpaceQuotaManager(org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager) CompactionContext(org.apache.hadoop.hbase.regionserver.compactions.CompactionContext) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
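
The pool selection near the end is size-based throttling: store.throttleCompaction(size) reports whether the selected compaction is large enough that it should run in longCompactions, keeping big jobs from starving quick ones. The following self-contained sketch reproduces just that decision; the pool sizes and byte threshold are invented for illustration, whereas real HBase derives the threshold from store configuration.

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

// Illustrative only: the pool sizes and the byte threshold are invented, and
// real HBase makes this choice via store.throttleCompaction(size).
public class CompactionPoolChooser {

    private final ThreadPoolExecutor longCompactions =
            (ThreadPoolExecutor) Executors.newFixedThreadPool(1);
    private final ThreadPoolExecutor shortCompactions =
            (ThreadPoolExecutor) Executors.newFixedThreadPool(4);
    private final long throttleBytes;

    public CompactionPoolChooser(long throttleBytes) {
        this.throttleBytes = throttleBytes;
    }

    // Requests above the threshold run in the long pool so they cannot hold up
    // the many small compactions that flow through the short pool.
    public ThreadPoolExecutor poolFor(long compactionInputBytes) {
        return compactionInputBytes > throttleBytes ? longCompactions : shortCompactions;
    }

    public static void main(String[] args) {
        CompactionPoolChooser chooser = new CompactionPoolChooser(64L * 1024 * 1024);
        System.out.println(chooser.poolFor(10L * 1024 * 1024) == chooser.shortCompactions);  // true
        System.out.println(chooser.poolFor(512L * 1024 * 1024) == chooser.longCompactions);  // true
        chooser.shortCompactions.shutdown();
        chooser.longCompactions.shutdown();
    }
}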

Example 4 with RegionServerSpaceQuotaManager

Use of org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager in project hbase by apache.

From the class HRegion, the method internalFlushCacheAndCommit:

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", justification = "Intentional; notify is about completed flush")
FlushResultImpl internalFlushCacheAndCommit(WAL wal, MonitoredTask status, PrepareFlushResult prepareResult, Collection<HStore> storesToFlush) throws IOException {
    // prepare flush context is carried via PrepareFlushResult
    TreeMap<byte[], StoreFlushContext> storeFlushCtxs = prepareResult.storeFlushCtxs;
    TreeMap<byte[], List<Path>> committedFiles = prepareResult.committedFiles;
    long startTime = prepareResult.startTime;
    long flushOpSeqId = prepareResult.flushOpSeqId;
    long flushedSeqId = prepareResult.flushedSeqId;
    String s = "Flushing stores of " + this;
    status.setStatus(s);
    if (LOG.isTraceEnabled())
        LOG.trace(s);
    // Any failure from here on out will be catastrophic, requiring a server
    // restart so the wal content can be replayed and put back into the memstore.
    // Otherwise the snapshot content, while backed up in the wal, will not be
    // part of the running server's state.
    boolean compactionRequested = false;
    long flushedOutputFileSize = 0;
    try {
        for (StoreFlushContext flush : storeFlushCtxs.values()) {
            flush.flushCache(status);
        }
        // Switch snapshot (in memstore) -> new hfile (thus causing
        // all the store scanners to reset/reseek).
        for (Map.Entry<byte[], StoreFlushContext> flushEntry : storeFlushCtxs.entrySet()) {
            StoreFlushContext sfc = flushEntry.getValue();
            boolean needsCompaction = sfc.commit(status);
            if (needsCompaction) {
                compactionRequested = true;
            }
            byte[] storeName = flushEntry.getKey();
            List<Path> storeCommittedFiles = sfc.getCommittedFiles();
            committedFiles.put(storeName, storeCommittedFiles);
            // The flush committed no files, meaning the flush was empty or was canceled.
            if (storeCommittedFiles == null || storeCommittedFiles.isEmpty()) {
                MemStoreSize storeFlushableSize = prepareResult.storeFlushableSize.get(storeName);
                prepareResult.totalFlushableSize.decMemStoreSize(storeFlushableSize);
            }
            flushedOutputFileSize += sfc.getOutputFileSize();
        }
        storeFlushCtxs.clear();
        // Set down the memstore size by amount of flush.
        MemStoreSize mss = prepareResult.totalFlushableSize.getMemStoreSize();
        this.decrMemStoreSize(mss);
        // During startup, quota manager may not be initialized yet.
        if (rsServices != null) {
            RegionServerSpaceQuotaManager quotaManager = rsServices.getRegionServerSpaceQuotaManager();
            if (quotaManager != null) {
                quotaManager.getRegionSizeStore().incrementRegionSize(this.getRegionInfo(), flushedOutputFileSize);
            }
        }
        if (wal != null) {
            // write flush marker to WAL. If fail, we should throw DroppedSnapshotException
            FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.COMMIT_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles);
            WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, true, mvcc, regionReplicationSink.orElse(null));
        }
    } catch (Throwable t) {
        // A failure here means the snapshot was not persisted; the wal must be
        // replayed (currently only a server restart will do this), so we catch
        // Throwable and abort.
        if (wal != null) {
            try {
                FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles);
                WALUtil.writeFlushMarker(wal, this.replicationScope, getRegionInfo(), desc, false, mvcc, null);
            } catch (Throwable ex) {
                LOG.warn(getRegionInfo().getEncodedName() + " : " + "failed writing ABORT_FLUSH marker to WAL", ex);
            // ignore this since we will be aborting the RS with DSE.
            }
            wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
        }
        DroppedSnapshotException dse = new DroppedSnapshotException("region: " + Bytes.toStringBinary(getRegionInfo().getRegionName()), t);
        status.abort("Flush failed: " + StringUtils.stringifyException(t));
        // Callers for flushcache() should catch DroppedSnapshotException and abort the region server.
        // However, since we may have the region read lock, we cannot call close(true) here since
        // we cannot promote to a write lock. Instead we are setting closing so that all other region
        // operations except for close will be rejected.
        this.closing.set(true);
        if (rsServices != null) {
            // This is a safeguard against the case where the caller fails to explicitly handle aborting
            rsServices.abort("Replay of WAL required. Forcing server shutdown", dse);
        }
        throw dse;
    }
    // If we get to here, the HStores have been written.
    if (wal != null) {
        wal.completeCacheFlush(this.getRegionInfo().getEncodedNameAsBytes(), flushedSeqId);
    }
    // Record latest flush time
    for (HStore store : storesToFlush) {
        this.lastStoreFlushTimeMap.put(store, startTime);
    }
    this.maxFlushedSeqId = flushedSeqId;
    this.lastFlushOpSeqId = flushOpSeqId;
    // Finally, notify anyone waiting on the memstore to clear,
    // e.g. checkResources().
    synchronized (this) {
        // FindBugs NN_NAKED_NOTIFY
        notifyAll();
    }
    long time = EnvironmentEdgeManager.currentTime() - startTime;
    MemStoreSize mss = prepareResult.totalFlushableSize.getMemStoreSize();
    long memstoresize = this.memStoreSizing.getMemStoreSize().getDataSize();
    String msg = "Finished flush of" + " dataSize ~" + StringUtils.byteDesc(mss.getDataSize()) + "/" + mss.getDataSize() + ", heapSize ~" + StringUtils.byteDesc(mss.getHeapSize()) + "/" + mss.getHeapSize() + ", currentSize=" + StringUtils.byteDesc(memstoresize) + "/" + memstoresize + " for " + this.getRegionInfo().getEncodedName() + " in " + time + "ms, sequenceid=" + flushOpSeqId + ", compaction requested=" + compactionRequested + ((wal == null) ? "; wal=null" : "");
    LOG.info(msg);
    status.setStatus(msg);
    if (rsServices != null && rsServices.getMetrics() != null) {
        rsServices.getMetrics().updateFlush(getTableDescriptor().getTableName().getNameAsString(), time, mss.getDataSize(), flushedOutputFileSize);
    }
    return new FlushResultImpl(compactionRequested ? FlushResult.Result.FLUSHED_COMPACTION_NEEDED : FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, flushOpSeqId);
}
Also used: Path(org.apache.hadoop.fs.Path) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) RegionServerSpaceQuotaManager(org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) NavigableMap(java.util.NavigableMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashMap(java.util.HashMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap)
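
A detail worth copying from this method is the double null guard around the quota bookkeeping: rsServices may be null when a region is opened outside a full region server (for example by tools or tests), and the space quota manager may not be initialized yet during startup, so flushed bytes are recorded only when both exist. Here is a minimal sketch of the same pattern; the interfaces are simplified stand-ins, not the real HBase types.

// Simplified stand-ins for RegionServerServices, RegionServerSpaceQuotaManager,
// and RegionSizeStore; the real HBase types have richer APIs.
public class FlushSizeAccounting {

    interface RegionSizeStore {
        void incrementRegionSize(String encodedRegionName, long deltaBytes);
    }

    interface SpaceQuotaManager {
        RegionSizeStore getRegionSizeStore();
    }

    interface RegionServerServices {
        SpaceQuotaManager getRegionServerSpaceQuotaManager();
    }

    // Mirrors internalFlushCacheAndCommit: both handles may legitimately be null
    // (standalone region use, or a server still starting up), so each is checked
    // before the flushed bytes are added to the region's tracked size.
    static void recordFlushedBytes(RegionServerServices services, String region, long flushedBytes) {
        if (services == null) {
            return; // no region server context; nothing to record
        }
        SpaceQuotaManager quotaManager = services.getRegionServerSpaceQuotaManager();
        if (quotaManager != null) {
            quotaManager.getRegionSizeStore().incrementRegionSize(region, flushedBytes);
        }
    }

    public static void main(String[] args) {
        recordFlushedBytes(null, "region-1", 1024); // safe no-op during "startup"
    }
}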

Aggregations

RegionServerSpaceQuotaManager (org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager)4
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException)2
FileNotFoundException (java.io.FileNotFoundException)1
IOException (java.io.IOException)1
UncheckedIOException (java.io.UncheckedIOException)1
BindException (java.net.BindException)1
ArrayList (java.util.ArrayList)1
HashMap (java.util.HashMap)1
List (java.util.List)1
Map (java.util.Map)1
NavigableMap (java.util.NavigableMap)1
TreeMap (java.util.TreeMap)1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)1
ConcurrentMap (java.util.concurrent.ConcurrentMap)1
ConcurrentSkipListMap (java.util.concurrent.ConcurrentSkipListMap)1
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)1
Path (org.apache.hadoop.fs.Path)1
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)1
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException)1
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException)1