Search in sources :

Example 21 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in the Apache HBase project.

Class HRegion, method getHDFSBlocksDistribution.

public HDFSBlocksDistribution getHDFSBlocksDistribution() {
    HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
    stores.values().stream()
        .filter(s -> s.getStorefiles() != null)
        .flatMap(s -> s.getStorefiles().stream())
        .map(HStoreFile::getHDFSBlockDistribution)
        .forEachOrdered(hdfsBlocksDistribution::add);
    return hdfsBlocksDistribution;
}
Also used : Stream(java.util.stream.Stream) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution)
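
The distribution returned above aggregates every store file in the region, so a caller can ask it directly for locality information. A minimal usage sketch, assuming an already-opened HRegion named region and an example host name (both illustrative, not from the original):

// Sketch only: "region" and the host name are assumed example values.
HDFSBlocksDistribution distribution = region.getHDFSBlocksDistribution();

// Fraction of the region's HFile bytes that live on the given host (1.0 = fully local).
float locality = distribution.getBlockLocalityIndex("regionserver-1.example.com");

// Hosts holding the largest share of the region's data.
List<String> topHosts = distribution.getTopHosts();
System.out.println("locality=" + locality + " topHosts=" + topHosts);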

Example 22 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in the Apache HBase project.

Class HRegion, method computeHDFSBlocksDistribution.

/**
 * A helper function to compute the HDFS block distribution for a region on demand.
 * @param conf configuration
 * @param tableDescriptor TableDescriptor of the table
 * @param regionInfo RegionInfo describing the region
 * @param tablePath the table directory
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException if the store files cannot be listed or read
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf, TableDescriptor tableDescriptor, RegionInfo regionInfo, Path tablePath) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
    FileSystem fs = tablePath.getFileSystem(conf);
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
    for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
        List<LocatedFileStatus> locatedFileStatusList = HRegionFileSystem.getStoreFilesLocatedStatus(regionFs, family.getNameAsString(), true);
        if (locatedFileStatusList == null) {
            continue;
        }
        for (LocatedFileStatus status : locatedFileStatusList) {
            Path p = status.getPath();
            if (StoreFileInfo.isReference(p) || HFileLink.isHFileLink(p)) {
                // Only construct a StoreFileInfo object if it's not a plain HFile; this saves
                // object creation
                StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, status);
                hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
            } else if (StoreFileInfo.isHFile(p)) {
                // If it's an HFile, just add its locations to the block distribution directly,
                // without creating more objects (not even another HDFSBlocksDistribution)
                FSUtils.addToHDFSBlocksDistribution(hdfsBlocksDistribution, status.getBlockLocations());
            } else {
                throw new IOException("path=" + p + " doesn't look like a valid StoreFile");
            }
        }
    }
    return hdfsBlocksDistribution;
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) TimeoutIOException(org.apache.hadoop.hbase.exceptions.TimeoutIOException) InterruptedIOException(java.io.InterruptedIOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution)
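
Because computeHDFSBlocksDistribution is static, it can also be called from tooling without opening the region. A minimal sketch, assuming an existing Admin connection; the table name and first-region lookup are illustrative values, not from the original:

// Sketch only: "ns:myTable" and the Admin instance are assumed example values.
Configuration conf = HBaseConfiguration.create();
TableName tableName = TableName.valueOf("ns:myTable");
TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
RegionInfo regionInfo = admin.getRegions(tableName).get(0); // first region, for illustration
Path tablePath = CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), tableName);

HDFSBlocksDistribution distribution =
    HRegion.computeHDFSBlocksDistribution(conf, tableDescriptor, regionInfo, tablePath);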

Example 23 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in the Apache HBase project.

Class DateTieredCompactionPolicy, method shouldPerformMajorCompaction.

@Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact) throws IOException {
    long mcTime = getNextMajorCompactTime(filesToCompact);
    if (filesToCompact == null || mcTime == 0) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("filesToCompact: " + filesToCompact + " mcTime: " + mcTime);
        }
        return false;
    }
    // TODO: Use better method for determining stamp of last major (HBASE-2990)
    long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
    long now = EnvironmentEdgeManager.currentTime();
    if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("lowTimestamp: " + lowTimestamp + " now: " + now + " mcTime: " + mcTime);
        }
        return false;
    }
    long cfTTL = this.storeConfigInfo.getStoreFileTtl();
    HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
    List<Long> boundaries = getCompactBoundariesForMajor(filesToCompact, now);
    boolean[] filesInWindow = new boolean[boundaries.size()];
    for (HStoreFile file : filesToCompact) {
        OptionalLong minTimestamp = file.getMinimumTimestamp();
        long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
        if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) {
            LOG.debug("Major compaction triggered on store " + this + "; for TTL maintenance");
            return true;
        }
        if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) {
            LOG.debug("Major compaction triggered on store " + this + ", because there are new files and time since last major compaction is " + (now - lowTimestamp) + "ms");
            return true;
        }
        int lowerWindowIndex = Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE));
        int upperWindowIndex = Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE));
        // Handle boundary conditions and negative values of binarySearch
        lowerWindowIndex = (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex;
        upperWindowIndex = (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex;
        if (lowerWindowIndex != upperWindowIndex) {
            LOG.debug("Major compaction triggered on store " + this + "; because file " + file.getPath() + " has data with timestamps crossing window boundaries");
            return true;
        } else if (filesInWindow[upperWindowIndex]) {
            LOG.debug("Major compaction triggered on store " + this + "; because some window contains more than one file");
            return true;
        } else {
            filesInWindow[upperWindowIndex] = true;
        }
        hdfsBlocksDistribution.add(file.getHDFSBlockDistribution());
    }
    float blockLocalityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER));
    if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
        LOG.debug("Major compaction triggered on store " + this + "; to make hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex + " (min " + comConf.getMinLocalityToForceCompact() + ")");
        return true;
    }
    LOG.debug("Skipping major compaction of " + this + ", because the files are already major compacted");
    return false;
}
Also used : OptionalLong(java.util.OptionalLong) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) OptionalLong(java.util.OptionalLong) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution)
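
The two Collections.binarySearch calls above return -(insertionPoint) - 1 when a timestamp does not land exactly on a boundary, and the Math.abs(index + 2) step normalizes that to the index of the window containing the timestamp. A small standalone illustration with made-up boundary values:

// Illustration only: boundary and timestamp values are invented for this example.
List<Long> boundaries = Arrays.asList(0L, 100L, 200L, 300L);

long ts = 150L;                                          // falls inside window [100, 200)
int idx = Collections.binarySearch(boundaries, ts);      // not found: -(2) - 1 = -3
int windowIndex = (idx < 0) ? Math.abs(idx + 2) : idx;   // -3 + 2 = -1, abs -> 1

// windowIndex == 1, i.e. the window starting at boundary 100; an exact hit on 100L
// would have returned index 1 directly, so both cases land in the same window.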

Example 24 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in the Apache HBase project.

Class RegionHDFSBlockLocationFinder, method refreshAndWait.

void refreshAndWait(Collection<RegionInfo> hris) {
    ArrayList<ListenableFuture<HDFSBlocksDistribution>> regionLocationFutures = new ArrayList<>(hris.size());
    for (RegionInfo hregionInfo : hris) {
        regionLocationFutures.add(asyncGetBlockDistribution(hregionInfo));
    }
    int index = 0;
    for (RegionInfo hregionInfo : hris) {
        ListenableFuture<HDFSBlocksDistribution> future = regionLocationFutures.get(index);
        try {
            cache.put(hregionInfo, future.get());
        } catch (InterruptedException ite) {
            Thread.currentThread().interrupt();
        } catch (ExecutionException ee) {
            LOG.debug("ExecutionException during HDFSBlocksDistribution computation for region = {}", hregionInfo.getEncodedName(), ee);
        }
        index++;
    }
}
Also used : ArrayList(java.util.ArrayList) ListenableFuture(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListenableFuture) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ExecutionException(java.util.concurrent.ExecutionException) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution)
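
refreshAndWait submits all block-distribution computations first and only then blocks on the futures, so the per-region work can run in parallel while results are still collected in submission order. The same fan-out/fan-in shape, sketched here with plain JDK types rather than the finder's ListenableFuture helpers, purely as an illustration:

// Illustration only: generic java.util.concurrent types, not the balancer's async machinery.
ExecutorService pool = Executors.newFixedThreadPool(4);
List<String> regions = List.of("region-a", "region-b", "region-c"); // hypothetical names

// Fan out: submit everything before waiting on anything.
List<Future<Integer>> futures = new ArrayList<>();
for (String region : regions) {
    futures.add(pool.submit(() -> region.length())); // stand-in for the real per-region work
}

// Fan in: collect results in submission order, tolerating individual failures.
for (int i = 0; i < regions.size(); i++) {
    try {
        System.out.println(regions.get(i) + " -> " + futures.get(i).get());
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        System.err.println("failed for " + regions.get(i) + ": " + ee.getCause());
    }
}
pool.shutdown();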

Example 25 with HDFSBlocksDistribution

Use of org.apache.hadoop.hbase.HDFSBlocksDistribution in the Apache HBase project.

Class RegionHDFSBlockLocationFinder, method getBlockDistribution.

HDFSBlocksDistribution getBlockDistribution(RegionInfo hri) {
    HDFSBlocksDistribution blockDistbn = null;
    try {
        if (cache.asMap().containsKey(hri)) {
            blockDistbn = cache.get(hri);
            return blockDistbn;
        } else {
            LOG.trace("HDFSBlocksDistribution not found in cache for {}", hri.getRegionNameAsString());
            blockDistbn = internalGetTopBlockLocation(hri);
            cache.put(hri, blockDistbn);
            return blockDistbn;
        }
    } catch (ExecutionException e) {
        LOG.warn("Error while fetching cache entry ", e);
        blockDistbn = internalGetTopBlockLocation(hri);
        cache.put(hri, blockDistbn);
        return blockDistbn;
    }
}
Also used : ExecutionException(java.util.concurrent.ExecutionException) HDFSBlocksDistribution(org.apache.hadoop.hbase.HDFSBlocksDistribution)
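
getBlockDistribution is a cache-aside lookup: return the cached distribution when present, otherwise compute it, store it, and fall back to a fresh computation if the cache lookup itself fails. A minimal sketch of the same pattern with a plain ConcurrentHashMap; computeDistribution is a hypothetical stand-in for internalGetTopBlockLocation:

// Sketch only: a plain map instead of the finder's Guava cache;
// computeDistribution(...) is a hypothetical helper, not part of the original class.
private final ConcurrentHashMap<RegionInfo, HDFSBlocksDistribution> cache = new ConcurrentHashMap<>();

HDFSBlocksDistribution getBlockDistributionSketch(RegionInfo hri) {
    return cache.computeIfAbsent(hri, key -> computeDistribution(key));
}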

Aggregations

HDFSBlocksDistribution (org.apache.hadoop.hbase.HDFSBlocksDistribution) 29
Test (org.junit.Test) 11
Path (org.apache.hadoop.fs.Path) 8
ArrayList (java.util.ArrayList) 6
FileSystem (org.apache.hadoop.fs.FileSystem) 6
HashMap (java.util.HashMap) 5
ExecutionException (java.util.concurrent.ExecutionException) 5
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus) 5
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 5
IOException (java.io.IOException) 4
Configuration (org.apache.hadoop.conf.Configuration) 4
FileStatus (org.apache.hadoop.fs.FileStatus) 4
InterruptedIOException (java.io.InterruptedIOException) 3
Cell (org.apache.hadoop.hbase.Cell) 3
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 3
Result (org.apache.hadoop.hbase.client.Result) 3
Scan (org.apache.hadoop.hbase.client.Scan) 3
TimeoutIOException (org.apache.hadoop.hbase.exceptions.TimeoutIOException) 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 2
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) 2