
Example 81 with RegionInfo

Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

From the class CatalogJanitor, the method cleanParent:

static boolean cleanParent(MasterServices services, RegionInfo parent, Result rowContent) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Cleaning parent region {}", parent);
    }
    // Check whether it is a merged region and if it is clean of references.
    if (CatalogFamilyFormat.hasMergeRegions(rowContent.rawCells())) {
        // Wait until clean of merge parent regions first
        if (LOG.isDebugEnabled()) {
            LOG.debug("Region {} has merge parents, cleaning them first", parent);
        }
        return false;
    }
    // Run checks on each daughter split.
    PairOfSameType<RegionInfo> daughters = MetaTableAccessor.getDaughterRegions(rowContent);
    Pair<Boolean, Boolean> a = checkDaughterInFs(services, parent, daughters.getFirst());
    Pair<Boolean, Boolean> b = checkDaughterInFs(services, parent, daughters.getSecond());
    if (hasNoReferences(a) && hasNoReferences(b)) {
        String daughterA = daughters.getFirst() != null ? daughters.getFirst().getShortNameToLog() : "null";
        String daughterB = daughters.getSecond() != null ? daughters.getSecond().getShortNameToLog() : "null";
        if (LOG.isDebugEnabled()) {
            LOG.debug("Deleting region " + parent.getShortNameToLog() + " because daughters -- " + daughterA + ", " + daughterB + " -- no longer hold references");
        }
        ProcedureExecutor<MasterProcedureEnv> pe = services.getMasterProcedureExecutor();
        GCRegionProcedure gcRegionProcedure = new GCRegionProcedure(pe.getEnvironment(), parent);
        pe.submitProcedure(gcRegionProcedure);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Submitted procedure {} for split parent {}", gcRegionProcedure, parent);
        }
        return true;
    } else {
        if (LOG.isDebugEnabled()) {
            if (!hasNoReferences(a)) {
                LOG.debug("Deferring removal of region {} because daughter {} still has references", parent, daughters.getFirst());
            }
            if (!hasNoReferences(b)) {
                LOG.debug("Deferring removal of region {} because daughter {} still has references", parent, daughters.getSecond());
            }
        }
    }
    return false;
}
Also used : RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) GCRegionProcedure(org.apache.hadoop.hbase.master.assignment.GCRegionProcedure) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)
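
The reclaim decision above turns on the private helper hasNoReferences. As a hedged sketch (a paraphrase, not necessarily the verbatim HBase helper), each Pair returned by checkDaughterInFs can be read as (daughter directory exists, daughter still holds reference files), so the parent is safe to delete once each daughter either never materialized or no longer references the parent's store files:

private static boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
    // first == the daughter region directory exists in the filesystem
    // second == that directory still contains reference files to the parent
    return !p.getFirst() || !p.getSecond();
}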

Example 82 with RegionInfo

Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

From the class RegionReplicationSink, the method send:

private void send() {
    List<SinkEntry> toSend = new ArrayList<>();
    long totalSize = 0L;
    boolean hasMetaEdit = false;
    for (SinkEntry entry; ; ) {
        entry = entries.poll();
        if (entry == null) {
            break;
        }
        toSend.add(entry);
        totalSize += entry.size;
        hasMetaEdit |= entry.edit.isMetaEdit();
        if (toSend.size() >= batchCountCapacity || totalSize >= batchSizeCapacity) {
            break;
        }
    }
    int toSendReplicaCount = regionReplication - 1 - failedReplicas.size();
    if (toSendReplicaCount <= 0) {
        return;
    }
    long rpcTimeoutNsToUse;
    long operationTimeoutNsToUse;
    if (!hasMetaEdit) {
        rpcTimeoutNsToUse = rpcTimeoutNs;
        operationTimeoutNsToUse = operationTimeoutNs;
    } else {
        rpcTimeoutNsToUse = metaEditRpcTimeoutNs;
        operationTimeoutNsToUse = metaEditOperationTimeoutNs;
    }
    sending = true;
    List<WAL.Entry> walEntries = toSend.stream().map(e -> new WAL.Entry(e.key, e.edit)).collect(Collectors.toList());
    AtomicInteger remaining = new AtomicInteger(toSendReplicaCount);
    Map<Integer, MutableObject<Throwable>> replica2Error = new HashMap<>();
    for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
        if (failedReplicas.contains(replicaId)) {
            continue;
        }
        MutableObject<Throwable> error = new MutableObject<>();
        replica2Error.put(replicaId, error);
        RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, replicaId);
        FutureUtils.addListener(conn.replicate(replica, walEntries, retries, rpcTimeoutNsToUse, operationTimeoutNsToUse), (r, e) -> {
            error.setValue(e);
            if (remaining.decrementAndGet() == 0) {
                onComplete(toSend, replica2Error);
            }
        });
    }
}
Also used : FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) FlushAction(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction) Preconditions(org.apache.hbase.thirdparty.com.google.common.base.Preconditions) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) IntHashSet(org.agrona.collections.IntHashSet) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StringUtils(org.apache.hadoop.util.StringUtils) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) WAL(org.apache.hadoop.hbase.wal.WAL) AsyncClusterConnection(org.apache.hadoop.hbase.client.AsyncClusterConnection) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) MutableObject(org.apache.commons.lang3.mutable.MutableObject) ServerCall(org.apache.hadoop.hbase.ipc.ServerCall) Cell(org.apache.hadoop.hbase.Cell) Bytes(org.apache.hadoop.hbase.util.Bytes) Logger(org.slf4j.Logger) Set(java.util.Set) RegionReplicaUtil(org.apache.hadoop.hbase.client.RegionReplicaUtil) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) CellUtil(org.apache.hadoop.hbase.CellUtil) List(java.util.List) FutureUtils(org.apache.hadoop.hbase.util.FutureUtils) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Optional(java.util.Optional) Queue(java.util.Queue) ArrayDeque(java.util.ArrayDeque) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl)
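
The heart of send() is a fan-out/countdown pattern: fire one asynchronous replicate call per live secondary replica, stash each error in a per-replica slot, and run a single completion callback once the shared AtomicInteger reaches zero. Below is a minimal self-contained sketch of the same pattern using only java.util.concurrent (FanOutDemo and its names are illustrative, not HBase APIs):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutDemo {
    public static void main(String[] args) {
        int replicas = 3;
        AtomicInteger remaining = new AtomicInteger(replicas);
        Map<Integer, Throwable> errors = new ConcurrentHashMap<>();
        CompletableFuture<?>[] futures = new CompletableFuture<?>[replicas];
        for (int id = 1; id <= replicas; id++) {
            final int replicaId = id;
            futures[replicaId - 1] = CompletableFuture
                .runAsync(() -> { /* the sink calls conn.replicate(...) here */ })
                .whenComplete((r, e) -> {
                    if (e != null) {
                        errors.put(replicaId, e); // per-replica error, like replica2Error
                    }
                    if (remaining.decrementAndGet() == 0) {
                        System.out.println("all replicas finished, errors=" + errors);
                    }
                });
        }
        CompletableFuture.allOf(futures).join(); // keep the demo alive until callbacks run
    }
}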

Example 83 with RegionInfo

Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

From the class TakeSnapshotHandler, the method process:

/**
 * Execute the core common portions of taking a snapshot. The {@link #snapshotRegions(List)}
 * call should get implemented for each snapshot flavor.
 */
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public void process() {
    String msg = "Running " + snapshot.getType() + " table snapshot " + snapshot.getName() + " " + eventType + " on table " + snapshotTable;
    LOG.info(msg);
    MasterLock tableLockToRelease = this.tableLock;
    status.setStatus(msg);
    try {
        if (downgradeToSharedTableLock()) {
            // release the exclusive lock and hold the shared lock instead
            tableLockToRelease = master.getLockManager().createMasterLock(snapshotTable, LockType.SHARED, this.getClass().getName() + ": take snapshot " + snapshot.getName());
            tableLock.release();
            tableLockToRelease.acquire();
        }
        // If regions move after this meta scan, the region specific snapshot should fail, triggering
        // an external exception that gets captured here.
        // write down the snapshot info in the working directory
        SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, workingDirFs);
        snapshotManifest.addTableDescriptor(this.htd);
        monitor.rethrowException();
        List<Pair<RegionInfo, ServerName>> regionsAndLocations = master.getAssignmentManager().getTableRegionsAndLocations(snapshotTable, false);
        // run the snapshot
        snapshotRegions(regionsAndLocations);
        monitor.rethrowException();
        // extract each pair to separate lists
        Set<String> serverNames = new HashSet<>();
        for (Pair<RegionInfo, ServerName> p : regionsAndLocations) {
            if (p != null && p.getFirst() != null && p.getSecond() != null) {
                RegionInfo hri = p.getFirst();
                if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
                    continue;
                }
                serverNames.add(p.getSecond().toString());
            }
        }
        // flush the in-memory state, and write the single manifest
        status.setStatus("Consolidate snapshot: " + snapshot.getName());
        snapshotManifest.consolidate();
        // verify the snapshot is valid
        status.setStatus("Verifying snapshot: " + snapshot.getName());
        verifier.verifySnapshot(this.workingDir, serverNames);
        // complete the snapshot, atomically moving from tmp to .snapshot dir.
        SnapshotDescriptionUtils.completeSnapshot(this.snapshotDir, this.workingDir, this.rootFs, this.workingDirFs, this.conf);
        finished = true;
        msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
        status.markComplete(msg);
        LOG.info(msg);
        metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
        if (master.getMasterCoprocessorHost() != null) {
            master.getMasterCoprocessorHost().postCompletedSnapshotAction(ProtobufUtil.createSnapshotDesc(snapshot), this.htd);
        }
    } catch (Exception e) {
        // FindBugs: REC_CATCH_EXCEPTION
        status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " + snapshotTable + " because " + e.getMessage());
        String reason = "Failed taking snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " due to exception:" + e.getMessage();
        LOG.error(reason, e);
        ForeignException ee = new ForeignException(reason, e);
        monitor.receive(ee);
        // need to mark this completed to close off and allow cleanup to happen.
        cancel(reason);
    } finally {
        LOG.debug("Launching cleanup of working dir:" + workingDir);
        try {
            // it.
            if (!workingDirFs.delete(workingDir, true)) {
                LOG.error("Couldn't delete snapshot working directory:" + workingDir);
            }
        } catch (IOException e) {
            LOG.error("Couldn't delete snapshot working directory:" + workingDir);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Table snapshot journal : \n" + status.prettyPrintJournal());
        }
        tableLockToRelease.release();
    }
}
Also used : ServerName(org.apache.hadoop.hbase.ServerName) ForeignException(org.apache.hadoop.hbase.errorhandling.ForeignException) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException) MasterLock(org.apache.hadoop.hbase.master.locking.LockManager.MasterLock) KeeperException(org.apache.zookeeper.KeeperException) CancellationException(java.util.concurrent.CancellationException) Pair(org.apache.hadoop.hbase.util.Pair) HashSet(java.util.HashSet)
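
The downgradeToSharedTableLock() branch trades the exclusive table lock for a shared one so other table operations can proceed while the snapshot completes; note that the code releases the exclusive lock before acquiring the shared one. For contrast, here is a minimal java.util.concurrent sketch of a strict downgrade, which takes the shared lock first and so never leaves the resource unlocked in between:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockDowngradeDemo {
    public static void main(String[] args) {
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        lock.writeLock().lock();
        try {
            // exclusive phase, e.g. writing the snapshot info file
        } finally {
            lock.readLock().lock();    // acquire the shared lock first...
            lock.writeLock().unlock(); // ...then release the exclusive one
        }
        try {
            // shared phase, e.g. snapshotting regions and verifying the result
        } finally {
            lock.readLock().unlock();
        }
    }
}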

Example 84 with RegionInfo

Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

From the class MobFileCompactionChore, the method performMajorCompactionInBatches:

public void performMajorCompactionInBatches(Admin admin, TableDescriptor htd, ColumnFamilyDescriptor hcd) throws IOException, InterruptedException {
    List<RegionInfo> regions = admin.getRegions(htd.getTableName());
    if (regions.size() <= this.regionBatchSize) {
        LOG.debug("Table={} cf={} - performing major MOB compaction in non-batched mode," + "regions={}, batch size={}", htd.getTableName(), hcd.getNameAsString(), regions.size(), regionBatchSize);
        admin.majorCompact(htd.getTableName(), hcd.getName());
        return;
    }
    // Shuffle the list of regions in case they come ordered by region server
    Collections.shuffle(regions);
    // Create first batch
    List<RegionInfo> toCompact = new ArrayList<RegionInfo>(this.regionBatchSize);
    for (int i = 0; i < this.regionBatchSize; i++) {
        toCompact.add(regions.remove(0));
    }
    // Start compaction now
    for (RegionInfo ri : toCompact) {
        startCompaction(admin, htd.getTableName(), ri, hcd.getName());
    }
    List<RegionInfo> compacted = new ArrayList<RegionInfo>(toCompact.size());
    List<RegionInfo> failed = new ArrayList<RegionInfo>();
    int totalCompacted = 0;
    while (!toCompact.isEmpty()) {
        // Check status of active compactions
        for (RegionInfo ri : toCompact) {
            try {
                if (admin.getCompactionStateForRegion(ri.getRegionName()) == CompactionState.NONE) {
                    totalCompacted++;
                    LOG.info("Finished major MOB compaction: table={} cf={} region={} compacted regions={}", htd.getTableName(), hcd.getNameAsString(), ri.getRegionNameAsString(), totalCompacted);
                    compacted.add(ri);
                }
            } catch (IOException e) {
                LOG.error("Could not get compaction state for table={} cf={} region={}, compaction will" + " aborted for the region.", htd.getTableName(), hcd.getNameAsString(), ri.getEncodedName());
                LOG.error("Because of:", e);
                failed.add(ri);
            }
        }
        // Remove failed regions to avoid
        // endless compaction loop
        toCompact.removeAll(failed);
        failed.clear();
        // Update batch: remove compacted regions and add new ones
        for (RegionInfo ri : compacted) {
            toCompact.remove(ri);
            if (regions.size() > 0) {
                RegionInfo region = regions.remove(0);
                toCompact.add(region);
                startCompaction(admin, htd.getTableName(), region, hcd.getName());
            }
        }
        compacted.clear();
        LOG.debug("Table={}  cf={}. Wait for 10 sec, toCompact size={} regions left={}" + " compacted so far={}", htd.getTableName(), hcd.getNameAsString(), toCompact.size(), regions.size(), totalCompacted);
        Thread.sleep(10000);
    }
    LOG.info("Finished major MOB compacting table={}. cf={}", htd.getTableName(), hcd.getNameAsString());
}
Also used : ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException)
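
performMajorCompactionInBatches is a sliding-window poll loop: at most regionBatchSize compactions are in flight, and every finished region is replaced from the remaining backlog. A stripped-down sketch of the same control flow, where startWork and isDone are illustrative stand-ins for startCompaction and the getCompactionStateForRegion check:

import java.util.ArrayList;
import java.util.List;

public class BatchWindowDemo {
    public static void main(String[] args) throws InterruptedException {
        List<String> backlog = new ArrayList<>(List.of("r1", "r2", "r3", "r4", "r5"));
        runInBatches(backlog, 2);
    }

    static void runInBatches(List<String> backlog, int batchSize) throws InterruptedException {
        List<String> inFlight = new ArrayList<>();
        refill(backlog, inFlight, batchSize); // seed the first batch
        while (!inFlight.isEmpty()) {
            List<String> done = new ArrayList<>();
            for (String item : inFlight) {
                if (isDone(item)) {
                    done.add(item); // collect finished work, like `compacted`
                }
            }
            inFlight.removeAll(done);
            refill(backlog, inFlight, batchSize); // top the window back up
            Thread.sleep(100); // poll interval (the chore waits 10 seconds)
        }
    }

    static void refill(List<String> backlog, List<String> inFlight, int batchSize) {
        while (!backlog.isEmpty() && inFlight.size() < batchSize) {
            String next = backlog.remove(0);
            startWork(next); // stand-in for startCompaction
            inFlight.add(next);
        }
    }

    static void startWork(String item) { /* kick off the asynchronous work */ }

    static boolean isDone(String item) {
        return true; // stub; the chore asks the Admin for the region's compaction state
    }
}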

Example 85 with RegionInfo

Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

From the class BackupObserver, the method postBulkLoadHFile:

@Override
public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx, List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths) throws IOException {
    Configuration cfg = ctx.getEnvironment().getConfiguration();
    if (finalPaths == null) {
        // there is no need to record state
        return;
    }
    if (!BackupManager.isBackupEnabled(cfg)) {
        LOG.debug("skipping recording bulk load in postBulkLoadHFile since backup is disabled");
        return;
    }
    try (Connection connection = ConnectionFactory.createConnection(cfg);
        BackupSystemTable tbl = new BackupSystemTable(connection)) {
        List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
        RegionInfo info = ctx.getEnvironment().getRegionInfo();
        TableName tableName = info.getTable();
        if (!fullyBackedUpTables.contains(tableName)) {
            if (LOG.isTraceEnabled()) {
                LOG.trace(tableName + " has not gone through a full backup");
            }
            return;
        }
        tbl.writePathsPostBulkLoad(tableName, info.getEncodedNameAsBytes(), finalPaths);
    } catch (IOException ioe) {
        LOG.error("Failed to get tables which have been fully backed up", ioe);
    }
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) TableName(org.apache.hadoop.hbase.TableName) Configuration(org.apache.hadoop.conf.Configuration) Connection(org.apache.hadoop.hbase.client.Connection) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException)
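
The hook above only runs once BackupObserver is actually loaded as a region coprocessor. A hedged configuration sketch using the standard region coprocessor property (the key below is the stock HBase setting; verify it against your HBase version):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegisterObserverDemo {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // loads the observer on every region of every table
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.backup.BackupObserver");
    }
}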

Aggregations

RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 824 usages
Test (org.junit.Test): 416 usages
TableName (org.apache.hadoop.hbase.TableName): 311 usages
ServerName (org.apache.hadoop.hbase.ServerName): 191 usages
ArrayList (java.util.ArrayList): 175 usages
IOException (java.io.IOException): 174 usages
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 174 usages
Path (org.apache.hadoop.fs.Path): 141 usages
List (java.util.List): 118 usages
HashMap (java.util.HashMap): 90 usages
Table (org.apache.hadoop.hbase.client.Table): 90 usages
Map (java.util.Map): 81 usages
Put (org.apache.hadoop.hbase.client.Put): 81 usages
Configuration (org.apache.hadoop.conf.Configuration): 80 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 67 usages
TreeMap (java.util.TreeMap): 66 usages
Result (org.apache.hadoop.hbase.client.Result): 59 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 58 usages
Cell (org.apache.hadoop.hbase.Cell): 50 usages
Scan (org.apache.hadoop.hbase.client.Scan): 46 usages