Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
The class CatalogJanitor, method cleanParent.
static boolean cleanParent(MasterServices services, RegionInfo parent, Result rowContent)
  throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Cleaning parent region {}", parent);
  }
  // Check whether it is a merged region and if it is clean of references.
  if (CatalogFamilyFormat.hasMergeRegions(rowContent.rawCells())) {
    // Wait until clean of merge parent regions first
    if (LOG.isDebugEnabled()) {
      LOG.debug("Region {} has merge parents, cleaning them first", parent);
    }
    return false;
  }
  // Run checks on each daughter split.
  PairOfSameType<RegionInfo> daughters = MetaTableAccessor.getDaughterRegions(rowContent);
  Pair<Boolean, Boolean> a = checkDaughterInFs(services, parent, daughters.getFirst());
  Pair<Boolean, Boolean> b = checkDaughterInFs(services, parent, daughters.getSecond());
  if (hasNoReferences(a) && hasNoReferences(b)) {
    String daughterA =
      daughters.getFirst() != null ? daughters.getFirst().getShortNameToLog() : "null";
    String daughterB =
      daughters.getSecond() != null ? daughters.getSecond().getShortNameToLog() : "null";
    if (LOG.isDebugEnabled()) {
      LOG.debug("Deleting region " + parent.getShortNameToLog() + " because daughters -- "
        + daughterA + ", " + daughterB + " -- no longer hold references");
    }
    ProcedureExecutor<MasterProcedureEnv> pe = services.getMasterProcedureExecutor();
    GCRegionProcedure gcRegionProcedure = new GCRegionProcedure(pe.getEnvironment(), parent);
    pe.submitProcedure(gcRegionProcedure);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Submitted procedure {} for split parent {}", gcRegionProcedure, parent);
    }
    return true;
  } else {
    if (LOG.isDebugEnabled()) {
      if (!hasNoReferences(a)) {
        LOG.debug("Deferring removal of region {} because daughter {} still has references",
          parent, daughters.getFirst());
      }
      if (!hasNoReferences(b)) {
        LOG.debug("Deferring removal of region {} because daughter {} still has references",
          parent, daughters.getSecond());
      }
    }
  }
  return false;
}
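The method only garbage-collects a parent region once nothing references it any more: merge parents must be cleaned up first, and a split parent is deleted only when neither daughter still holds a reference. A minimal standalone sketch of that decision logic follows; the boolean parameters are hypothetical stand-ins for the CatalogFamilyFormat.hasMergeRegions and checkDaughterInFs results used above.

// Simplified sketch of the cleanParent decision flow; the booleans stand in for
// the merge-parent check and the per-daughter reference checks.
public final class ParentCleanupDecision {

  /** Returns true when the parent region would be scheduled for GC. */
  static boolean shouldGcParent(boolean hasMergeParents,
                                boolean daughterAHasReferences,
                                boolean daughterBHasReferences) {
    if (hasMergeParents) {
      // Merge parents must be cleaned up first, so defer.
      return false;
    }
    // A split parent is only removable once both daughters dropped their references.
    return !daughterAHasReferences && !daughterBHasReferences;
  }

  public static void main(String[] args) {
    System.out.println(shouldGcParent(false, false, false)); // true: safe to GC
    System.out.println(shouldGcParent(false, true, false));  // false: daughter A still referenced
    System.out.println(shouldGcParent(true, false, false));  // false: merge parents pending
  }
}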
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
The class RegionReplicationSink, method send.
private void send() {
  List<SinkEntry> toSend = new ArrayList<>();
  long totalSize = 0L;
  boolean hasMetaEdit = false;
  for (SinkEntry entry;;) {
    entry = entries.poll();
    if (entry == null) {
      break;
    }
    toSend.add(entry);
    totalSize += entry.size;
    hasMetaEdit |= entry.edit.isMetaEdit();
    if (toSend.size() >= batchCountCapacity || totalSize >= batchSizeCapacity) {
      break;
    }
  }
  int toSendReplicaCount = regionReplication - 1 - failedReplicas.size();
  if (toSendReplicaCount <= 0) {
    return;
  }
  long rpcTimeoutNsToUse;
  long operationTimeoutNsToUse;
  if (!hasMetaEdit) {
    rpcTimeoutNsToUse = rpcTimeoutNs;
    operationTimeoutNsToUse = operationTimeoutNs;
  } else {
    rpcTimeoutNsToUse = metaEditRpcTimeoutNs;
    operationTimeoutNsToUse = metaEditOperationTimeoutNs;
  }
  sending = true;
  List<WAL.Entry> walEntries =
    toSend.stream().map(e -> new WAL.Entry(e.key, e.edit)).collect(Collectors.toList());
  AtomicInteger remaining = new AtomicInteger(toSendReplicaCount);
  Map<Integer, MutableObject<Throwable>> replica2Error = new HashMap<>();
  for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
    if (failedReplicas.contains(replicaId)) {
      continue;
    }
    MutableObject<Throwable> error = new MutableObject<>();
    replica2Error.put(replicaId, error);
    RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, replicaId);
    FutureUtils.addListener(
      conn.replicate(replica, walEntries, retries, rpcTimeoutNsToUse, operationTimeoutNsToUse),
      (r, e) -> {
        error.setValue(e);
        if (remaining.decrementAndGet() == 0) {
          onComplete(toSend, replica2Error);
        }
      });
  }
}
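The first loop drains queued entries into a batch until either the entry-count cap or the accumulated-size cap is reached; the batch is then replicated to every replica that has not already failed, with onComplete fired once the last callback returns. Below is a small, self-contained sketch of just that drain-until-capacity step; the Entry record is a hypothetical stand-in for SinkEntry, not an HBase type.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

public class BatchDrain {

  // Hypothetical stand-in for SinkEntry: a payload plus its serialized size.
  record Entry(String payload, long size) {}

  // Drain entries from the pending queue until the batch hits the count cap or the size cap.
  static List<Entry> drainBatch(Queue<Entry> pending, int countCap, long sizeCap) {
    List<Entry> batch = new ArrayList<>();
    long totalSize = 0L;
    for (Entry e; (e = pending.poll()) != null; ) {
      batch.add(e);
      totalSize += e.size();
      if (batch.size() >= countCap || totalSize >= sizeCap) {
        break;
      }
    }
    return batch;
  }

  public static void main(String[] args) {
    Queue<Entry> pending = new ArrayDeque<>();
    for (int i = 0; i < 10; i++) {
      pending.add(new Entry("edit-" + i, 40L));
    }
    // Prints 3: the third entry pushes the total size to 120, past the 100-byte cap.
    System.out.println(drainBatch(pending, 5, 100L).size());
  }
}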
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
The class TakeSnapshotHandler, method process.
/**
 * Execute the core common portions of taking a snapshot. The {@link #snapshotRegions(List)}
 * call should get implemented for each snapshot flavor.
 */
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", justification = "Intentional")
public void process() {
  String msg = "Running " + snapshot.getType() + " table snapshot " + snapshot.getName() + " "
    + eventType + " on table " + snapshotTable;
  LOG.info(msg);
  MasterLock tableLockToRelease = this.tableLock;
  status.setStatus(msg);
  try {
    if (downgradeToSharedTableLock()) {
      // release the exclusive lock and hold the shared lock instead
      tableLockToRelease = master.getLockManager().createMasterLock(snapshotTable, LockType.SHARED,
        this.getClass().getName() + ": take snapshot " + snapshot.getName());
      tableLock.release();
      tableLockToRelease.acquire();
    }
    // If regions move after this meta scan, the region-specific snapshot should fail, triggering
    // an external exception that gets captured here.
    // write down the snapshot info in the working directory
    SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, workingDirFs);
    snapshotManifest.addTableDescriptor(this.htd);
    monitor.rethrowException();
    List<Pair<RegionInfo, ServerName>> regionsAndLocations =
      master.getAssignmentManager().getTableRegionsAndLocations(snapshotTable, false);
    // run the snapshot
    snapshotRegions(regionsAndLocations);
    monitor.rethrowException();
    // extract each pair to separate lists
    Set<String> serverNames = new HashSet<>();
    for (Pair<RegionInfo, ServerName> p : regionsAndLocations) {
      if (p != null && p.getFirst() != null && p.getSecond() != null) {
        RegionInfo hri = p.getFirst();
        if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) {
          continue;
        }
        serverNames.add(p.getSecond().toString());
      }
    }
    // flush the in-memory state, and write the single manifest
    status.setStatus("Consolidate snapshot: " + snapshot.getName());
    snapshotManifest.consolidate();
    // verify the snapshot is valid
    status.setStatus("Verifying snapshot: " + snapshot.getName());
    verifier.verifySnapshot(this.workingDir, serverNames);
    // complete the snapshot, atomically moving from tmp to .snapshot dir.
    SnapshotDescriptionUtils.completeSnapshot(this.snapshotDir, this.workingDir, this.rootFs,
      this.workingDirFs, this.conf);
    finished = true;
    msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
    status.markComplete(msg);
    LOG.info(msg);
    metricsSnapshot.addSnapshot(status.getCompletionTimestamp() - status.getStartTime());
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost()
        .postCompletedSnapshotAction(ProtobufUtil.createSnapshotDesc(snapshot), this.htd);
    }
  } catch (Exception e) {
    // FindBugs: REC_CATCH_EXCEPTION
    status.abort("Failed to complete snapshot " + snapshot.getName() + " on table "
      + snapshotTable + " because " + e.getMessage());
    String reason = "Failed taking snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)
      + " due to exception:" + e.getMessage();
    LOG.error(reason, e);
    ForeignException ee = new ForeignException(reason, e);
    monitor.receive(ee);
    // need to mark this completed to close off and allow cleanup to happen.
    cancel(reason);
  } finally {
    LOG.debug("Launching cleanup of working dir:" + workingDir);
    try {
      // if the working dir is still present, the snapshot has failed, so delete it.
      if (!workingDirFs.delete(workingDir, true)) {
        LOG.error("Couldn't delete snapshot working directory:" + workingDir);
      }
    } catch (IOException e) {
      LOG.error("Couldn't delete snapshot working directory:" + workingDir);
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Table snapshot journal : \n" + status.prettyPrintJournal());
    }
    tableLockToRelease.release();
  }
}
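Beyond the snapshot-specific calls, the overall shape of process() is: stage everything in a working directory, verify it, publish it, and always clean the working directory up in the finally block. A generic, non-HBase sketch of that shape using java.nio.file is shown below; the directory prefix and the staged "manifest" file are purely illustrative.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;

public class WorkingDirTask {

  // Run a task that stages its output in a temporary working directory and
  // always removes that directory afterwards, whether the task succeeded or not.
  static void runWithWorkingDir() throws IOException {
    Path workingDir = Files.createTempDirectory("snapshot-working-");
    try {
      // ... stage files under workingDir, verify them, then publish ...
      Files.writeString(workingDir.resolve("manifest"), "example");
    } finally {
      // Best-effort recursive delete, mirroring the finally block above.
      try (var paths = Files.walk(workingDir)) {
        paths.sorted(Comparator.reverseOrder()).forEach(p -> {
          try {
            Files.deleteIfExists(p);
          } catch (IOException e) {
            System.err.println("Couldn't delete " + p + ": " + e.getMessage());
          }
        });
      }
    }
  }

  public static void main(String[] args) throws IOException {
    runWithWorkingDir();
  }
}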
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
The class MobFileCompactionChore, method performMajorCompactionInBatches.
public void performMajorCompactionInBatches(Admin admin, TableDescriptor htd,
  ColumnFamilyDescriptor hcd) throws IOException, InterruptedException {
  List<RegionInfo> regions = admin.getRegions(htd.getTableName());
  if (regions.size() <= this.regionBatchSize) {
    LOG.debug("Table={} cf={} - performing major MOB compaction in non-batched mode, "
      + "regions={}, batch size={}", htd.getTableName(), hcd.getNameAsString(), regions.size(),
      regionBatchSize);
    admin.majorCompact(htd.getTableName(), hcd.getName());
    return;
  }
  // Shuffle the list of regions in case they come ordered by region server
  Collections.shuffle(regions);
  // Create the first batch
  List<RegionInfo> toCompact = new ArrayList<RegionInfo>(this.regionBatchSize);
  for (int i = 0; i < this.regionBatchSize; i++) {
    toCompact.add(regions.remove(0));
  }
  // Start compaction now
  for (RegionInfo ri : toCompact) {
    startCompaction(admin, htd.getTableName(), ri, hcd.getName());
  }
  List<RegionInfo> compacted = new ArrayList<RegionInfo>(toCompact.size());
  List<RegionInfo> failed = new ArrayList<RegionInfo>();
  int totalCompacted = 0;
  while (!toCompact.isEmpty()) {
    // Check the status of active compactions
    for (RegionInfo ri : toCompact) {
      try {
        if (admin.getCompactionStateForRegion(ri.getRegionName()) == CompactionState.NONE) {
          totalCompacted++;
          LOG.info("Finished major MOB compaction: table={} cf={} region={} compacted regions={}",
            htd.getTableName(), hcd.getNameAsString(), ri.getRegionNameAsString(), totalCompacted);
          compacted.add(ri);
        }
      } catch (IOException e) {
        LOG.error("Could not get compaction state for table={} cf={} region={}, compaction will"
          + " be aborted for the region.", htd.getTableName(), hcd.getNameAsString(),
          ri.getEncodedName());
        LOG.error("Because of:", e);
        failed.add(ri);
      }
    }
    // Remove failed regions to avoid an endless compaction loop
    toCompact.removeAll(failed);
    failed.clear();
    // Update the batch: remove compacted regions and add new ones
    for (RegionInfo ri : compacted) {
      toCompact.remove(ri);
      if (regions.size() > 0) {
        RegionInfo region = regions.remove(0);
        toCompact.add(region);
        startCompaction(admin, htd.getTableName(), region, hcd.getName());
      }
    }
    compacted.clear();
    LOG.debug("Table={} cf={}. Wait for 10 sec, toCompact size={} regions left={}"
      + " compacted so far={}", htd.getTableName(), hcd.getNameAsString(), toCompact.size(),
      regions.size(), totalCompacted);
    Thread.sleep(10000);
  }
  LOG.info("Finished major MOB compacting table={}. cf={}", htd.getTableName(),
    hcd.getNameAsString());
}
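The loop keeps a bounded window of in-flight compactions: it starts regionBatchSize requests, polls their state every ten seconds, and refills the window as regions finish, dropping regions whose state cannot be read so the loop cannot spin forever. The following stripped-down sketch shows just that sliding-window pattern; the start and isDone callbacks are hypothetical stand-ins for startCompaction and the admin.getCompactionStateForRegion check.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.function.Consumer;
import java.util.function.Predicate;

public class BoundedBatchRunner {

  // Process all items while keeping at most batchSize of them in flight at a time.
  // start launches work for an item; isDone polls whether that work has finished.
  static void runInBatches(List<String> items, int batchSize,
                           Consumer<String> start, Predicate<String> isDone)
      throws InterruptedException {
    Queue<String> pending = new ArrayDeque<>(items);
    List<String> inFlight = new ArrayList<>();
    // Fill the initial window.
    while (inFlight.size() < batchSize && !pending.isEmpty()) {
      String item = pending.poll();
      start.accept(item);
      inFlight.add(item);
    }
    while (!inFlight.isEmpty()) {
      // Collect finished items first, then refill the window, mirroring the
      // compacted / toCompact bookkeeping in the chore above.
      List<String> finished = new ArrayList<>();
      for (String item : inFlight) {
        if (isDone.test(item)) {
          finished.add(item);
        }
      }
      for (String item : finished) {
        inFlight.remove(item);
        String next = pending.poll();
        if (next != null) {
          start.accept(next);
          inFlight.add(next);
        }
      }
      Thread.sleep(100); // short poll interval for the sketch; the chore above waits 10 seconds
    }
  }

  public static void main(String[] args) throws InterruptedException {
    List<String> regions = List.of("r1", "r2", "r3", "r4", "r5");
    runInBatches(regions, 2,
      r -> System.out.println("start " + r),
      r -> true); // pretend everything finishes immediately
  }
}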
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
The class BackupObserver, method postBulkLoadHFile.
@Override
public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
  List<Pair<byte[], String>> stagingFamilyPaths, Map<byte[], List<Path>> finalPaths)
  throws IOException {
  Configuration cfg = ctx.getEnvironment().getConfiguration();
  if (finalPaths == null) {
    // there is no need to record state
    return;
  }
  if (!BackupManager.isBackupEnabled(cfg)) {
    LOG.debug("Skipping recording bulk load in postBulkLoadHFile since backup is disabled");
    return;
  }
  try (Connection connection = ConnectionFactory.createConnection(cfg);
    BackupSystemTable tbl = new BackupSystemTable(connection)) {
    List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
    RegionInfo info = ctx.getEnvironment().getRegionInfo();
    TableName tableName = info.getTable();
    if (!fullyBackedUpTables.contains(tableName)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(tableName + " has not gone through a full backup");
      }
      return;
    }
    tbl.writePathsPostBulkLoad(tableName, info.getEncodedNameAsBytes(), finalPaths);
  } catch (IOException ioe) {
    LOG.error("Failed to get tables which have been fully backed up", ioe);
  }
}
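One detail worth noting in this hook is the single try-with-resources statement that opens both the Connection and the BackupSystemTable, so both are closed in reverse declaration order even if writing the bulk-load paths throws. A generic illustration of that idiom, using hypothetical resource classes rather than the HBase ones, is:

public class TwoResources {

  // Hypothetical stand-ins for Connection and BackupSystemTable.
  static class Conn implements AutoCloseable {
    @Override public void close() { System.out.println("close Conn"); }
  }

  static class SystemTable implements AutoCloseable {
    SystemTable(Conn conn) { /* would use the connection */ }
    void record(String what) { System.out.println("record " + what); }
    @Override public void close() { System.out.println("close SystemTable"); }
  }

  public static void main(String[] args) {
    // Both resources are closed automatically, in reverse declaration order:
    // SystemTable first, then Conn, even if record(...) throws.
    try (Conn conn = new Conn();
         SystemTable tbl = new SystemTable(conn)) {
      tbl.record("bulk-load paths");
    }
  }
}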