use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
the class RegionLocations method mergeLocations.
/**
 * Merges this RegionLocations list with the given list, assuming both cover the
 * same range, and keeping the most up-to-date HRegionLocation entry from either
 * list according to seqNum. If seqNums are equal, the location from the argument
 * (other) is taken.
 * @param other the locations to merge with
 * @return a RegionLocations object with merged locations, or the same object if
 *         nothing is merged
 */
public RegionLocations mergeLocations(RegionLocations other) {
  assert other != null;

  HRegionLocation[] newLocations = null;

  // Use the length from other, since it is coming from meta. Otherwise,
  // in case of region replication going down, we might have a leak here.
  int max = other.locations.length;

  RegionInfo regionInfo = null;
  for (int i = 0; i < max; i++) {
    HRegionLocation thisLoc = this.getRegionLocation(i);
    HRegionLocation otherLoc = other.getRegionLocation(i);
    if (regionInfo == null && otherLoc != null && otherLoc.getRegion() != null) {
      // regionInfo is the first non-null HRI from the other RegionLocations. We use it to
      // ensure that all replica region infos belong to the same region with the same region id.
      regionInfo = otherLoc.getRegion();
    }

    HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false);

    if (selectedLoc != thisLoc) {
      if (newLocations == null) {
        newLocations = new HRegionLocation[max];
        System.arraycopy(locations, 0, newLocations, 0, i);
      }
    }
    if (newLocations != null) {
      newLocations[i] = selectedLoc;
    }
  }

  // Ensure that all replicas share the same start code. Otherwise delete them.
  if (newLocations != null && regionInfo != null) {
    for (int i = 0; i < newLocations.length; i++) {
      if (newLocations[i] != null) {
        if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegion())) {
          newLocations[i] = null;
        }
      }
    }
  }

  return newLocations == null ? this : new RegionLocations(newLocations);
}
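The notable detail in mergeLocations is its lazy copy-on-write allocation: newLocations is created only once the first entry actually changes, so an unmodified list is returned as the same object. Below is a minimal self-contained sketch of that pattern, using plain long seqNum values in place of HRegionLocation entries; none of the names are HBase API.

// Toy sketch of the lazy copy-on-write merge used by mergeLocations.
final class CopyOnWriteMerge {
  /** Returns `mine` itself when no element changes; otherwise a fresh merged array. */
  static long[] merge(long[] mine, long[] theirs) {
    long[] merged = null;
    for (int i = 0; i < theirs.length; i++) {
      // Keep the larger "seqNum" from either side.
      long selected = Math.max(i < mine.length ? mine[i] : Long.MIN_VALUE, theirs[i]);
      if (merged == null && (i >= mine.length || selected != mine[i])) {
        // First difference found: allocate now and copy the unchanged prefix.
        merged = new long[theirs.length];
        System.arraycopy(mine, 0, merged, 0, Math.min(i, mine.length));
      }
      if (merged != null) {
        merged[i] = selected;
      }
    }
    return merged == null ? mine : merged;
  }

  public static void main(String[] args) {
    long[] cached = { 5, 7, 9 };
    System.out.println(merge(cached, cached) == cached);                        // true
    System.out.println(java.util.Arrays.toString(
      merge(cached, new long[] { 5, 8, 9 })));                                  // [5, 8, 9]
  }
}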
use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
the class CompactRandomRegionOfTableAction method perform.
@Override
public void perform() throws Exception {
  HBaseTestingUtil util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  boolean major = RandomUtils.nextInt(0, 100) < majorRatio;

  getLogger().info("Performing action: Compact random region of table " + tableName
    + ", major=" + major);
  List<RegionInfo> regions = admin.getRegions(tableName);
  if (regions == null || regions.isEmpty()) {
    getLogger().info("Table " + tableName + " doesn't have regions to compact");
    return;
  }

  RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem(regions.toArray(new RegionInfo[0]));

  try {
    if (major) {
      getLogger().debug("Major compacting region " + region.getRegionNameAsString());
      admin.majorCompactRegion(region.getRegionName());
    } else {
      getLogger().debug("Compacting region " + region.getRegionNameAsString());
      admin.compactRegion(region.getRegionName());
    }
  } catch (Exception ex) {
    getLogger().warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
  }
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
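For comparison, the same "compact one random region" idea can be expressed against only the public HBase client API, outside the chaos-monkey framework. A hedged standalone sketch, assuming an HBase 2.x client on the classpath; the table name is a placeholder.

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class CompactRandomRegion {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("my_table"); // placeholder table name
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(table);
      if (regions.isEmpty()) {
        return; // nothing to compact
      }
      RegionInfo region = regions.get(ThreadLocalRandom.current().nextInt(regions.size()));
      // Fire-and-forget: the compaction request is asynchronous on the server side.
      admin.majorCompactRegion(region.getRegionName());
    }
  }
}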
use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
the class CloneSnapshotProcedure method createFilesystemLayout.
/**
 * Create regions in the file system.
 * @param env MasterProcedureEnv
 * @throws IOException if the on-disk clone fails
 */
private List<RegionInfo> createFilesystemLayout(final MasterProcedureEnv env,
    final TableDescriptor tableDescriptor, final List<RegionInfo> newRegions) throws IOException {
  return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
    @Override
    public List<RegionInfo> createHdfsRegions(final MasterProcedureEnv env,
        final Path tableRootDir, final TableName tableName, final List<RegionInfo> newRegions)
        throws IOException {
      final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
      final FileSystem fs = mfs.getFileSystem();
      final Path rootDir = mfs.getRootDir();
      final Configuration conf = env.getMasterConfiguration();
      final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();

      getMonitorStatus().setStatus("Clone snapshot - creating regions for table: " + tableName);

      try {
        // 1. Execute the on-disk clone
        Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
        SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
        RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs, manifest,
          tableDescriptor, tableRootDir, monitorException, monitorStatus);
        RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();

        // A clone operation should have nothing to restore or remove
        Preconditions.checkArgument(!metaChanges.hasRegionsToRestore(),
          "A clone should not have regions to restore");
        Preconditions.checkArgument(!metaChanges.hasRegionsToRemove(),
          "A clone should not have regions to remove");

        // At this point the clone is complete. The next step is enabling the table.
        String msg = "Clone snapshot=" + snapshot.getName() + " on table=" + tableName
          + " completed!";
        LOG.info(msg);
        monitorStatus.setStatus(msg + " Waiting for table to be enabled...");

        // 2. Let the next step add the regions to meta
        return metaChanges.getRegionsToAdd();
      } catch (Exception e) {
        String msg = "clone snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
          + " failed because " + e.getMessage();
        LOG.error(msg, e);
        IOException rse = new RestoreSnapshotException(msg, e,
          ProtobufUtil.createSnapshotDesc(snapshot));
        // These handlers aren't futures, so we need to register the error here.
        monitorException.receive(new ForeignException("Master CloneSnapshotProcedure", rse));
        throw rse;
      }
    }
  });
}
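This method plugs a snapshot-specific region-creation step into a shared createFsLayout skeleton through the CreateHdfsRegions callback. The toy sketch below shows that pattern in isolation; RegionCreator and LayoutBuilder are invented names for illustration, not HBase interfaces.

import java.util.List;

// Stand-in for HBase's CreateHdfsRegions callback: the pluggable step
// decides how regions appear on disk.
interface RegionCreator {
  List<String> createRegions(String tableDir) throws Exception;
}

class LayoutBuilder {
  // Shared skeleton: common bookkeeping (monitoring, meta updates) would wrap this call.
  static List<String> createLayout(String tableDir, RegionCreator creator) throws Exception {
    return creator.createRegions(tableDir);
  }

  public static void main(String[] args) throws Exception {
    // Clone-from-snapshot strategy supplied as a lambda; in the real procedure
    // this body is RestoreSnapshotHelper.restoreHdfsRegions().
    List<String> regions = createLayout("/hbase/data/default/t1",
      dir -> List.of(dir + "/region-a", dir + "/region-b"));
    System.out.println(regions);
  }
}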
use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
the class CloneSnapshotProcedure method executeFromState.
@Override
protected Flow executeFromState(final MasterProcedureEnv env, final CloneSnapshotState state)
    throws InterruptedException {
  LOG.trace("{} execute state={}", this, state);
  try {
    switch (state) {
      case CLONE_SNAPSHOT_PRE_OPERATION:
        // Verify that we can clone the table
        prepareClone(env);
        preCloneSnapshot(env);
        setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT);
        break;
      case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
        updateTableDescriptorWithSFT();
        newRegions = createFilesystemLayout(env, tableDescriptor, newRegions);
        env.getMasterServices().getTableDescriptors().update(tableDescriptor, true);
        setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
        break;
      case CLONE_SNAPSHOT_ADD_TO_META:
        addRegionsToMeta(env);
        setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ASSIGN_REGIONS);
        break;
      case CLONE_SNAPSHOT_ASSIGN_REGIONS:
        CreateTableProcedure.setEnablingState(env, getTableName());

        // Separate newRegions into split regions and regions to assign
        List<RegionInfo> splitRegions = new ArrayList<>();
        List<RegionInfo> regionsToAssign = new ArrayList<>();
        newRegions.forEach(ri -> {
          if (ri.isOffline() && (ri.isSplit() || ri.isSplitParent())) {
            splitRegions.add(ri);
          } else {
            regionsToAssign.add(ri);
          }
        });

        // For split regions, add them to RegionStates
        AssignmentManager am = env.getAssignmentManager();
        splitRegions.forEach(ri ->
          am.getRegionStates().updateRegionState(ri, RegionState.State.SPLIT));

        addChildProcedure(
          env.getAssignmentManager().createRoundRobinAssignProcedures(regionsToAssign));
        setNextState(CloneSnapshotState.CLONE_SNAPSHOT_UPDATE_DESC_CACHE);
        break;
      case CLONE_SNAPSHOT_UPDATE_DESC_CACHE:
        // XXX: this stage should be renamed to "set table enabled", as we now cache the
        // descriptor right after writing the fs layout.
        CreateTableProcedure.setEnabledState(env, getTableName());
        setNextState(CloneSnapshotState.CLONE_SNAPHOST_RESTORE_ACL);
        break;
      case CLONE_SNAPHOST_RESTORE_ACL:
        restoreSnapshotAcl(env);
        setNextState(CloneSnapshotState.CLONE_SNAPSHOT_POST_OPERATION);
        break;
      case CLONE_SNAPSHOT_POST_OPERATION:
        postCloneSnapshot(env);
        MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
        metricsSnapshot.addSnapshotClone(
          getMonitorStatus().getCompletionTimestamp() - getMonitorStatus().getStartTime());
        getMonitorStatus().markComplete("Clone snapshot '" + snapshot.getName() + "' completed!");
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
    }
  } catch (IOException e) {
    if (isRollbackSupported(state)) {
      setFailure("master-clone-snapshot", e);
    } else {
      LOG.warn("Retriable error trying to clone snapshot=" + snapshot.getName() + " to table="
        + getTableName() + " state=" + state, e);
    }
  }
  return Flow.HAS_MORE_STATE;
}
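executeFromState follows the ProcedureV2 state-machine idiom: each invocation handles exactly one state, records the next state, and returns whether more work remains, so the framework can persist progress and retry after a crash. A compact standalone sketch of that control flow, with illustrative names rather than HBase's actual Procedure classes:

// Minimal sketch of the one-state-per-call state machine idiom.
enum Flow { HAS_MORE_STATE, NO_MORE_STATE }

class CloneLikeStateMachine {
  enum State { PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, DONE }
  private State state = State.PRE_OPERATION;

  Flow step() {
    switch (state) {
      case PRE_OPERATION:
        state = State.WRITE_FS_LAYOUT;   // plays the role of setNextState(...)
        return Flow.HAS_MORE_STATE;
      case WRITE_FS_LAYOUT:
        state = State.ADD_TO_META;
        return Flow.HAS_MORE_STATE;
      case ADD_TO_META:
        state = State.DONE;
        return Flow.NO_MORE_STATE;       // terminal state
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
    }
  }

  public static void main(String[] args) {
    CloneLikeStateMachine m = new CloneLikeStateMachine();
    while (m.step() == Flow.HAS_MORE_STATE) { /* the framework re-invokes each step */ }
  }
}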
use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
the class GCMergedRegionsProcedure method createGCRegionProcedures.
private GCRegionProcedure[] createGCRegionProcedures(final MasterProcedureEnv env) {
  GCRegionProcedure[] procs = new GCRegionProcedure[2];
  int index = 0;
  for (RegionInfo hri : new RegionInfo[] { this.father, this.mother }) {
    GCRegionProcedure proc = new GCRegionProcedure(env, hri);
    proc.setOwner(env.getRequestUser().getShortName());
    procs[index++] = proc;
  }
  return procs;
}
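The array is fixed at two because this procedure handles the classic two-parent merge (father and mother). As a hedged sketch, the same construction generalized to any number of parent regions, with placeholder record types standing in for the procedure classes:

import java.util.List;

class GcProcedureFactory {
  record Region(String name) {}
  record GcProc(Region region, String owner) {}

  // One cleanup procedure per parent region, whatever the parent count.
  static GcProc[] createGcProcedures(List<Region> parents, String owner) {
    return parents.stream()
        .map(r -> new GcProc(r, owner))
        .toArray(GcProc[]::new);
  }

  public static void main(String[] args) {
    GcProc[] procs = createGcProcedures(
        List.of(new Region("father"), new Region("mother")), "hbase");
    System.out.println(procs.length); // 2
  }
}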