Usage of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project.
From class MoveRegionProcedure, method deserializeStateData:
@Override
protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
  // Let the parent class restore the state it serialized first (this includes the region).
  super.deserializeStateData(serializer);
  final MoveRegionStateData state = serializer.deserialize(MoveRegionStateData.class);
  // The region itself comes from the super class deserialization above.
  final RegionInfo regionInfo = getRegion();
  final ServerName source = ProtobufUtil.toServerName(state.getSourceServer());
  // The destination server is optional in the serialized state.
  ServerName destination = null;
  if (state.hasDestinationServer()) {
    destination = ProtobufUtil.toServerName(state.getDestinationServer());
  }
  this.plan = new RegionPlan(regionInfo, source, destination);
}
Usage of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project.
From class MetaFixer, method calculateTableMerges:
/**
 * Partitions the given overlapping-region pairs into sets of regions, each set to be submitted
 * as a single merge request of at most {@code maxMergeCount} regions.
 * @param maxMergeCount upper bound on the number of regions in one merge request
 * @param merges output list; each set added holds the regions of one merge request
 * @param overlaps pairs of overlapping regions; assumed ordered so that transitively
 *   overlapping pairs arrive consecutively — TODO(review) confirm ordering contract with caller
 */
private static void calculateTableMerges(int maxMergeCount, List<SortedSet<RegionInfo>> merges, Collection<Pair<RegionInfo, RegionInfo>> overlaps) {
  SortedSet<RegionInfo> currentMergeSet = new TreeSet<>();
  // Tracks every region already assigned to some merge set, so a region that appears in
  // several overlap pairs is only merged once.
  HashSet<RegionInfo> regionsInMergeSet = new HashSet<>();
  RegionInfo regionInfoWithLargestEndKey = null;
  for (Pair<RegionInfo, RegionInfo> pair : overlaps) {
    if (regionInfoWithLargestEndKey != null) {
      // Start a new merge set when this pair no longer overlaps the running range, or when
      // the current set is full.
      if (!isOverlap(regionInfoWithLargestEndKey, pair) || currentMergeSet.size() >= maxMergeCount) {
        // Log when we cut-off-merge because we hit the configured maximum merge limit.
        if (currentMergeSet.size() >= maxMergeCount) {
          LOG.warn("Ran into maximum-at-a-time merges limit={}", maxMergeCount);
        }
        // A set of 0 or 1 regions cannot be merged; discard it and release its regions so
        // they can be reused in this case.
        if (currentMergeSet.size() <= 1) {
          for (RegionInfo ri : currentMergeSet) {
            regionsInMergeSet.remove(ri);
          }
          currentMergeSet.clear();
        } else {
          merges.add(currentMergeSet);
          currentMergeSet = new TreeSet<>();
        }
      }
    }
    // Only add regions not already claimed by an earlier merge set, e.g. the first region of
    // the second merge request.
    if (!regionsInMergeSet.contains(pair.getFirst())) {
      currentMergeSet.add(pair.getFirst());
      regionsInMergeSet.add(pair.getFirst());
    }
    if (!regionsInMergeSet.contains(pair.getSecond())) {
      currentMergeSet.add(pair.getSecond());
      regionsInMergeSet.add(pair.getSecond());
    }
    regionInfoWithLargestEndKey = getRegionInfoWithLargestEndKey(getRegionInfoWithLargestEndKey(pair.getFirst(), pair.getSecond()), regionInfoWithLargestEndKey);
  }
  // BUGFIX: the loop above discards sets of size <= 1 because a single region cannot be
  // merged; apply the same rule to the trailing set rather than adding it unconditionally,
  // which could emit an empty or single-region "merge".
  if (currentMergeSet.size() > 1) {
    merges.add(currentMergeSet);
  }
}
Usage of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project.
From class MetaFixer, method getHoleCover:
/**
 * @return Attempts to calculate a new {@link RegionInfo} that covers the region range described
 * in {@code hole}.
 */
private static Optional<RegionInfo> getHoleCover(Pair<RegionInfo, RegionInfo> hole) {
  final RegionInfo left = hole.getFirst();
  final RegionInfo right = hole.getSecond();
  // Simple case: both sides belong to the same table, so the cover is just the key gap
  // between them.
  if (left.getTable().equals(right.getTable())) {
    if (Bytes.compareTo(left.getEndKey(), right.getStartKey()) >= 0) {
      LOG.warn("Skipping hole fix; left-side endKey is not less than right-side startKey;" + " left=<{}>, right=<{}>", left, right);
      return Optional.empty();
    }
    return Optional.of(buildRegionInfo(left.getTable(), left.getEndKey(), right.getStartKey()));
  }
  final boolean leftUndefined = left.equals(RegionInfoBuilder.UNDEFINED);
  final boolean rightUndefined = right.equals(RegionInfoBuilder.UNDEFINED);
  // Neither side gives us a usable boundary.
  if (leftUndefined && rightUndefined) {
    LOG.warn("Skipping hole fix; both the hole left-side and right-side RegionInfos are " + "UNDEFINED; left=<{}>, right=<{}>", left, right);
    return Optional.empty();
  }
  // No usable left boundary (undefined, or left is the last region of its table): cover from
  // the start of the right-side table.
  if (leftUndefined || left.isLast()) {
    return Optional.of(buildRegionInfo(right.getTable(), HConstants.EMPTY_START_ROW, right.getStartKey()));
  }
  // No usable right boundary (undefined, or right is the first region of its table): cover to
  // the end of the left-side table.
  if (rightUndefined || right.isFirst()) {
    return Optional.of(buildRegionInfo(left.getTable(), left.getEndKey(), HConstants.EMPTY_END_ROW));
  }
  LOG.warn("Skipping hole fix; don't know what to do with left=<{}>, right=<{}>", left, right);
  return Optional.empty();
}
Usage of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project.
From class ReportMakingVisitor, method visit:
@Override
public boolean visit(Result r) {
  // Nothing to record for a null or empty row; keep scanning.
  if (r == null || r.isEmpty()) {
    return true;
  }
  this.report.count++;
  RegionInfo regionInfo;
  try {
    regionInfo = metaTableConsistencyCheck(r);
  } catch (Throwable t) {
    LOG.warn("Failed consistency check on {}", Bytes.toStringBinary(r.getRow()), t);
    regionInfo = null;
  }
  if (regionInfo == null) {
    // Returning true means "keep scanning"
    return true;
  }
  LOG.trace(regionInfo.toString());
  // splitParent means split and offline.
  if (regionInfo.isSplitParent()) {
    this.report.splitParents.put(regionInfo, r);
  }
  if (CatalogFamilyFormat.hasMergeRegions(r.rawCells())) {
    this.report.mergedRegions.put(regionInfo, r);
  }
  // Returning true means "keep scanning"
  return true;
}
Usage of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project.
From class RegionStateStore, method addMergeRegions:
/**
 * Appends one catalog-family column per merge-parent region to {@code put}, with qualifiers
 * formatted as the merge-qualifier prefix plus a zero-padded counter ("%04d").
 * @param put the Put to add merge-region columns to (also the return value, for chaining)
 * @param mergeRegions the merge-parent regions; at most 10000 (the "%04d" qualifier format
 *   has no room for more)
 * @return the same {@code put} instance with the merge-region columns added
 * @throws IOException if adding a cell to the Put fails
 * @throws IllegalArgumentException if more than 10000 regions are passed
 */
static Put addMergeRegions(Put put, Collection<RegionInfo> mergeRegions) throws IOException {
  // Arbitrary limit. No room in our formatted 'task0000' below for more.
  final int limit = 10000;
  final int max = mergeRegions.size();
  if (max > limit) {
    // Should never happen!!!!! But just in case. IllegalArgumentException (an unchecked
    // subclass of the raw RuntimeException previously thrown, so callers catching
    // RuntimeException still work) names the actual problem: a bad argument.
    throw new IllegalArgumentException("Can't merge " + max + " regions in one go; " + limit + " is upper-limit.");
  }
  int counter = 0;
  for (RegionInfo ri : mergeRegions) {
    String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter++);
    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(put.getRow())
      .setFamily(HConstants.CATALOG_FAMILY)
      .setQualifier(Bytes.toBytes(qualifier))
      .setTimestamp(put.getTimestamp())
      .setType(Type.Put)
      .setValue(RegionInfo.toByteArray(ri))
      .build());
  }
  return put;
}
Aggregations