use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
the class AsyncRequestFutureImpl method getReplicaLocationOrFail.
private HRegionLocation getReplicaLocationOrFail(Action action) {
  // We are going to try to get the location once again. For each action, we do it once
  // from the cache, because the previous calls in the loop might have populated it.
  int replicaId = action.getReplicaId();
  RegionLocations locs = findAllLocationsOrFail(action, true);
  // manageError already called
  if (locs == null) return null;
  HRegionLocation loc = locs.getRegionLocation(replicaId);
  if (loc == null || loc.getServerName() == null) {
    locs = findAllLocationsOrFail(action, false);
    // manageError already called
    if (locs == null) return null;
    loc = locs.getRegionLocation(replicaId);
  }
  if (loc == null || loc.getServerName() == null) {
    manageLocationError(action, null);
    return null;
  }
  return loc;
}
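For comparison, here is a minimal sketch (not taken from the HBase source) of the same null-checking pattern expressed against the public RegionLocator API: first try the client's location cache, then force a fresh lookup, then give up. The helper name, table, and row key are hypothetical, and the standard org.apache.hadoop.hbase client imports are assumed.
private static HRegionLocation locateRowOrNull(Connection conn, TableName table, byte[] row) throws IOException {
  try (RegionLocator locator = conn.getRegionLocator(table)) {
    // First attempt may be answered from the client-side location cache.
    HRegionLocation loc = locator.getRegionLocation(row, false);
    if (loc == null || loc.getServerName() == null) {
      // Bypass the cache and force a fresh lookup against hbase:meta.
      loc = locator.getRegionLocation(row, true);
    }
    // As in getReplicaLocationOrFail: no usable server means no location.
    return (loc == null || loc.getServerName() == null) ? null : loc;
  }
}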
use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
the class AssignmentManager method rebuildUserRegions.
/**
 * Rebuild the list of user regions and assignment information.
 * Updates regionStates with findings as we go through the list of regions.
 * @return set of servers not online that hosted some regions according to a scan of hbase:meta
 * @throws IOException if the scan of hbase:meta fails
 */
Set<ServerName> rebuildUserRegions() throws IOException, KeeperException {
  Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(TableState.State.DISABLED, TableState.State.ENABLING);
  Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(TableState.State.DISABLED, TableState.State.DISABLING, TableState.State.ENABLING);
  // Region assignment from META
  List<Result> results = MetaTableAccessor.fullScanRegions(server.getConnection());
  // Get any new but slow-to-check-in region servers that joined the cluster
  Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
  // Set of offline servers to be returned
  Set<ServerName> offlineServers = new HashSet<>();
  // Iterate regions in META
  for (Result result : results) {
    if (result == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("null result from meta - ignoring but this is strange.");
      }
      continue;
    }
    // Keep track of replicas to close. These are the replicas of the regions that were
    // merged. The master might have closed them before, but it might not have, for
    // example because it crashed.
    PairOfSameType<HRegionInfo> p = MetaTableAccessor.getMergeRegions(result);
    if (p.getFirst() != null && p.getSecond() != null) {
      int numReplicas = getNumReplicas(server, p.getFirst().getTable());
      for (HRegionInfo merge : p) {
        for (int i = 1; i < numReplicas; i++) {
          replicasToClose.add(RegionReplicaUtil.getRegionInfoForReplica(merge, i));
        }
      }
    }
    RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
    if (rl == null) {
      continue;
    }
    HRegionLocation[] locations = rl.getRegionLocations();
    if (locations == null) {
      continue;
    }
    for (HRegionLocation hrl : locations) {
      if (hrl == null) continue;
      HRegionInfo regionInfo = hrl.getRegionInfo();
      if (regionInfo == null) continue;
      int replicaId = regionInfo.getReplicaId();
      State state = RegionStateStore.getRegionState(result, replicaId);
      // Also close the replicas of a split parent; the master might not have closed
      // them, for example because it crashed.
      if (replicaId == 0 && state.equals(State.SPLIT)) {
        for (HRegionLocation h : locations) {
          replicasToClose.add(h.getRegionInfo());
        }
      }
      ServerName lastHost = hrl.getServerName();
      ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId);
      regionStates.createRegionState(regionInfo, state, regionLocation, lastHost);
      if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
        // Region is not open (either offline or in transition), skip
        continue;
      }
      TableName tableName = regionInfo.getTable();
      if (!onlineServers.contains(regionLocation)) {
        // Region is located on a server that isn't online
        offlineServers.add(regionLocation);
      } else if (!disabledOrEnablingTables.contains(tableName)) {
        // Region is being served and on an active server
        // add only if region not in disabled or enabling table
        regionStates.regionOnline(regionInfo, regionLocation);
        balancer.regionOnline(regionInfo, regionLocation);
      }
      // this will be used in rolling restarts
      if (!disabledOrDisablingOrEnabling.contains(tableName)
          && !getTableStateManager().isTableState(tableName, TableState.State.ENABLED)) {
        setEnabledTable(tableName);
      }
    }
  }
  return offlineServers;
}
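As a hedged companion sketch (not part of AssignmentManager), the same MetaTableAccessor and RegionLocations calls used above can be reduced to a small helper that maps each server to the region replicas it hosts according to hbase:meta; the method name is hypothetical and the usual client imports are assumed.
private static Map<ServerName, List<HRegionInfo>> regionsByServer(Connection conn) throws IOException {
  Map<ServerName, List<HRegionInfo>> byServer = new HashMap<>();
  // Full scan of hbase:meta, as in rebuildUserRegions.
  for (Result result : MetaTableAccessor.fullScanRegions(conn)) {
    RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
    if (rl == null || rl.getRegionLocations() == null) {
      continue;
    }
    for (HRegionLocation hrl : rl.getRegionLocations()) {
      if (hrl == null || hrl.getRegionInfo() == null || hrl.getServerName() == null) {
        continue;
      }
      byServer.computeIfAbsent(hrl.getServerName(), s -> new ArrayList<>()).add(hrl.getRegionInfo());
    }
  }
  return byServer;
}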
use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
the class WALEditsReplaySink method replayEntries.
/**
 * Replay a batch of WAL entries for the same region directly into the newly assigned Region Server.
 * @param entries the WAL entries to replay, each paired with the region location to replay it against
 * @throws IOException on IO failure
 */
public void replayEntries(List<Pair<HRegionLocation, Entry>> entries) throws IOException {
  if (entries.isEmpty()) {
    return;
  }
  int batchSize = entries.size();
  Map<HRegionInfo, List<Entry>> entriesByRegion = new HashMap<>();
  HRegionLocation loc = null;
  Entry entry = null;
  List<Entry> regionEntries = null;
  // Build the action list.
  for (int i = 0; i < batchSize; i++) {
    loc = entries.get(i).getFirst();
    entry = entries.get(i).getSecond();
    if (entriesByRegion.containsKey(loc.getRegionInfo())) {
      regionEntries = entriesByRegion.get(loc.getRegionInfo());
    } else {
      regionEntries = new ArrayList<>();
      entriesByRegion.put(loc.getRegionInfo(), regionEntries);
    }
    regionEntries.add(entry);
  }
  long startTime = EnvironmentEdgeManager.currentTime();
  // replaying edits by region
  for (Map.Entry<HRegionInfo, List<Entry>> _entry : entriesByRegion.entrySet()) {
    HRegionInfo curRegion = _entry.getKey();
    List<Entry> allActions = _entry.getValue();
    // send edits in chunks
    int totalActions = allActions.size();
    int replayedActions = 0;
    int curBatchSize = 0;
    while (replayedActions < totalActions) {
      curBatchSize = (totalActions > (MAX_BATCH_SIZE + replayedActions)) ? MAX_BATCH_SIZE : (totalActions - replayedActions);
      replayEdits(loc, curRegion, allActions.subList(replayedActions, replayedActions + curBatchSize));
      replayedActions += curBatchSize;
    }
  }
  long endTime = EnvironmentEdgeManager.currentTime() - startTime;
  LOG.debug("Replayed " + entries.size() + " entries in batches, spent " + endTime + " ms");
  metrics.updateReplayTime(endTime);
  metrics.updateReplayBatchSize(batchSize);
  this.totalReplayedEdits.addAndGet(batchSize);
}
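For illustration only, a minimal sketch of how a caller might assemble the List<Pair<HRegionLocation, Entry>> that replayEntries expects, by pairing each WAL entry with the current location of its region; the helper name is hypothetical, and it assumes org.apache.hadoop.hbase.wal.WAL.Entry plus the standard client imports.
private static List<Pair<HRegionLocation, Entry>> pairWithLocations(Connection conn, TableName table, List<Entry> walEntries) throws IOException {
  List<Pair<HRegionLocation, Entry>> out = new ArrayList<>(walEntries.size());
  try (RegionLocator locator = conn.getRegionLocator(table)) {
    for (Entry entry : walEntries) {
      if (entry.getEdit().isEmpty()) {
        continue; // nothing to replay for this entry
      }
      // Locate the region currently serving the row of the entry's first cell.
      byte[] row = CellUtil.cloneRow(entry.getEdit().getCells().get(0));
      out.add(new Pair<>(locator.getRegionLocation(row), entry));
    }
  }
  return out;
}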
use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
the class TestAsyncProcess method testThreadCreation.
/**
 * This test simulates multiple regions on 2 servers. We should have 2 multi requests and
 * 2 threads: 1 per server, regardless of the number of regions.
 */
@Test
public void testThreadCreation() throws Exception {
  final int NB_REGS = 100;
  List<HRegionLocation> hrls = new ArrayList<>(NB_REGS);
  List<Get> gets = new ArrayList<>(NB_REGS);
  for (int i = 0; i < NB_REGS; i++) {
    HRegionInfo hri = new HRegionInfo(DUMMY_TABLE, Bytes.toBytes(i * 10L), Bytes.toBytes(i * 10L + 9L), false, i);
    HRegionLocation hrl = new HRegionLocation(hri, i % 2 == 0 ? sn : sn2);
    hrls.add(hrl);
    Get get = new Get(Bytes.toBytes(i * 10L));
    gets.add(get);
  }
  MyConnectionImpl2 con = new MyConnectionImpl2(hrls);
  MyAsyncProcess ap = new MyAsyncProcess(con, CONF, con.nbThreads);
  BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
  BufferedMutatorImpl mutator = new BufferedMutatorImpl(con, bufferParam, ap);
  HTable ht = new HTable(con, mutator);
  ht.multiAp = ap;
  ht.batch(gets, null);
  Assert.assertEquals(ap.nbActions.get(), NB_REGS);
  Assert.assertEquals("1 multi response per server", 2, ap.nbMultiResponse.get());
  Assert.assertEquals("1 thread per server", 2, con.nbThreads.get());
  int nbReg = 0;
  for (int i = 0; i < NB_REGS; i++) {
    if (con.usedRegions[i]) nbReg++;
  }
  Assert.assertEquals("nbReg=" + nbReg, nbReg, NB_REGS);
}
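To make the per-server expectation concrete, the sketch below (not part of the test class) groups a list of HRegionLocation by ServerName, which is essentially how AsyncProcess ends up with one multi request and one thread per server regardless of the region count; the helper name is hypothetical.
private static Map<ServerName, List<HRegionLocation>> groupByServer(List<HRegionLocation> locations) {
  Map<ServerName, List<HRegionLocation>> perServer = new HashMap<>();
  for (HRegionLocation hrl : locations) {
    // One bucket per region server: 100 regions spread over 2 servers yield 2 buckets.
    perServer.computeIfAbsent(hrl.getServerName(), s -> new ArrayList<>()).add(hrl);
  }
  return perServer;
}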
use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
the class Canary method sniff.
/*
 * Loops over the regions that belong to this table and outputs some information about their state.
 */
private static List<Future<Void>> sniff(final Admin admin, final Sink sink, HTableDescriptor tableDesc, ExecutorService executor, TaskType taskType, boolean rawScanEnabled) throws Exception {
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("reading list of regions for table %s", tableDesc.getTableName()));
  }
  Table table = null;
  try {
    table = admin.getConnection().getTable(tableDesc.getTableName());
  } catch (TableNotFoundException e) {
    return new ArrayList<>();
  } finally {
    if (table != null) {
      table.close();
    }
  }
  List<RegionTask> tasks = new ArrayList<>();
  RegionLocator regionLocator = null;
  try {
    regionLocator = admin.getConnection().getRegionLocator(tableDesc.getTableName());
    for (HRegionLocation location : regionLocator.getAllRegionLocations()) {
      ServerName rs = location.getServerName();
      HRegionInfo region = location.getRegionInfo();
      tasks.add(new RegionTask(admin.getConnection(), region, rs, sink, taskType, rawScanEnabled));
    }
  } finally {
    if (regionLocator != null) {
      regionLocator.close();
    }
  }
  return executor.invokeAll(tasks);
}
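The same enumeration the Canary performs can be done with plain client code. A minimal sketch, assuming an already-open Connection and a hypothetical helper name, that prints every region of a table together with the server currently hosting it:
private static void printRegionAssignments(Connection conn, TableName table) throws IOException {
  try (RegionLocator locator = conn.getRegionLocator(table)) {
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      HRegionInfo region = location.getRegionInfo();
      ServerName rs = location.getServerName();
      System.out.println(region.getRegionNameAsString() + " -> " + (rs == null ? "unassigned" : rs));
    }
  }
}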