Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
The class TestZooKeeper, method testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE.
/**
 * Test that getChildDataAndWatchForNewChildren does not fail with an NPE when
 * invoked with a non-existent node.
 */
@Test
@SuppressWarnings("deprecation")
public void testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE() throws Exception {
  ZooKeeperWatcher zkw =
      new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), name.getMethodName(), null);
  ZKUtil.getChildDataAndWatchForNewChildren(zkw, "/wrongNode");
}
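For comparison, here is a minimal sketch (not part of the HBase test) of how the same deprecated call might be used against a znode that does exist, with the watcher closed afterwards. The "/hbase/rs" path, the reuse of TEST_UTIL and LOG from the test class, and the NodeAndData accessors are assumptions and should be verified against your HBase version.

// Hypothetical sketch: read child data under an existing base znode and close the watcher.
ZooKeeperWatcher zkw =
    new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "child-data-sketch", null);
try {
  List<ZKUtil.NodeAndData> children =
      ZKUtil.getChildDataAndWatchForNewChildren(zkw, "/hbase/rs");
  if (children != null) {
    for (ZKUtil.NodeAndData child : children) {
      LOG.info("znode " + child.getNode() + " has "
          + (child.getData() == null ? 0 : child.getData().length) + " bytes of data");
    }
  }
} finally {
  zkw.close();
}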
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
The class HBaseAdmin, method split.
/**
 * {@inheritDoc}
 */
@Override
public void split(final TableName tableName, final byte[] splitPoint) throws IOException {
  ZooKeeperWatcher zookeeper = null;
  try {
    checkTableExists(tableName);
    zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
        new ThrowableAbortable());
    List<Pair<HRegionInfo, ServerName>> pairs;
    if (TableName.META_TABLE_NAME.equals(tableName)) {
      pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
    } else {
      pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
    }
    for (Pair<HRegionInfo, ServerName> pair : pairs) {
      // May not be a server for a particular row
      if (pair.getSecond() == null)
        continue;
      HRegionInfo r = pair.getFirst();
      // check for parents
      if (r.isSplitParent())
        continue;
      // if a split point given, only split that particular region
      if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID
          || (splitPoint != null && !r.containsRow(splitPoint)))
        continue;
      // call out to region server to do split now
      split(pair.getSecond(), pair.getFirst(), splitPoint);
    }
  } finally {
    if (zookeeper != null) {
      zookeeper.close();
    }
  }
}
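A minimal client-side sketch (not from the source) of triggering this split through the public Admin API; the table name, split row, and the surrounding Configuration "conf" are hypothetical, and the enclosing method is assumed to declare IOException.

// Hypothetical usage: split the region of "test_table" containing row "row-5000" at that row.
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  admin.split(TableName.valueOf("test_table"), Bytes.toBytes("row-5000"));
}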
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
The class HRegionServer, method getLastFailedRSFromZK.
/**
 * Return the last failed RS name under /hbase/recovering-regions/encodedRegionName
 * @param encodedRegionName
 * @throws KeeperException
 */
private String getLastFailedRSFromZK(String encodedRegionName) throws KeeperException {
  String result = null;
  long maxZxid = 0;
  ZooKeeperWatcher zkw = this.getZooKeeper();
  String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName);
  List<String> failedServers = ZKUtil.listChildrenNoWatch(zkw, nodePath);
  if (failedServers == null || failedServers.isEmpty()) {
    return result;
  }
  for (String failedServer : failedServers) {
    String rsPath = ZKUtil.joinZNode(nodePath, failedServer);
    Stat stat = new Stat();
    ZKUtil.getDataNoWatch(zkw, rsPath, stat);
    if (maxZxid < stat.getCzxid()) {
      maxZxid = stat.getCzxid();
      result = failedServer;
    }
  }
  return result;
}
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
The class HRegionServer, method updateRecoveringRegionLastFlushedSequenceId.
/**
 * A helper function to store the last flushed sequence Id with the previous failed RS for a
 * recovering region. The Id is used to skip WAL edits which are already flushed. Since the
 * flushed sequence id is only valid for each RS, we associate the Id with the corresponding
 * failed RS.
 * @throws KeeperException
 * @throws IOException
 */
private void updateRecoveringRegionLastFlushedSequenceId(Region r)
    throws KeeperException, IOException {
  if (!r.isRecovering()) {
    // return immediately for non-recovering regions
    return;
  }
  HRegionInfo regionInfo = r.getRegionInfo();
  ZooKeeperWatcher zkw = getZooKeeper();
  String previousRSName = this.getLastFailedRSFromZK(regionInfo.getEncodedName());
  Map<byte[], Long> maxSeqIdInStores = r.getMaxStoreSeqId();
  long minSeqIdForLogReplay = -1;
  for (Long storeSeqIdForReplay : maxSeqIdInStores.values()) {
    if (minSeqIdForLogReplay == -1 || storeSeqIdForReplay < minSeqIdForLogReplay) {
      minSeqIdForLogReplay = storeSeqIdForReplay;
    }
  }
  try {
    long lastRecordedFlushedSequenceId = -1;
    String nodePath = ZKUtil.joinZNode(this.zooKeeper.znodePaths.recoveringRegionsZNode,
        regionInfo.getEncodedName());
    // recovering-region level
    byte[] data;
    try {
      data = ZKUtil.getData(zkw, nodePath);
    } catch (InterruptedException e) {
      throw new InterruptedIOException();
    }
    if (data != null) {
      lastRecordedFlushedSequenceId = ZKSplitLog.parseLastFlushedSequenceIdFrom(data);
    }
    if (data == null || lastRecordedFlushedSequenceId < minSeqIdForLogReplay) {
      ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
    }
    if (previousRSName != null) {
      // one level deeper for the failed RS
      nodePath = ZKUtil.joinZNode(nodePath, previousRSName);
      ZKUtil.setData(zkw, nodePath,
          ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores));
      LOG.debug("Update last flushed sequence id of region " + regionInfo.getEncodedName()
          + " for " + previousRSName);
    } else {
      LOG.warn("Can't find failed region server for recovering region "
          + regionInfo.getEncodedName());
    }
  } catch (NoNodeException ignore) {
    LOG.debug("Region " + regionInfo.getEncodedName()
        + " must have completed recovery because its recovery znode has been removed", ignore);
  }
}
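The method writes the minimum flushed sequence id at the recovering-region znode and a per-store breakdown one level deeper under the failed RS name. A minimal sketch (not from the source) of reading the region-level value back follows; "region1" is a hypothetical encoded region name, the code assumes the HRegionServer context, and the enclosing method is assumed to declare KeeperException and IOException as the method above does.

// Hypothetical sketch: read the last flushed sequence id recorded for a recovering region.
ZooKeeperWatcher zkw = getZooKeeper();
String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, "region1");
byte[] data;
try {
  data = ZKUtil.getData(zkw, nodePath);
} catch (InterruptedException e) {
  throw new InterruptedIOException();
}
if (data != null) {
  LOG.debug("Last flushed sequence id recorded for region1: "
      + ZKSplitLog.parseLastFlushedSequenceIdFrom(data));
}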
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
The class RegionMover, method getServerNameForRegion.
/**
 * Get the server name listed in hbase:meta as hosting the given region. The name is
 * hostname + port + startcode, comma-delimited. Can return null.
 * @param admin
 * @param region
 * @return regionServer hosting the given region
 * @throws IOException
 */
private String getServerNameForRegion(Admin admin, HRegionInfo region) throws IOException {
  String server = null;
  if (!admin.isTableEnabled(region.getTable())) {
    return null;
  }
  if (region.isMetaRegion()) {
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(admin.getConfiguration(), "region_mover", null);
    MetaTableLocator locator = new MetaTableLocator();
    int maxWaitInSeconds =
        admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
    try {
      server = locator.waitMetaRegionLocation(zkw, maxWaitInSeconds * 1000).toString() + ",";
    } catch (InterruptedException e) {
      LOG.error("Interrupted while waiting for location of Meta", e);
    } finally {
      if (zkw != null) {
        zkw.close();
      }
    }
  } else {
    Table table = admin.getConnection().getTable(TableName.META_TABLE_NAME);
    try {
      Get get = new Get(region.getRegionName());
      get.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      get.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
      Result result = table.get(get);
      if (result != null) {
        byte[] servername = result.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        byte[] startcode = result.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
        if (servername != null) {
          server = Bytes.toString(servername).replaceFirst(":", ",") + "," + Bytes.toLong(startcode);
        }
      }
    } catch (IOException e) {
      LOG.error("Could not get Server Name for region:" + region.getEncodedName(), e);
      throw e;
    } finally {
      table.close();
    }
  }
  return server;
}
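A minimal sketch (not from the source) of how a caller inside RegionMover might use the returned "hostname,port,startcode" string, for example to skip regions already on a target server; the target server string and the startsWith comparison are assumptions for illustration.

// Hypothetical usage: compare the hosting server against a target region server.
String currentServer = getServerNameForRegion(admin, region);
String targetServer = "rs1.example.com,16020,1500000000000";
if (currentServer != null && !currentServer.startsWith(targetServer)) {
  LOG.info("Region " + region.getEncodedName() + " is hosted on " + currentServer
      + ", not on " + targetServer);
}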