Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
From the class TestDistributedLogSplitting, method testSameVersionUpdatesRecovery.
@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testSameVersionUpdatesRecovery() throws Exception {
  LOG.info("testSameVersionUpdatesRecovery");
  conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);
  final AtomicLong sequenceId = new AtomicLong(100);
  final int NUM_REGIONS_TO_CREATE = 40;
  final int NUM_LOG_LINES = 1000;
  // Turn off load balancing to prevent regions from moving around;
  // otherwise they will consume recovered.edits.
  master.balanceSwitch(false);
  List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  Table ht = installTable(zkw, name.getMethodName(), "family", NUM_REGIONS_TO_CREATE);
  try {
    List<HRegionInfo> regions = null;
    HRegionServer hrs = null;
    // Pick a region server that is not carrying hbase:meta.
    for (int i = 0; i < NUM_RS; i++) {
      boolean isCarryingMeta = false;
      hrs = rsts.get(i).getRegionServer();
      regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
      for (HRegionInfo region : regions) {
        if (region.isMetaRegion()) {
          isCarryingMeta = true;
          break;
        }
      }
      if (isCarryingMeta) {
        continue;
      }
      break;
    }
    LOG.info("#regions = " + regions.size());
    Iterator<HRegionInfo> it = regions.iterator();
    while (it.hasNext()) {
      HRegionInfo region = it.next();
      if (region.isMetaTable()
          || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
        it.remove();
      }
    }
    if (regions.isEmpty()) return;
    HRegionInfo curRegionInfo = regions.get(0);
    byte[] startRow = curRegionInfo.getStartKey();
    if (startRow == null || startRow.length == 0) {
      startRow = new byte[] { 0, 0, 0, 0, 1 };
    }
    byte[] row = Bytes.incrementBytes(startRow, 1);
    // Use the last 5 bytes because HBaseTestingUtility.createMultiRegions uses 5-byte keys.
    row = Arrays.copyOfRange(row, 3, 8);
    long value = 0;
    TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] family = Bytes.toBytes("family");
    byte[] qualifier = Bytes.toBytes("c1");
    long timeStamp = System.currentTimeMillis();
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(family));
    final WAL wal = hrs.getWAL(curRegionInfo);
    // Append NUM_LOG_LINES updates to the same cell, each with a larger value.
    for (int i = 0; i < NUM_LOG_LINES; i += 1) {
      WALEdit e = new WALEdit();
      value++;
      e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
      wal.append(curRegionInfo,
        new WALKey(curRegionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()),
        e, true);
    }
    wal.sync();
    wal.shutdown();
    // Wait for the abort to complete.
    this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
    // Verify we got the last value.
    LOG.info("Verification Starts...");
    Get g = new Get(row);
    Result r = ht.get(g);
    long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
    assertEquals(value, theStoredVal);
    // Verify again after a flush.
    LOG.info("Verification after flush...");
    TEST_UTIL.getAdmin().flush(tableName);
    r = ht.get(g);
    theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
    assertEquals(value, theStoredVal);
  } finally {
    if (ht != null) ht.close();
    if (zkw != null) zkw.close();
  }
}
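Everything in this test funnels through a single ZooKeeperWatcher that is created up front and closed in the finally block. A minimal sketch of that lifecycle on its own, assuming a reachable quorum in the Configuration (the class name and identifier string are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class WatcherLifecycle {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The identifier string shows up in logs; the Abortable may be null, as above.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "watcher-lifecycle-example", null);
    try {
      // Hand zkw to helpers that need a live ZooKeeper session,
      // as installTable and abortRSAndWaitForRecovery do in the test.
    } finally {
      zkw.close(); // releases the underlying ZooKeeper session
    }
  }
}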
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
From the class TestActiveMasterManager, method testRestartMaster.
@Test
public void testRestartMaster() throws IOException, KeeperException {
  ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
    "testActiveMasterManagerFromZK", null, true);
  try {
    ZKUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode);
    ZKUtil.deleteNode(zk, zk.znodePaths.clusterStateZNode);
  } catch (KeeperException.NoNodeException nne) {
    // The znodes may not exist yet; nothing to clean up.
  }
  // Create the master node with a dummy address.
  ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
  // Should not have a master yet.
  DummyMaster dummyMaster = new DummyMaster(zk, master);
  ClusterStatusTracker clusterStatusTracker = dummyMaster.getClusterStatusTracker();
  ActiveMasterManager activeMasterManager = dummyMaster.getActiveMasterManager();
  assertFalse(activeMasterManager.clusterHasActiveMaster.get());
  // First test becoming the active master uninterrupted.
  MonitoredTask status = Mockito.mock(MonitoredTask.class);
  clusterStatusTracker.setClusterUp();
  activeMasterManager.blockUntilBecomingActiveMaster(100, status);
  assertTrue(activeMasterManager.clusterHasActiveMaster.get());
  assertMaster(zk, master);
  // Now pretend a master restart.
  DummyMaster secondDummyMaster = new DummyMaster(zk, master);
  ActiveMasterManager secondActiveMasterManager = secondDummyMaster.getActiveMasterManager();
  assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
  activeMasterManager.blockUntilBecomingActiveMaster(100, status);
  assertTrue(activeMasterManager.clusterHasActiveMaster.get());
  assertMaster(zk, master);
}
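The try/catch at the top of this test is a common reset idiom: delete well-known znodes unconditionally and tolerate NoNodeException when they are absent. A minimal sketch of the same idiom, assuming a live watcher created as in the test (the class and helper names are illustrative):

import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class ZNodeReset {
  // Delete the master address znode if present; absence is not an error.
  static void resetMasterZNode(ZooKeeperWatcher zkw) throws KeeperException {
    try {
      ZKUtil.deleteNode(zkw, zkw.znodePaths.masterAddressZNode);
    } catch (KeeperException.NoNodeException nne) {
      // Nothing to delete; the cluster has no registered master yet.
    }
  }
}

ZKUtil.deleteNodeFailSilent, used in the ZNodeClearer snippet further down, packages the same tolerance for missing nodes.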
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
From the class IntegrationTestZKAndFSPermissions, method testZNodeACLs.
private void testZNodeACLs() throws IOException, KeeperException, InterruptedException {
  ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, "IntegrationTestZnodeACLs", null);
  RecoverableZooKeeper zk = ZKUtil.connect(this.conf, watcher);
  String baseZNode = watcher.znodePaths.baseZNode;
  LOG.info("");
  LOG.info("***********************************************************************************");
  LOG.info("Checking ZK permissions, root znode: " + baseZNode);
  LOG.info("***********************************************************************************");
  LOG.info("");
  checkZnodePermsRecursive(watcher, zk, baseZNode);
  LOG.info("Checking ZK permissions: SUCCESS");
}
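checkZnodePermsRecursive is not shown here; the sketch below is a simplified stand-in that walks the znode tree the same way, reading each node's ACL through the RecoverableZooKeeper handle obtained from ZKUtil.connect. Printing the ACLs is illustrative; the integration test asserts expected permissions instead.

import java.util.List;
import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;

public class ZNodeAclWalk {
  // Depth-first walk: log this znode's ACL, then recurse into its children.
  static void walk(RecoverableZooKeeper zk, String znode)
      throws KeeperException, InterruptedException {
    List<ACL> acls = zk.getAcl(znode, new Stat());
    System.out.println(znode + " -> " + acls);
    for (String child : zk.getChildren(znode, false)) {
      walk(("/".equals(znode) ? "/" : znode + "/") + child == null ? znode : ("/".equals(znode) ? "/" : znode + "/") + child, zk);
    }
  }

  private static void walk(String znode, RecoverableZooKeeper zk)
      throws KeeperException, InterruptedException {
    walk(zk, znode);
  }
}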
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
From the class ZNodeClearer, method clear.
/**
 * Delete the master znode if its content (a ServerName string) is the same
 * as the one in the znode file (env: HBASE_ZNODE_FILE). In case of master-RS
 * collocation we extract the ServerName string from the rsZnode path (HBASE-14861).
 * @return true on successful deletion, false otherwise.
 */
public static boolean clear(Configuration conf) {
  Configuration tempConf = new Configuration(conf);
  tempConf.setInt("zookeeper.recovery.retry", 0);
  ZooKeeperWatcher zkw;
  try {
    zkw = new ZooKeeperWatcher(tempConf, "clean znode for master", new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    });
  } catch (IOException e) {
    LOG.warn("Can't connect to zookeeper to read the master znode", e);
    return false;
  }
  String znodeFileContent;
  try {
    znodeFileContent = ZNodeClearer.readMyEphemeralNodeOnDisk();
    if (ZNodeClearer.tablesOnMaster(conf)) {
      // In case of a master crash, also remove the rsZnode since the master is also a region server.
      ZKUtil.deleteNodeFailSilent(zkw, znodeFileContent);
      return MasterAddressTracker.deleteIfEquals(zkw,
        ZNodeClearer.parseMasterServerName(znodeFileContent));
    } else {
      return MasterAddressTracker.deleteIfEquals(zkw, znodeFileContent);
    }
  } catch (FileNotFoundException fnfe) {
    // If no file, just keep going -- return success.
    LOG.warn("Can't find the znode file; presume non-fatal", fnfe);
    return true;
  } catch (IOException e) {
    LOG.warn("Can't read the content of the znode file", e);
    return false;
  } catch (KeeperException e) {
    LOG.warn("ZooKeeper exception deleting znode", e);
    return false;
  } finally {
    zkw.close();
  }
}
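Two details make this watcher suitable for one-shot cleanup: zookeeper.recovery.retry is set to 0 on a copied Configuration, so a dead quorum fails fast without mutating the caller's conf, and the Abortable is a no-op because there is no running server to bring down. A condensed sketch of that construction (the class and method names are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class FailFastWatcher {
  static ZooKeeperWatcher failFastWatcher(Configuration conf) throws IOException {
    // Copy so the retry override does not leak into the shared conf.
    Configuration tempConf = new Configuration(conf);
    tempConf.setInt("zookeeper.recovery.retry", 0);
    return new ZooKeeperWatcher(tempConf, "fail-fast-example", new Abortable() {
      @Override
      public void abort(String why, Throwable e) {
        // Nothing to shut down; callers handle failures via return values.
      }

      @Override
      public boolean isAborted() {
        return false;
      }
    });
  }
}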
Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.
From the class RegionServerFlushTableProcedureManager, method initialize.
/**
 * Initialize this region server flush procedure manager.
 * Uses a ZooKeeper-based member controller.
 * @param rss region server
 * @throws KeeperException if zookeeper cannot be reached
 */
@Override
public void initialize(RegionServerServices rss) throws KeeperException {
  this.rss = rss;
  ZooKeeperWatcher zkw = rss.getZooKeeper();
  this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
    MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE);
  Configuration conf = rss.getConfiguration();
  long keepAlive = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT);
  int opThreads = conf.getInt(FLUSH_REQUEST_THREADS_KEY, FLUSH_REQUEST_THREADS_DEFAULT);
  // Create the actual flush table procedure member.
  ThreadPoolExecutor pool =
    ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
  this.member = new ProcedureMember(memberRpcs, pool, new FlushTableSubprocedureBuilder());
}
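The only tunables read by initialize() are the flush timeout and the operation thread count, both resolved from the region server's Configuration. A sketch of overriding them, assuming the key constants declared on RegionServerFlushTableProcedureManager (the ones the snippet reads) are visible to the caller; the values and class name are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager;

public class FlushPoolTuning {
  static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Same keys initialize() reads above; 20 threads / 120 s are illustrative.
    conf.setInt(RegionServerFlushTableProcedureManager.FLUSH_REQUEST_THREADS_KEY, 20);
    conf.setLong(RegionServerFlushTableProcedureManager.FLUSH_TIMEOUT_MILLIS_KEY, 120000L);
    return conf;
  }
}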