Use of org.apache.accumulo.fate.zookeeper.ZooReader in project accumulo by Apache.
From the class VolumeIT, method testReplaceVolume:
/**
 * Verifies that data volumes can be replaced: stops the cluster, renames both volume
 * directories (v1 -> v8, v2 -> v9), rewrites accumulo-site.xml with the new volume list
 * plus the volume-replacement mappings, restarts, and checks that all tables (and the
 * root tablet) now reference only the new volumes.
 *
 * @param cleanShutdown if true, shut down via Admin stopAll (flushing all data) before
 *        stopping the cluster; if false, unflushed data must survive via recovery
 */
private void testReplaceVolume(boolean cleanShutdown) throws Exception {
  String[] tableNames = getUniqueNames(3);
  verifyVolumesUsed(tableNames[0], false, v1, v2);
  // write to 2nd table, but do not flush data to disk before shutdown
  writeData(tableNames[1], cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)));
  if (cleanShutdown)
    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
  cluster.stop();
  // Rename the volume directories on disk while the cluster is down
  File v1f = new File(v1.toUri());
  File v8f = new File(new File(v1.getParent().toUri()), "v8");
  Assert.assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
  Path v8 = new Path(v8f.toURI());
  File v2f = new File(v2.toUri());
  File v9f = new File(new File(v2.getParent().toUri()), "v9");
  Assert.assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
  Path v9 = new Path(v9f.toURI());
  // Point the instance at the renamed volumes and declare the old->new replacements
  Configuration conf = new Configuration(false);
  conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
  conf.set(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
  conf.set(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
  // try-with-resources: the stream is closed even if writeXml throws
  try (BufferedOutputStream fos = new BufferedOutputStream(
      new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")))) {
    conf.writeXml(fos);
  }
  // start cluster and verify that volumes were replaced
  cluster.start();
  verifyVolumesUsed(tableNames[0], true, v8, v9);
  verifyVolumesUsed(tableNames[1], true, v8, v9);
  // verify writes to new dir
  getConnector().tableOperations().compact(tableNames[0], null, null, true, true);
  getConnector().tableOperations().compact(tableNames[1], null, null, true, true);
  verifyVolumesUsed(tableNames[0], true, v8, v9);
  verifyVolumesUsed(tableNames[1], true, v8, v9);
  // check that root tablet is not on volume 1 or 2
  ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
  String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
  String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
  Assert.assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));
  // Clone and flush metadata/root so all persisted references are re-verified
  getConnector().tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());
  getConnector().tableOperations().flush(MetadataTable.NAME, null, null, true);
  getConnector().tableOperations().flush(RootTable.NAME, null, null, true);
  verifyVolumesUsed(tableNames[0], true, v8, v9);
  verifyVolumesUsed(tableNames[1], true, v8, v9);
  verifyVolumesUsed(tableNames[2], true, v8, v9);
}
Use of org.apache.accumulo.fate.zookeeper.ZooReader in project accumulo by Apache.
From the class VolumeIT, method testRemoveVolumes:
/**
 * Verifies that a volume can be decommissioned: cleanly shuts down the cluster, rewrites
 * accumulo-site.xml so only v2 remains in the instance volume list, restarts, and checks
 * that after compaction all table data (and the root tablet) lives solely on v2.
 */
@Test
public void testRemoveVolumes() throws Exception {
  String[] tableNames = getUniqueNames(2);
  verifyVolumesUsed(tableNames[0], false, v1, v2);
  // Clean shutdown so all data is flushed before the volume list changes
  Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
  cluster.stop();
  Configuration conf = new Configuration(false);
  conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
  conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
  // try-with-resources: the stream is closed even if writeXml throws
  try (BufferedOutputStream fos = new BufferedOutputStream(
      new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")))) {
    conf.writeXml(fos);
  }
  // start cluster and verify that volume was decommisioned
  cluster.start();
  Connector conn = cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
  conn.tableOperations().compact(tableNames[0], null, null, true, true);
  verifyVolumesUsed(tableNames[0], true, v2);
  // check that root tablet is not on volume 1
  ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
  String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
  String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
  Assert.assertTrue(rootTabletDir.startsWith(v2.toString()));
  // Clone and flush metadata/root so all persisted references are re-verified
  conn.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(), new HashSet<>());
  conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
  conn.tableOperations().flush(RootTable.NAME, null, null, true);
  verifyVolumesUsed(tableNames[0], true, v2);
  verifyVolumesUsed(tableNames[1], true, v2);
}
Use of org.apache.accumulo.fate.zookeeper.ZooReader in project accumulo by Apache.
From the class ThriftServerBindsBeforeZooKeeperLockIT, method testMasterService:
/**
 * Checks that a standby Master binds its Thrift service port before it acquires the
 * ZooKeeper master lock: once an active master is observed holding the lock, a second
 * master is started on a free port, and the test passes as soon as a TCP connection
 * to that port succeeds.
 */
@Test
public void testMasterService() throws Exception {
final MiniAccumuloClusterImpl cluster = (MiniAccumuloClusterImpl) getCluster();
final ZooKeeperInstance inst = new ZooKeeperInstance(cluster.getClientConfig());
// Wait for the Master to grab its lock (a child node under ZMASTER_LOCK appears)
while (true) {
final ZooReader reader = new ZooReader(inst.getZooKeepers(), 30000);
try {
List<String> locks = reader.getChildren(Constants.ZROOT + "/" + inst.getInstanceID() + Constants.ZMASTER_LOCK);
if (locks.size() > 0) {
break;
}
} catch (Exception e) {
LOG.debug("Failed to find active master location, retrying", e);
Thread.sleep(1000);
}
}
LOG.debug("Found active master");
// Start a standby master; it cannot take the ZK lock while the first master holds it,
// but its Thrift port should still accept connections.
while (true) {
int freePort = PortUtils.getRandomFreePort();
Process master = null;
try {
LOG.debug("Starting standby master on {}", freePort);
master = startProcess(cluster, ServerType.MASTER, freePort);
while (true) {
Socket s = null;
try {
s = new Socket("localhost", freePort);
if (s.isConnected()) {
// Pass -- the standby accepted a connection even though it lacks the lock
return;
}
} catch (Exception e) {
LOG.debug("Caught exception trying to connect to Master", e);
} finally {
if (null != s) {
s.close();
}
}
// Wait before trying again
Thread.sleep(1000);
// The chosen port may have been taken by another process and the master may have
// died trying to bind it. Pick a new port and restart it in that case.
if (!master.isAlive()) {
freePort = PortUtils.getRandomFreePort();
LOG.debug("Master died, restarting it listening on {}", freePort);
master = startProcess(cluster, ServerType.MASTER, freePort);
}
}
} finally {
// Always kill the standby master process, even when the test returns early
if (null != master) {
master.destroyForcibly();
}
}
}
}
Use of org.apache.accumulo.fate.zookeeper.ZooReader in project accumulo by Apache.
From the class MultiTserverReplicationIT, method tserverReplicationServicePortsAreAdvertised:
/**
 * Verifies that every tablet server advertises its replication service address in
 * ZooKeeper, and that the number of advertised addresses matches the number of tservers.
 */
@Test
public void tserverReplicationServicePortsAreAdvertised() throws Exception {
  // Wait for the cluster to be up
  Connector conn = getConnector();
  Instance inst = conn.getInstance();
  // Wait for a tserver to come up to fulfill this request
  conn.tableOperations().create("foo");
  try (Scanner scanner = conn.createScanner("foo", Authorizations.EMPTY)) {
    Assert.assertEquals(0, Iterables.size(scanner));
    ZooReader zk = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
    // All registered tablet servers
    Set<String> tservers = new HashSet<>();
    tservers.addAll(zk.getChildren(ZooUtil.getRoot(inst) + Constants.ZTSERVERS));
    // Replication service addresses advertised in ZooKeeper, one expected per tserver
    Set<HostAndPort> advertised = new HashSet<>();
    for (String tserver : tservers) {
      try {
        byte[] data = zk.getData(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_TSERVERS + "/" + tserver, null);
        advertised.add(HostAndPort.fromString(new String(data, UTF_8)));
      } catch (Exception e) {
        log.error("Could not find port for {}", tserver, e);
        Assert.fail("Did not find replication port advertisement for " + tserver);
      }
    }
    // Each tserver should also have an equal number of replication services running internally
    Assert.assertEquals("Expected an equal number of replication servicers and tservers", tservers.size(), advertised.size());
  }
}
Use of org.apache.accumulo.fate.zookeeper.ZooReader in project accumulo by Apache.
From the class ReadWriteIT, method sunnyDay:
/**
 * End-to-end smoke test: starts the monitor, creates a table, ingests and verifies
 * data, fetches the monitor's web page (over HTTPS if the standalone cluster has a
 * monitor SSL keystore configured), shuts the cluster down cleanly (waiting for the
 * master lock to be released), then restarts it.
 */
@Test
public void sunnyDay() throws Exception {
  // Start accumulo, create a table, insert some data, verify we can read it out.
  // Shutdown cleanly.
  log.debug("Starting Monitor");
  cluster.getClusterControl().startAllServers(ServerType.MONITOR);
  Connector connector = getConnector();
  String tableName = getUniqueNames(1)[0];
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
  verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
  // Poll ZooKeeper until the monitor advertises its HTTP address
  String monitorLocation = null;
  while (null == monitorLocation) {
    monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
    if (null == monitorLocation) {
      log.debug("Could not fetch monitor HTTP address from zookeeper");
      Thread.sleep(2000);
    }
  }
  String scheme = "http://";
  if (getCluster() instanceof StandaloneAccumuloCluster) {
    StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
    File accumuloSite = new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo-site.xml");
    if (accumuloSite.isFile()) {
      Configuration conf = new Configuration(false);
      conf.addResource(new Path(accumuloSite.toURI()));
      String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
      if (null != monitorSslKeystore) {
        log.info("Setting scheme to HTTPS since monitor ssl keystore configuration was observed in {}", accumuloSite);
        scheme = "https://";
        // "TLS" is the standard JSSE protocol name; "SSL" is legacy and insecure.
        // Trust-all manager/verifier since the monitor uses a self-signed test cert.
        SSLContext ctx = SSLContext.getInstance("TLS");
        TrustManager[] tm = new TrustManager[] { new TestTrustManager() };
        ctx.init(new KeyManager[0], tm, new SecureRandom());
        SSLContext.setDefault(ctx);
        HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
        HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
      }
    } else {
      log.info("{} is not a normal file, not checking for monitor running with SSL", accumuloSite);
    }
  }
  URL url = new URL(scheme + monitorLocation);
  log.debug("Fetching web page {}", url);
  // try-with-resources so the connection stream is closed even if readAll throws
  String result;
  try (java.io.InputStream in = url.openStream()) {
    result = FunctionalTestUtils.readAll(in);
  }
  assertTrue(result.length() > 100);
  log.debug("Stopping accumulo cluster");
  ClusterControl control = cluster.getClusterControl();
  control.adminStopAll();
  // Wait for the master to release its ZooKeeper lock before stopping support servers
  ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
  ZooCache zcache = new ZooCache(zreader, null);
  byte[] masterLockData;
  do {
    masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
    if (null != masterLockData) {
      log.info("Master lock is still held");
      Thread.sleep(1000);
    }
  } while (null != masterLockData);
  control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
  control.stopAllServers(ServerType.MONITOR);
  control.stopAllServers(ServerType.TRACER);
  log.debug("success!");
  // Restarting everything
  cluster.start();
}
Aggregations