Use of org.apache.accumulo.core.client.ZooKeeperInstance in project YCSB (by brianfrankcooper).
Class AccumuloClient, method init:
/**
 * Initializes the Accumulo connection for this YCSB client instance.
 *
 * <p>Reads the column family, instance name, ZooKeeper quorum, and credentials
 * from the workload properties, then obtains a {@code Connector} from a
 * {@code ZooKeeperInstance}.
 *
 * @throws DBException if the connection or authentication to Accumulo fails
 */
@Override
public void init() throws DBException {
    // Column family used for all reads/writes; supplied by workload configuration.
    colFam = new Text(getProperties().getProperty("accumulo.columnFamily"));
    inst = new ZooKeeperInstance(getProperties().getProperty("accumulo.instanceName"),
        getProperties().getProperty("accumulo.zooKeepers"));
    try {
        String principal = getProperties().getProperty("accumulo.username");
        AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password"));
        connector = inst.getConnector(principal, token);
    } catch (AccumuloException | AccumuloSecurityException e) {
        // Multi-catch: both failure modes are wrapped identically, preserving the cause.
        throw new DBException(e);
    }
    // The old ZooKeeper-based producer/consumer mode was removed; warn anyone who still sets the flag.
    if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) {
        System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work.");
    }
}
Use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo (by apache).
Class VolumeIT, method testReplaceVolume:
/**
 * Verifies that Accumulo replaces old volumes (v1, v2) with new ones (v8, v9)
 * after the underlying directories are renamed on disk and
 * {@code instance.volumes.replacements} is configured.
 *
 * @param cleanShutdown whether to stop all servers gracefully before renaming,
 *        so the second table's unflushed data must be recovered from the WAL
 * @throws Exception on any cluster, I/O, or verification failure
 */
private void testReplaceVolume(boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);
    verifyVolumesUsed(tableNames[0], false, v1, v2);
    // write to 2nd table, but do not flush data to disk before shutdown
    writeData(tableNames[1], cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)));
    if (cleanShutdown)
        Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();
    // Rename the volume directories on disk: v1 -> v8, v2 -> v9.
    File v1f = new File(v1.toUri());
    File v8f = new File(new File(v1.getParent().toUri()), "v8");
    Assert.assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
    Path v8 = new Path(v8f.toURI());
    File v2f = new File(v2.toUri());
    File v9f = new File(new File(v2.getParent().toUri()), "v9");
    Assert.assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
    Path v9 = new Path(v9f.toURI());
    // Rewrite accumulo-site.xml to point at the renamed volumes and declare the replacements.
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
    conf.set(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
    conf.set(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    // try-with-resources: the original leaked the stream if writeXml threw.
    try (BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")))) {
        conf.writeXml(fos);
    }
    // start cluster and verify that volumes were replaced
    cluster.start();
    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);
    // verify writes to new dir
    getConnector().tableOperations().compact(tableNames[0], null, null, true, true);
    getConnector().tableOperations().compact(tableNames[1], null, null, true, true);
    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);
    // check that root tablet is not on volume 1 or 2
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    Assert.assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));
    // Cloning plus metadata/root flushes exercises volume handling on derived tables too.
    getConnector().tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());
    getConnector().tableOperations().flush(MetadataTable.NAME, null, null, true);
    getConnector().tableOperations().flush(RootTable.NAME, null, null, true);
    verifyVolumesUsed(tableNames[0], true, v8, v9);
    verifyVolumesUsed(tableNames[1], true, v8, v9);
    verifyVolumesUsed(tableNames[2], true, v8, v9);
}
Use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo (by apache).
Class VolumeIT, method testRemoveVolumes:
/**
 * Verifies that a volume (v1) can be decommissioned: after restricting
 * {@code instance.volumes} to v2 only and restarting, all tablet data —
 * including the root tablet — must live on v2.
 *
 * @throws Exception on any cluster, I/O, or verification failure
 */
@Test
public void testRemoveVolumes() throws Exception {
    String[] tableNames = getUniqueNames(2);
    verifyVolumesUsed(tableNames[0], false, v1, v2);
    Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    cluster.stop();
    // Rewrite accumulo-site.xml so only v2 remains configured.
    Configuration conf = new Configuration(false);
    conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
    conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
    // try-with-resources: the original leaked the stream if writeXml threw.
    try (BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")))) {
        conf.writeXml(fos);
    }
    // start cluster and verify that volume was decommisioned
    cluster.start();
    Connector conn = cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    // A full compaction forces files to be rewritten onto the remaining volume.
    conn.tableOperations().compact(tableNames[0], null, null, true, true);
    verifyVolumesUsed(tableNames[0], true, v2);
    // check that root tablet is not on volume 1
    ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
    String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
    String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
    Assert.assertTrue(rootTabletDir.startsWith(v2.toString()));
    // Cloning plus metadata/root flushes exercises volume handling on derived tables too.
    conn.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<>(), new HashSet<>());
    conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    conn.tableOperations().flush(RootTable.NAME, null, null, true);
    verifyVolumesUsed(tableNames[0], true, v2);
    verifyVolumesUsed(tableNames[1], true, v2);
}
Use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo (by apache).
Class ThriftServerBindsBeforeZooKeeperLockIT, method testMasterService:
/**
 * Verifies that a standby Master binds its Thrift port even though it cannot
 * acquire the ZooKeeper lock (the active Master already holds it).
 *
 * <p>Waits for the active Master's lock to appear in ZooKeeper, then starts a
 * second Master on a random free port and polls until a TCP connection to that
 * port succeeds. If the standby dies (e.g. port race), it is restarted on a
 * new port. The standby process is always destroyed on exit.
 *
 * @throws Exception on cluster, ZooKeeper, or process-management failure
 */
@Test
public void testMasterService() throws Exception {
    final MiniAccumuloClusterImpl cluster = (MiniAccumuloClusterImpl) getCluster();
    final ZooKeeperInstance inst = new ZooKeeperInstance(cluster.getClientConfig());
    // Wait for the Master to grab its lock
    while (true) {
        final ZooReader reader = new ZooReader(inst.getZooKeepers(), 30000);
        try {
            List<String> locks = reader.getChildren(Constants.ZROOT + "/" + inst.getInstanceID() + Constants.ZMASTER_LOCK);
            if (locks.size() > 0) {
                break;
            }
        } catch (Exception e) {
            LOG.debug("Failed to find active master location, retrying", e);
            Thread.sleep(1000);
        }
    }
    LOG.debug("Found active master");
    while (true) {
        int freePort = PortUtils.getRandomFreePort();
        Process master = null;
        try {
            LOG.debug("Starting standby master on {}", freePort);
            master = startProcess(cluster, ServerType.MASTER, freePort);
            while (true) {
                // try-with-resources replaces the manual null-check/close; a failed
                // close() is now also caught and retried instead of aborting the test.
                try (Socket s = new Socket("localhost", freePort)) {
                    if (s.isConnected()) {
                        // Pass
                        return;
                    }
                } catch (Exception e) {
                    LOG.debug("Caught exception trying to connect to Master", e);
                }
                // Wait before trying again
                Thread.sleep(1000);
                // died trying to bind it. Pick a new port and restart it in that case.
                if (!master.isAlive()) {
                    freePort = PortUtils.getRandomFreePort();
                    LOG.debug("Master died, restarting it listening on {}", freePort);
                    master = startProcess(cluster, ServerType.MASTER, freePort);
                }
            }
        } finally {
            // Never leave a standby Master process running after the test.
            if (null != master) {
                master.destroyForcibly();
            }
        }
    }
}
Use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo (by apache).
Class BalanceInPresenceOfOfflineTableIT, method test:
/**
 * Verifies that tablet balancing proceeds even while an offline table has
 * outstanding migrations: ingests data, then polls the Master stats (with
 * exponential backoff, up to ~5 minutes) until tablets are spread roughly
 * evenly (min/max ratio >= 0.5) across at least two tablet servers.
 *
 * @throws Exception on ingest, RPC, or timing failure
 */
@Test
public void test() throws Exception {
    log.info("Test that balancing is not stopped by an offline table with outstanding migrations.");
    log.debug("starting test ingestion");
    TestIngest.Opts opts = new TestIngest.Opts();
    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
    ClientConfiguration conf = cluster.getClientConfig();
    // Kerberos-enabled clusters need delegation credentials; otherwise use "root".
    if (conf.hasSasl()) {
        opts.updateKerberosCredentials(cluster.getClientConfig());
        vopts.updateKerberosCredentials(cluster.getClientConfig());
    } else {
        opts.setPrincipal("root");
        vopts.setPrincipal("root");
    }
    vopts.rows = opts.rows = 200000;
    opts.setTableName(TEST_TABLE);
    TestIngest.ingest(connector, opts, new BatchWriterOpts());
    connector.tableOperations().flush(TEST_TABLE, null, null, true);
    vopts.setTableName(TEST_TABLE);
    VerifyIngest.verifyIngest(connector, vopts, new ScannerOpts());
    log.debug("waiting for balancing, up to ~5 minutes to allow for migration cleanup.");
    final long startTime = System.currentTimeMillis();
    // Exponential backoff: the wait doubles on every unsatisfied poll.
    long currentWait = 10 * 1000;
    boolean balancingWorked = false;
    Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
    while (!balancingWorked && (System.currentTimeMillis() - startTime) < ((5 * 60 + 15) * 1000)) {
        Thread.sleep(currentWait);
        currentWait *= 2;
        log.debug("fetch the list of tablets assigned to each tserver.");
        MasterClientService.Iface client = null;
        MasterMonitorInfo stats = null;
        Instance instance = new ZooKeeperInstance(cluster.getClientConfig());
        // Retry the stats RPC if we hit a Master that just lost its lock.
        while (true) {
            try {
                client = MasterClient.getConnectionWithRetry(new ClientContext(instance, creds, cluster.getClientConfig()));
                stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(instance));
                break;
            } catch (ThriftSecurityException exception) {
                throw new AccumuloSecurityException(exception);
            } catch (ThriftNotActiveServiceException e) {
                // Let it loop, fetching a new location
                log.debug("Contacted a Master which is no longer active, retrying");
                sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            } catch (TException exception) {
                throw new AccumuloException(exception);
            } finally {
                if (client != null) {
                    MasterClient.close(client);
                }
            }
        }
        if (stats.getTServerInfoSize() < 2) {
            log.debug("we need >= 2 servers. sleeping for {}ms", currentWait);
            continue;
        }
        if (stats.getUnassignedTablets() != 0) {
            log.debug("We shouldn't have unassigned tablets. sleeping for {}ms", currentWait);
            continue;
        }
        // Sum tablet counts per server; a new long[] is already zero-filled,
        // so the original's redundant Arrays.fill(tabletsPerServer, 0l) is dropped.
        long[] tabletsPerServer = new long[stats.getTServerInfoSize()];
        for (int i = 0; i < stats.getTServerInfoSize(); i++) {
            for (Map.Entry<String, TableInfo> entry : stats.getTServerInfo().get(i).getTableMap().entrySet()) {
                tabletsPerServer[i] += entry.getValue().getTablets();
            }
        }
        if (tabletsPerServer[0] <= 10) {
            log.debug("We should have > 10 tablets. sleeping for {}ms", currentWait);
            continue;
        }
        long min = NumberUtils.min(tabletsPerServer), max = NumberUtils.max(tabletsPerServer);
        log.debug("Min={}, Max={}", min, max);
        // "Roughly even" means the least-loaded server has at least half the
        // tablets of the most-loaded one.
        if ((min / ((double) max)) < 0.5) {
            log.debug("ratio of min to max tablets per server should be roughly even. sleeping for {}ms", currentWait);
            continue;
        }
        balancingWorked = true;
    }
    Assert.assertTrue("did not properly balance", balancingWorked);
}
Aggregations