use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.
the class SuspendedTabletsIT method suspensionTestBody.
/**
 * Main test body for suspension tests.
 *
 * @param serverStopper
 *          callback which shuts down some tablet servers.
 */
private void suspensionTestBody(TServerKiller serverStopper) throws Exception {
  Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
  Instance instance = new ZooKeeperInstance(getCluster().getClientConfig());
  ClientContext ctx = new ClientContext(instance, creds, getCluster().getClientConfig());
  String tableName = getUniqueNames(1)[0];
  Connector conn = ctx.getConnector();
  // Create a table with a bunch of splits
  log.info("Creating table " + tableName);
  conn.tableOperations().create(tableName);
  SortedSet<Text> splitPoints = new TreeSet<>();
  for (int i = 1; i < TABLETS; ++i) {
    splitPoints.add(new Text("" + i));
  }
  conn.tableOperations().addSplits(tableName, splitPoints);
  // Wait for all of the tablets to be hosted ...
log.info("Waiting on hosting and balance");
TabletLocations ds;
for (ds = TabletLocations.retrieve(ctx, tableName); ds.hostedCount != TABLETS; ds = TabletLocations.retrieve(ctx, tableName)) {
Thread.sleep(1000);
}
// ... and balanced.
conn.instanceOperations().waitForBalance();
do {
// Give at least another 5 seconds for migrations to finish up
Thread.sleep(5000);
ds = TabletLocations.retrieve(ctx, tableName);
} while (ds.hostedCount != TABLETS);
// Pray all of our tservers have at least 1 tablet.
Assert.assertEquals(TSERVERS, ds.hosted.keySet().size());
// Kill two tablet servers hosting our tablets. This should put tablets into suspended state, and thus halt balancing.
TabletLocations beforeDeathState = ds;
log.info("Eliminating tablet servers");
serverStopper.eliminateTabletServers(ctx, beforeDeathState, 2);
// Eventually some tablets will be suspended.
log.info("Waiting on suspended tablets");
ds = TabletLocations.retrieve(ctx, tableName);
// Until we can scan the metadata table, the master probably can't either, so won't have been able to suspend the tablets.
// So we note the time that we were first able to successfully scan the metadata table.
long killTime = System.nanoTime();
while (ds.suspended.keySet().size() != 2) {
Thread.sleep(1000);
ds = TabletLocations.retrieve(ctx, tableName);
}
SetMultimap<HostAndPort, KeyExtent> deadTabletsByServer = ds.suspended;
// "belong" to the dead tablet servers, and should be in exactly the same place as before any tserver death.
  for (HostAndPort server : deadTabletsByServer.keySet()) {
    Assert.assertEquals(deadTabletsByServer.get(server), beforeDeathState.hosted.get(server));
  }
  Assert.assertEquals(TABLETS, ds.hostedCount + ds.suspendedCount);
  // Restart the first tablet server, making sure it ends up on the same port
  HostAndPort restartedServer = deadTabletsByServer.keySet().iterator().next();
  log.info("Restarting " + restartedServer);
  getCluster().getClusterControl().start(ServerType.TABLET_SERVER, null, ImmutableMap.of(Property.TSERV_CLIENTPORT.getKey(), "" + restartedServer.getPort(), Property.TSERV_PORTSEARCH.getKey(), "false"), 1);
  // Eventually, the suspended tablets should be reassigned to the newly alive tserver.
  log.info("Awaiting tablet unsuspension for tablets belonging to " + restartedServer);
  for (ds = TabletLocations.retrieve(ctx, tableName); ds.suspended.containsKey(restartedServer) || ds.assignedCount != 0; ds = TabletLocations.retrieve(ctx, tableName)) {
    Thread.sleep(1000);
  }
  Assert.assertEquals(deadTabletsByServer.get(restartedServer), ds.hosted.get(restartedServer));
  // Finally, after much longer, remaining suspended tablets should be reassigned.
  log.info("Awaiting tablet reassignment for remaining tablets");
  for (ds = TabletLocations.retrieve(ctx, tableName); ds.hostedCount != TABLETS; ds = TabletLocations.retrieve(ctx, tableName)) {
    Thread.sleep(1000);
  }
  long recoverTime = System.nanoTime();
  Assert.assertTrue(recoverTime - killTime >= NANOSECONDS.convert(SUSPEND_DURATION, MILLISECONDS));
}
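The serverStopper argument is exercised only through the eliminateTabletServers(ctx, locations, count) call above. Below is a minimal sketch of one such callback, assuming the test can reach its MiniAccumuloClusterImpl via getCluster() (as the rest of the method does) and that TServerKiller declares a single method with that shape; treat it as an illustration rather than the test's actual implementation.
TServerKiller crashKiller = new TServerKiller() {
  @Override
  public void eliminateTabletServers(ClientContext ctx, TabletLocations locations, int count) throws Exception {
    // Take down `count` tablet server processes through the mini cluster's process control.
    List<ProcessReference> procs = new ArrayList<>(getCluster().getProcesses().get(ServerType.TABLET_SERVER));
    for (int i = 0; i < count; i++) {
      getCluster().killProcess(ServerType.TABLET_SERVER, procs.get(i));
    }
  }
};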
use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.
the class KerberosProxyIT method proxiedUserAccessWithoutAccumuloProxy.
@Test
public void proxiedUserAccessWithoutAccumuloProxy() throws Exception {
  final String tableName = getUniqueNames(1)[0];
  ClusterUser rootUser = kdc.getRootUser();
  final UserGroupInformation rootUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
  final UserGroupInformation realUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(proxyPrincipal, proxyKeytab.getAbsolutePath());
  final String userWithoutCredentials1 = kdc.qualifyUser(PROXIED_USER1);
  final String userWithoutCredentials2 = kdc.qualifyUser(PROXIED_USER2);
  final String userWithoutCredentials3 = kdc.qualifyUser(PROXIED_USER3);
  final UserGroupInformation proxyUser1 = UserGroupInformation.createProxyUser(userWithoutCredentials1, realUgi);
  final UserGroupInformation proxyUser2 = UserGroupInformation.createProxyUser(userWithoutCredentials2, realUgi);
  final UserGroupInformation proxyUser3 = UserGroupInformation.createProxyUser(userWithoutCredentials3, realUgi);
  // Create a table and user, grant permission to our user to read that table.
  rootUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
      Connector conn = inst.getConnector(rootUgi.getUserName(), new KerberosToken());
      conn.tableOperations().create(tableName);
      conn.securityOperations().createLocalUser(userWithoutCredentials1, new PasswordToken("ignored"));
      conn.securityOperations().grantTablePermission(userWithoutCredentials1, tableName, TablePermission.READ);
      conn.securityOperations().createLocalUser(userWithoutCredentials3, new PasswordToken("ignored"));
      conn.securityOperations().grantTablePermission(userWithoutCredentials3, tableName, TablePermission.READ);
      return null;
    }
  });
  realUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
      Connector conn = inst.getConnector(proxyPrincipal, new KerberosToken());
      try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
        s.iterator().hasNext();
        Assert.fail("Expected to see an exception");
      } catch (RuntimeException e) {
        int numSecurityExceptionsSeen = Iterables.size(Iterables.filter(Throwables.getCausalChain(e), org.apache.accumulo.core.client.AccumuloSecurityException.class));
        assertTrue("Expected to see at least one AccumuloSecurityException, but saw: " + Throwables.getStackTraceAsString(e), numSecurityExceptionsSeen > 0);
      }
      return null;
    }
  });
  // Allowed to be proxied and has read permission
  proxyUser1.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
      Connector conn = inst.getConnector(userWithoutCredentials1, new KerberosToken(userWithoutCredentials1));
      Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
      assertFalse(s.iterator().hasNext());
      return null;
    }
  });
  // Allowed to be proxied but does not have read permission
  proxyUser2.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
      Connector conn = inst.getConnector(userWithoutCredentials2, new KerberosToken(userWithoutCredentials3));
      try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
        s.iterator().hasNext();
        Assert.fail("Expected to see an exception");
      } catch (RuntimeException e) {
        int numSecurityExceptionsSeen = Iterables.size(Iterables.filter(Throwables.getCausalChain(e), org.apache.accumulo.core.client.AccumuloSecurityException.class));
        assertTrue("Expected to see at least one AccumuloSecurityException, but saw: " + Throwables.getStackTraceAsString(e), numSecurityExceptionsSeen > 0);
      }
      return null;
    }
  });
  // Has read permission but is not allowed to be proxied
  proxyUser3.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      ZooKeeperInstance inst = new ZooKeeperInstance(mac.getClientConfig());
      try {
        inst.getConnector(userWithoutCredentials3, new KerberosToken(userWithoutCredentials3));
        Assert.fail("Should not be able to create a Connector as this user cannot be proxied");
      } catch (org.apache.accumulo.core.client.AccumuloSecurityException e) {
        // Expected, this user cannot be proxied
      }
      return null;
    }
  });
}
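Outside the test harness, the same impersonation pattern amounts to logging in the proxy principal from its keytab, wrapping the end user in a proxy UGI, and running the Accumulo calls inside doAs. The sketch below is only an illustration: the principal names, keytab path, instance name, ZooKeeper hosts, and table are made up, and withSasl(true) is assumed to be how the client configuration enables Kerberos authentication.
UserGroupInformation realUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI("proxy/host.example.com@EXAMPLE.COM", "/path/to/proxy.keytab");
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("alice@EXAMPLE.COM", realUgi);
proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
  @Override
  public Void run() throws Exception {
    ClientConfiguration clientConf = ClientConfiguration.create().withInstance("myInstance").withZkHosts("zk1:2181").withSasl(true);
    ZooKeeperInstance inst = new ZooKeeperInstance(clientConf);
    Connector conn = inst.getConnector("alice@EXAMPLE.COM", new KerberosToken("alice@EXAMPLE.COM"));
    try (Scanner s = conn.createScanner("mytable", Authorizations.EMPTY)) {
      // Succeeds only if the proxy principal may impersonate alice and alice can read the table.
      s.iterator().hasNext();
    }
    return null;
  }
});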
use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo-examples by apache.
the class ARS method main.
public static void main(String[] args) throws Exception {
  final ConsoleReader reader = new ConsoleReader();
  ARS ars = null;
  while (true) {
    String line = reader.readLine(">");
    if (line == null)
      break;
    final String[] tokens = line.split("\\s+");
    if (tokens[0].equals("reserve") && tokens.length >= 4 && ars != null) {
      // start up multiple threads all trying to reserve the same resource, no more than one should succeed
      final ARS fars = ars;
      ArrayList<Thread> threads = new ArrayList<>();
      for (int i = 3; i < tokens.length; i++) {
        final int whoIndex = i;
        Runnable reservationTask = new Runnable() {
          @Override
          public void run() {
            try {
              reader.println(" " + String.format("%20s", tokens[whoIndex]) + " : " + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
            } catch (Exception e) {
              log.warn("Could not write to the ConsoleReader.", e);
            }
          }
        };
        threads.add(new Thread(reservationTask));
      }
      for (Thread thread : threads)
        thread.start();
      for (Thread thread : threads)
        thread.join();
    } else if (tokens[0].equals("cancel") && tokens.length == 4 && ars != null) {
      ars.cancel(tokens[1], tokens[2], tokens[3]);
    } else if (tokens[0].equals("list") && tokens.length == 3 && ars != null) {
      List<String> reservations = ars.list(tokens[1], tokens[2]);
      if (reservations.size() > 0) {
        reader.println(" Reservation holder : " + reservations.get(0));
        if (reservations.size() > 1)
          reader.println(" Wait list : " + reservations.subList(1, reservations.size()));
      }
    } else if (tokens[0].equals("quit") && tokens.length == 1) {
      break;
    } else if (tokens[0].equals("connect") && tokens.length == 6 && ars == null) {
      ZooKeeperInstance zki = new ZooKeeperInstance(ClientConfiguration.create().withInstance(tokens[1]).withZkHosts(tokens[2]));
      Connector conn = zki.getConnector(tokens[3], new PasswordToken(tokens[4]));
      if (conn.tableOperations().exists(tokens[5])) {
        ars = new ARS(conn, tokens[5]);
        reader.println(" connected");
      } else
        reader.println(" No Such Table");
    } else {
      System.out.println(" Commands : ");
      if (ars == null) {
        reader.println(" connect <instance> <zookeepers> <user> <pass> <table>");
      } else {
        reader.println(" reserve <what> <when> <who> {who}");
        reader.println(" cancel <what> <when> <who>");
        reader.println(" list <what> <when>");
      }
    }
  }
}
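For orientation, here is a minimal programmatic sketch of the flow this shell drives. The instance name, ZooKeeper hosts, credentials, and table name are hypothetical, and only ARS calls already used above (the constructor, reserve, and list) are assumed.
ZooKeeperInstance zki = new ZooKeeperInstance(ClientConfiguration.create().withInstance("test").withZkHosts("zk1:2181"));
Connector conn = zki.getConnector("root", new PasswordToken("secret"));
ARS ars = new ARS(conn, "ars"); // the reservation table must already exist
System.out.println(ars.reserve("room01", "20180101", "alice")); // only one concurrent caller wins the reservation
System.out.println(ars.list("room01", "20180101")); // holder first, then the wait list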
use of org.apache.accumulo.core.client.ZooKeeperInstance in project accumulo by apache.
the class ClientContext method getConnector.
/**
 * Retrieve a connector
 */
public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
  // avoid making more connectors than necessary
  if (conn == null) {
    if (getInstance() instanceof ZooKeeperInstance) {
      // reuse existing context
      conn = new ConnectorImpl(this);
    } else {
      Credentials c = getCredentials();
      conn = getInstance().getConnector(c.getPrincipal(), c.getToken());
    }
  }
  return conn;
}
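A minimal usage sketch of this method follows; the instance name, ZooKeeper hosts, and credentials are hypothetical, and the constructor shapes mirror the ones used in the SuspendedTabletsIT snippet above.
ClientConfiguration clientConf = ClientConfiguration.create().withInstance("myInstance").withZkHosts("zk1:2181");
Instance instance = new ZooKeeperInstance(clientConf);
Credentials creds = new Credentials("root", new PasswordToken("secret"));
ClientContext ctx = new ClientContext(instance, creds, clientConf);
Connector conn = ctx.getConnector(); // built once; subsequent calls return the cached Connector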
use of org.apache.accumulo.core.client.ZooKeeperInstance in project hive by apache.
the class TestHiveAccumuloTableInputFormat method testConfigureAccumuloInputFormatWithEmptyColumns.
@Test
public void testConfigureAccumuloInputFormatWithEmptyColumns() throws Exception {
  AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf);
  ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes);
  HashSet<Pair<Text, Text>> cfCqPairs = Sets.newHashSet();
  List<IteratorSetting> iterators = new ArrayList<IteratorSetting>();
  Set<Range> ranges = Collections.singleton(new Range());
  String instanceName = "realInstance";
  String zookeepers = "host1:2181,host2:2181,host3:2181";
  IteratorSetting cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class);
  cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName());
  cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
  cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "dave");
  cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:name");
  iterators.add(cfg);
  cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class);
  cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName());
  cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName());
  cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "50");
  cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:age");
  iterators.add(cfg);
  ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class);
  HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class);
  HiveAccumuloHelper helper = Mockito.mock(HiveAccumuloHelper.class);
  // Stub out the ZKI mock
  Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName);
  Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers);
  Mockito.when(mockInputFormat.getPairCollection(columnMapper.getColumnMappings())).thenReturn(cfCqPairs);
  // Stub out a mocked Helper instance
  Mockito.when(mockInputFormat.getHelper()).thenReturn(helper);
  // Call out to the real configure method
  Mockito.doCallRealMethod().when(mockInputFormat).configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
  // Also compute the correct cf:cq pairs so we can assert the right argument was passed
  Mockito.doCallRealMethod().when(mockInputFormat).getPairCollection(columnMapper.getColumnMappings());
  mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges);
  // Verify that the correct methods are invoked on AccumuloInputFormat
  Mockito.verify(helper).setInputFormatZooKeeperInstance(conf, instanceName, zookeepers, false);
  Mockito.verify(helper).setInputFormatConnectorInfo(conf, USER, new PasswordToken(PASS));
  Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE);
  Mockito.verify(mockInputFormat).setScanAuthorizations(conf, con.securityOperations().getUserAuthorizations(USER));
  Mockito.verify(mockInputFormat).addIterators(conf, iterators);
  Mockito.verify(mockInputFormat).setRanges(conf, ranges);
  // fetchColumns is not called because we had no columns to fetch
}