Example usage of org.apache.accumulo.core.client.ClientConfiguration in the Apache Accumulo project: class BatchWriterIterator, method initBatchWriter.
/**
 * Initializes the {@code connector} and {@code batchWriter} fields from the iterator's
 * configured options (instance name, ZooKeeper hosts/timeout, credentials, table name,
 * and batch-writer memory/timeout limits).
 *
 * @throws RuntimeException if connecting to the instance fails or the target table does not exist
 */
private void initBatchWriter() {
// Build a ZooKeeper-backed client configuration from the iterator's options.
ClientConfiguration clientConf = ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeeperHost).withZkTimeout(zookeeperTimeout);
Instance zkInstance = new ZooKeeperInstance(clientConf);
try {
connector = zkInstance.getConnector(username, auth);
} catch (Exception e) {
log.error("failed to connect to Accumulo instance " + instanceName, e);
throw new RuntimeException(e);
}
// Configure the writer's buffering and flush behavior.
BatchWriterConfig writerConfig = new BatchWriterConfig();
writerConfig.setMaxMemory(batchWriterMaxMemory);
writerConfig.setTimeout(batchWriterTimeout, TimeUnit.SECONDS);
try {
batchWriter = connector.createBatchWriter(tableName, writerConfig);
} catch (TableNotFoundException e) {
log.error(tableName + " does not exist in instance " + instanceName, e);
throw new RuntimeException(e);
}
}
Example usage of org.apache.accumulo.core.client.ClientConfiguration in the Apache Accumulo project: class AccumuloClusterHarness, method setupCluster.
// JUnit lifecycle hook: instantiates (or attaches to) the Accumulo cluster under test
// before each test method, handling both MiniAccumuloCluster and standalone clusters,
// including Kerberos (SASL) login where configured.
@Before
public void setupCluster() throws Exception {
// Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
Assume.assumeTrue(canRunTest(type));
switch(type) {
case MINI:
MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
// Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
cluster = impl;
// MAC makes a ClientConf for us, just set it
((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
// Login as the "root" user
if (null != krb) {
ClusterUser rootUser = krb.getRootUser();
// Log in the 'client' user
UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
}
break;
case STANDALONE:
// Standalone clusters are externally managed; wire up the harness with the
// connection details and paths from the test configuration.
StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
ClientConfiguration clientConf = conf.getClientConf();
StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers());
// If these are provided in the configuration, pass them into the cluster
standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix());
standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix());
// For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in as SIMPLE instead of KERBEROS
Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
if (clientConf.hasSasl()) {
UserGroupInformation.setConfiguration(hadoopConfiguration);
// Login as the admin user to start the tests
UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
}
// Set the implementation
cluster = standaloneCluster;
break;
default:
throw new RuntimeException("Unhandled type");
}
// Dynamic clusters (MINI) are started fresh per test; static clusters are reused,
// so leftover state from previous runs must be scrubbed instead.
if (type.isDynamic()) {
cluster.start();
} else {
log.info("Removing tables which appear to be from a previous test run");
cleanupTables();
log.info("Removing users which appear to be from a previous test run");
cleanupUsers();
}
// Post-start setup: under Kerberos on MINI, create the trace table and grant the
// system/trace principal the permissions it needs on it.
switch(type) {
case MINI:
if (null != krb) {
final String traceTable = Property.TRACE_TABLE.getDefaultValue();
final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
// Login as the trace user
UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
// Open a connector as the system user (ensures the user will exist for us to assign permissions to)
// NOTE(review): this loginUserFromKeytab call is identical to the one directly above —
// the repetition looks redundant; confirm intent before removing either call.
UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());
// Then, log back in as the "root" user and do the grant
// (the system-user connector above is intentionally discarded; it only forced user creation)
UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
conn = getConnector();
// Create the trace table
conn.tableOperations().create(traceTable);
// Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
// to have the ability to read, write and alter the trace table
conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
}
break;
default:
// No post-start work for other cluster types.
}
}
Example usage of org.apache.accumulo.core.client.ClientConfiguration in the Apache Accumulo project: class ReadWriteIT, method multiTableTest.
/**
 * Runs TestMultiTableIngest twice concurrently against the cluster: one writer task and one
 * read-only task. Both tasks share the same table prefix so the reader verifies the writer's
 * tables. Fails the test if either external process exits non-zero.
 */
@Test
public void multiTableTest() throws Exception {
// Write to multiple tables
final String instance = cluster.getInstanceName();
final String keepers = cluster.getZooKeepers();
final ClusterControl control = cluster.getClusterControl();
final String prefix = getClass().getSimpleName() + "_" + testName.getMethodName();
ExecutorService svc = Executors.newFixedThreadPool(2);
// The two tasks are identical except for the --readonly flag; build both from one helper.
Future<Integer> p1 = svc.submit(multiTableIngestTask(control, instance, keepers, prefix, false));
Future<Integer> p2 = svc.submit(multiTableIngestTask(control, instance, keepers, prefix, true));
svc.shutdown();
while (!svc.isTerminated()) {
svc.awaitTermination(15, TimeUnit.SECONDS);
}
assertEquals(0, p1.get().intValue());
assertEquals(0, p2.get().intValue());
}

/**
 * Builds a task that launches TestMultiTableIngest via the cluster control, choosing
 * keytab-based arguments when the client configuration uses SASL (Kerberos) and
 * password-based arguments otherwise.
 *
 * @param control cluster control used to exec the external class
 * @param instance Accumulo instance name
 * @param keepers ZooKeeper quorum string
 * @param prefix table-name prefix shared by writer and reader
 * @param readonly when true, passes --readonly so the task only reads
 * @return a Callable yielding the process exit code, or -1 on IOException
 */
private Callable<Integer> multiTableIngestTask(final ClusterControl control, final String instance, final String keepers, final String prefix, final boolean readonly) {
return new Callable<Integer>() {
@Override
public Integer call() {
try {
ClientConfiguration clientConf = cluster.getClientConfig();
// Need to pass along the keytab because of that.
if (clientConf.hasSasl()) {
String principal = getAdminPrincipal();
AuthenticationToken token = getAdminToken();
assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
KerberosToken kt = (KerberosToken) token;
assertNotNull("Expected keytab in token", kt.getKeytab());
String keytabPath = kt.getKeytab().getAbsolutePath();
if (readonly) {
return control.exec(TestMultiTableIngest.class, args("--count", Integer.toString(ROWS), "--readonly", "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", keytabPath, "-u", principal));
}
return control.exec(TestMultiTableIngest.class, args("--count", Integer.toString(ROWS), "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", keytabPath, "-u", principal));
}
String password = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
if (readonly) {
return control.exec(TestMultiTableIngest.class, args("--count", Integer.toString(ROWS), "--readonly", "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", password, "--tablePrefix", prefix));
}
return control.exec(TestMultiTableIngest.class, args("--count", Integer.toString(ROWS), "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", password, "--tablePrefix", prefix));
} catch (IOException e) {
log.error("Error running MultiTableIngest", e);
return -1;
}
}
};
}
Example usage of org.apache.accumulo.core.client.ClientConfiguration in the Apache Accumulo project: class RestartIT, method restartMaster.
/**
 * Verifies that ingest survives a master restart: starts TestIngest in the background,
 * stops and restarts the master while it runs, then checks the ingest exit code and
 * verifies the ingested data.
 */
@Test
public void restartMaster() throws Exception {
final Connector conn = getConnector();
final String table = getUniqueNames(1)[0];
OPTS.setTableName(table);
VOPTS.setTableName(table);
conn.tableOperations().create(table);
final AuthenticationToken adminToken = getAdminToken();
final ClusterControl control = getCluster().getClusterControl();
final String[] ingestArgs;
if (adminToken instanceof PasswordToken) {
// Password auth: pass the password on the command line.
final String password = new String(((PasswordToken) adminToken).getPassword(), UTF_8);
ingestArgs = new String[] { "-u", getAdminPrincipal(), "-p", password, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", table };
OPTS.setPrincipal(getAdminPrincipal());
VOPTS.setPrincipal(getAdminPrincipal());
} else if (adminToken instanceof KerberosToken) {
// Kerberos auth: use the admin user's keytab instead of a password.
final ClusterUser rootUser = getAdminUser();
ingestArgs = new String[] { "-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", table };
ClientConfiguration clientConfig = cluster.getClientConfig();
OPTS.updateKerberosCredentials(clientConfig);
VOPTS.updateKerberosCredentials(clientConfig);
} else {
throw new RuntimeException("Unknown token");
}
// Kick off ingest asynchronously, then bounce the master while it is running.
Future<Integer> ingest = svc.submit(new Callable<Integer>() {
@Override
public Integer call() {
try {
return control.exec(TestIngest.class, ingestArgs);
} catch (IOException e) {
log.error("Error running TestIngest", e);
return -1;
}
}
});
control.stopAllServers(ServerType.MASTER);
control.startAllServers(ServerType.MASTER);
assertEquals(0, ingest.get().intValue());
VerifyIngest.verifyIngest(conn, VOPTS, SOPTS);
}
Example usage of org.apache.accumulo.core.client.ClientConfiguration in the Apache Accumulo project: class ScanIteratorIT, method setup.
/**
 * Per-test setup: creates a fresh table and a local test user with READ/WRITE permission
 * on it, plus the authorizations used by AuthsIterator. Under SASL (Kerberos) the user has
 * no password token, so a null token is used and {@code saslEnabled} is recorded for the test.
 */
@Before
public void setup() throws Exception {
connector = getConnector();
tableName = getUniqueNames(1)[0];
connector.tableOperations().create(tableName);
ClientConfiguration clientConfig = cluster.getClientConfig();
ClusterUser clusterUser = getUser(0);
user = clusterUser.getPrincipal();
// SASL (Kerberos) users authenticate via keytab, so no password token exists for them.
saslEnabled = clientConfig.hasSasl();
final PasswordToken userToken = saslEnabled ? null : new PasswordToken(clusterUser.getPassword());
// Drop any leftover user from a previous run before recreating it.
if (connector.securityOperations().listLocalUsers().contains(user)) {
log.info("Dropping {}", user);
connector.securityOperations().dropLocalUser(user);
}
connector.securityOperations().createLocalUser(user, userToken);
connector.securityOperations().grantTablePermission(user, tableName, TablePermission.READ);
connector.securityOperations().grantTablePermission(user, tableName, TablePermission.WRITE);
connector.securityOperations().changeUserAuthorizations(user, AuthsIterator.AUTHS);
}
Aggregations