Use of org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration in project accumulo by apache.
The listing below is the setupCluster method of the AccumuloClusterHarness class.
@Before
public void setupCluster() throws Exception {
  // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
  Assume.assumeTrue(canRunTest(type));

  switch (type) {
    case MINI:
      MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
      // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
      MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
      cluster = impl;
      // MAC makes a ClientConf for us, just set it
      ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
      // Login as the "root" user
      if (null != krb) {
        ClusterUser rootUser = krb.getRootUser();
        // Log in the 'client' user
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
      }
      break;
    case STANDALONE:
      StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
      ClientConfiguration clientConf = conf.getClientConf();
      StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers());
      // If these are provided in the configuration, pass them into the cluster
      standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
      standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
      standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
      standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
      standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix());
      standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix());
      // For SASL, we need to get the Hadoop configuration files as well, otherwise UGI will log in as SIMPLE instead of KERBEROS
      Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
      if (clientConf.hasSasl()) {
        UserGroupInformation.setConfiguration(hadoopConfiguration);
        // Login as the admin user to start the tests
        UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
      }
      // Set the implementation
      cluster = standaloneCluster;
      break;
    default:
      throw new RuntimeException("Unhandled type");
  }

  if (type.isDynamic()) {
    cluster.start();
  } else {
    log.info("Removing tables which appear to be from a previous test run");
    cleanupTables();
    log.info("Removing users which appear to be from a previous test run");
    cleanupUsers();
  }

  switch (type) {
    case MINI:
      if (null != krb) {
        final String traceTable = Property.TRACE_TABLE.getDefaultValue();
        final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
        // Login as the trace user
        UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
        // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
        Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());
        // Then, log back in as the "root" user and do the grant
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
        conn = getConnector();
        // Create the trace table
        conn.tableOperations().create(traceTable);
        // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
        // to have the ability to read, write and alter the trace table
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
      }
      break;
    default:
  }
}
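
For context, here is a minimal sketch (not taken from the Accumulo source) of how an integration test typically consumes this harness. The class name ExampleIT and the created table are hypothetical; the sketch assumes the 1.x harness API used above, where a subclass can override the configureMiniCluster callback (the one MiniClusterHarness.create performs in the MINI case) and call getConnector() after setupCluster() has run.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;

// Hypothetical test class, shown only to illustrate how the harness above is used
public class ExampleIT extends AccumuloClusterHarness {

  @Override
  public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
    // Callback invoked by MiniClusterHarness.create() in the MINI case; not invoked for STANDALONE clusters
    cfg.setNumTservers(1);
  }

  @Test
  public void createsTable() throws Exception {
    // getConnector() returns a connector for the admin user that setupCluster() logged in
    Connector conn = getConnector();
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
  }
}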