Search in sources:

Example 61 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

The class BatchWriterIterator, method initBatchWriter.

/**
 * Connects to the configured ZooKeeper-backed Accumulo instance and creates the
 * {@code batchWriter} used by this iterator. Connection parameters (instance name,
 * ZooKeeper hosts/timeout, credentials) and writer limits (max memory, timeout)
 * come from fields of the enclosing class — presumably deserialized iterator
 * options; confirm against the iterator's init path.
 *
 * @throws RuntimeException wrapping any connection failure or a missing table
 */
private void initBatchWriter() {
    ClientConfiguration clientConf = ClientConfiguration.loadDefault()
        .withInstance(instanceName)
        .withZkHosts(zookeeperHost)
        .withZkTimeout(zookeeperTimeout);
    Instance zkInstance = new ZooKeeperInstance(clientConf);
    try {
        connector = zkInstance.getConnector(username, auth);
    } catch (Exception e) {
        log.error("failed to connect to Accumulo instance " + instanceName, e);
        throw new RuntimeException(e);
    }
    BatchWriterConfig writerConfig = new BatchWriterConfig();
    writerConfig.setMaxMemory(batchWriterMaxMemory);
    writerConfig.setTimeout(batchWriterTimeout, TimeUnit.SECONDS);
    try {
        batchWriter = connector.createBatchWriter(tableName, writerConfig);
    } catch (TableNotFoundException e) {
        log.error(tableName + " does not exist in instance " + instanceName, e);
        throw new RuntimeException(e);
    }
}
Also used : TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Instance(org.apache.accumulo.core.client.Instance) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) IOException(java.io.IOException) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance)

Example 62 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

The class AccumuloClusterHarness, method setupCluster.

/**
 * Starts (MINI) or attaches to (STANDALONE) the Accumulo cluster under test before each
 * test method. For MINI clusters it also propagates the generated client configuration and,
 * when Kerberos is enabled, logs in the root user. For STANDALONE clusters it wires the
 * externally-provided configuration into a {@link StandaloneAccumuloCluster} and, under
 * SASL, logs in the admin principal via UGI. After the cluster is up (dynamic types) or
 * cleaned of leftover tables/users (static types), MINI+Kerberos setups additionally create
 * the trace table and grant the trace/system principal READ/WRITE/ALTER_TABLE on it.
 *
 * Fix: the original code invoked the identical system-user keytab login twice back-to-back
 * before opening the system connector; the redundant second call was removed.
 *
 * @throws Exception on any cluster startup, login, or table/permission operation failure
 */
@Before
public void setupCluster() throws Exception {
    // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
    Assume.assumeTrue(canRunTest(type));
    switch(type) {
        case MINI:
            MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
            // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
            MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
            cluster = impl;
            // MAC makes a ClientConf for us, just set it
            ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
            // Login as the "root" user
            if (null != krb) {
                ClusterUser rootUser = krb.getRootUser();
                // Log in the 'client' user
                UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
            }
            break;
        case STANDALONE:
            StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
            ClientConfiguration clientConf = conf.getClientConf();
            StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers());
            // If these are provided in the configuration, pass them into the cluster
            standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
            standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
            standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
            standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
            standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix());
            standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix());
            // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in as SIMPLE instead of KERBEROS
            Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
            if (clientConf.hasSasl()) {
                UserGroupInformation.setConfiguration(hadoopConfiguration);
                // Login as the admin user to start the tests
                UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
            }
            // Set the implementation
            cluster = standaloneCluster;
            break;
        default:
            throw new RuntimeException("Unhandled type");
    }
    if (type.isDynamic()) {
        cluster.start();
    } else {
        log.info("Removing tables which appear to be from a previous test run");
        cleanupTables();
        log.info("Removing users which appear to be from a previous test run");
        cleanupUsers();
    }
    switch(type) {
        case MINI:
            if (null != krb) {
                final String traceTable = Property.TRACE_TABLE.getDefaultValue();
                final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
                // Log in as the system/trace user and open a connector as that user
                // (ensures the user will exist for us to assign permissions to).
                // NOTE: the original performed this identical keytab login twice in a row; once suffices.
                UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
                Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());
                // Then, log back in as the "root" user and do the grant
                UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
                conn = getConnector();
                // Create the trace table
                conn.tableOperations().create(traceTable);
                // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
                // to have the ability to read, write and alter the trace table
                conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
                conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
                conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
            }
            break;
        default:
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) AccumuloMiniClusterConfiguration(org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StandaloneAccumuloClusterConfiguration(org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration) AccumuloClusterPropertyConfiguration(org.apache.accumulo.harness.conf.AccumuloClusterPropertyConfiguration) AccumuloClusterConfiguration(org.apache.accumulo.harness.conf.AccumuloClusterConfiguration) AccumuloMiniClusterConfiguration(org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) StandaloneAccumuloClusterConfiguration(org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) ClusterUser(org.apache.accumulo.cluster.ClusterUser) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) StandaloneAccumuloCluster(org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Before(org.junit.Before)

Example 63 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

The class ReadWriteIT, method multiTableTest.

/**
 * Runs a concurrent writer and reader of {@code TestMultiTableIngest} against the cluster
 * and asserts both external processes exit successfully (exit code 0).
 *
 * Fix: the two submitted tasks were ~25-line near-duplicates differing only in the
 * {@code --readonly} flag; the shared logic is now factored into
 * {@link #runMultiTableIngest}.
 *
 * @throws Exception if the executor is interrupted or a task fails exceptionally
 */
@Test
public void multiTableTest() throws Exception {
    // Write to multiple tables
    final String instance = cluster.getInstanceName();
    final String keepers = cluster.getZooKeepers();
    final ClusterControl control = cluster.getClusterControl();
    final String prefix = getClass().getSimpleName() + "_" + testName.getMethodName();
    ExecutorService svc = Executors.newFixedThreadPool(2);
    // Writer task (ingests data) and reader task (--readonly) share one helper.
    Future<Integer> p1 = svc.submit(new Callable<Integer>() {

        @Override
        public Integer call() {
            return runMultiTableIngest(control, instance, keepers, prefix, false);
        }
    });
    Future<Integer> p2 = svc.submit(new Callable<Integer>() {

        @Override
        public Integer call() {
            return runMultiTableIngest(control, instance, keepers, prefix, true);
        }
    });
    svc.shutdown();
    while (!svc.isTerminated()) {
        svc.awaitTermination(15, TimeUnit.SECONDS);
    }
    assertEquals(0, p1.get().intValue());
    assertEquals(0, p2.get().intValue());
}

/**
 * Launches {@code TestMultiTableIngest} as an external process via the cluster control,
 * building Kerberos (keytab) or password arguments depending on whether SASL is enabled.
 *
 * @param control  cluster control used to exec the child process
 * @param instance Accumulo instance name
 * @param keepers  ZooKeeper connect string
 * @param prefix   table-name prefix shared by writer and reader
 * @param readonly when true, passes {@code --readonly} so the process only reads
 * @return the child process exit code, or -1 if exec failed with an IOException
 */
private int runMultiTableIngest(ClusterControl control, String instance, String keepers, String prefix, boolean readonly) {
    try {
        ClientConfiguration clientConf = cluster.getClientConfig();
        String count = Integer.toString(ROWS);
        // Need to pass along the keytab because of that.
        if (clientConf.hasSasl()) {
            String principal = getAdminPrincipal();
            AuthenticationToken token = getAdminToken();
            assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
            KerberosToken kt = (KerberosToken) token;
            assertNotNull("Expected keytab in token", kt.getKeytab());
            String keytab = kt.getKeytab().getAbsolutePath();
            return readonly
                ? control.exec(TestMultiTableIngest.class, args("--count", count, "--readonly", "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", keytab, "-u", principal))
                : control.exec(TestMultiTableIngest.class, args("--count", count, "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", keytab, "-u", principal));
        }
        String password = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
        return readonly
            ? control.exec(TestMultiTableIngest.class, args("--count", count, "--readonly", "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", password, "--tablePrefix", prefix))
            : control.exec(TestMultiTableIngest.class, args("--count", count, "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", password, "--tablePrefix", prefix));
    } catch (IOException e) {
        log.error("Error running MultiTableIngest", e);
        return -1;
    }
}
Also used : AuthenticationToken(org.apache.accumulo.core.client.security.tokens.AuthenticationToken) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) IOException(java.io.IOException) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) TestMultiTableIngest(org.apache.accumulo.test.TestMultiTableIngest) ExecutorService(java.util.concurrent.ExecutorService) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) ClusterControl(org.apache.accumulo.cluster.ClusterControl) Test(org.junit.Test)

Example 64 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

The class RestartIT, method restartMaster.

/**
 * Verifies that an ingest in flight survives a master restart: starts a background
 * {@code TestIngest} process, bounces the MASTER server while it runs, then asserts
 * the ingest exited 0 and that the written data verifies.
 *
 * @throws Exception on table creation, process control, or verification failure
 */
@Test
public void restartMaster() throws Exception {
    Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];
    OPTS.setTableName(tableName);
    VOPTS.setTableName(tableName);
    conn.tableOperations().create(tableName);
    final AuthenticationToken adminToken = getAdminToken();
    final ClusterControl control = getCluster().getClusterControl();
    final String[] ingestArgs;
    // Build credentials for the external ingest process: password vs. keytab.
    if (adminToken instanceof PasswordToken) {
        byte[] password = ((PasswordToken) adminToken).getPassword();
        ingestArgs = new String[] { "-u", getAdminPrincipal(), "-p", new String(password, UTF_8), "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName };
        OPTS.setPrincipal(getAdminPrincipal());
        VOPTS.setPrincipal(getAdminPrincipal());
    } else if (adminToken instanceof KerberosToken) {
        ClusterUser adminUser = getAdminUser();
        ingestArgs = new String[] { "-u", getAdminPrincipal(), "--keytab", adminUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName };
        ClientConfiguration clientConfig = cluster.getClientConfig();
        OPTS.updateKerberosCredentials(clientConfig);
        VOPTS.updateKerberosCredentials(clientConfig);
    } else {
        throw new RuntimeException("Unknown token");
    }
    // Kick off the ingest asynchronously so the master can be bounced while it runs.
    Future<Integer> ingestResult = svc.submit(new Callable<Integer>() {

        @Override
        public Integer call() {
            try {
                return control.exec(TestIngest.class, ingestArgs);
            } catch (IOException e) {
                log.error("Error running TestIngest", e);
                return -1;
            }
        }
    });
    control.stopAllServers(ServerType.MASTER);
    control.startAllServers(ServerType.MASTER);
    assertEquals(0, ingestResult.get().intValue());
    VerifyIngest.verifyIngest(conn, VOPTS, SOPTS);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) AuthenticationToken(org.apache.accumulo.core.client.security.tokens.AuthenticationToken) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) IOException(java.io.IOException) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) TestIngest(org.apache.accumulo.test.TestIngest) ClusterUser(org.apache.accumulo.cluster.ClusterUser) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) ClusterControl(org.apache.accumulo.cluster.ClusterControl) Test(org.junit.Test)

Example 65 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

The class ScanIteratorIT, method setup.

/**
 * Per-test setup: creates a fresh table, (re)creates the test user with either a
 * password token or no token under SASL/Kerberos, and grants that user READ/WRITE
 * on the table plus the authorizations the scan-iterator tests require.
 *
 * @throws Exception on any table, user, or permission operation failure
 */
@Before
public void setup() throws Exception {
    connector = getConnector();
    tableName = getUniqueNames(1)[0];
    connector.tableOperations().create(tableName);
    ClientConfiguration clientConfig = cluster.getClientConfig();
    ClusterUser testUser = getUser(0);
    user = testUser.getPrincipal();
    saslEnabled = clientConfig.hasSasl();
    // Under SASL/Kerberos there is no password; createLocalUser takes a null token then.
    PasswordToken userToken = saslEnabled ? null : new PasswordToken(testUser.getPassword());
    // Drop any leftover user from a prior run before recreating it.
    if (connector.securityOperations().listLocalUsers().contains(user)) {
        log.info("Dropping {}", user);
        connector.securityOperations().dropLocalUser(user);
    }
    connector.securityOperations().createLocalUser(user, userToken);
    connector.securityOperations().grantTablePermission(user, tableName, TablePermission.READ);
    connector.securityOperations().grantTablePermission(user, tableName, TablePermission.WRITE);
    connector.securityOperations().changeUserAuthorizations(user, AuthsIterator.AUTHS);
}
Also used : PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) ClusterUser(org.apache.accumulo.cluster.ClusterUser) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Before(org.junit.Before)

Aggregations

ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration)79 Test (org.junit.Test)40 Connector (org.apache.accumulo.core.client.Connector)28 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)28 IOException (java.io.IOException)16 TestIngest (org.apache.accumulo.test.TestIngest)15 BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts)13 ScannerOpts (org.apache.accumulo.core.cli.ScannerOpts)12 KerberosToken (org.apache.accumulo.core.client.security.tokens.KerberosToken)12 AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)11 VerifyIngest (org.apache.accumulo.test.VerifyIngest)11 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)10 ClusterUser (org.apache.accumulo.cluster.ClusterUser)9 ZooKeeperInstance (org.apache.accumulo.core.client.ZooKeeperInstance)9 Map (java.util.Map)7 AccumuloException (org.apache.accumulo.core.client.AccumuloException)7 AuthenticationToken (org.apache.accumulo.core.client.security.tokens.AuthenticationToken)7 Instance (org.apache.accumulo.core.client.Instance)6 Authorizations (org.apache.accumulo.core.security.Authorizations)6 Path (org.apache.hadoop.fs.Path)6