Search in sources:

Example 1 with Password

use of org.apache.accumulo.core.cli.ClientOpts.Password in project accumulo by apache.

In the class DeleteIT, the method deleteTest:

/**
 * Ingests 1000 rows into {@code tableName}, runs {@link TestRandomDeletes}
 * against the table, re-ingests, and verifies the final table contents.
 * Exactly one of {@code password} or {@code keytab} must be non-null; the
 * chosen credential is applied both to the in-process ingest/verify options
 * and to the argument list of the external delete process.
 */
public static void deleteTest(Connector c, AccumuloCluster cluster, String user, String password, String tableName, String keytab) throws Exception {
    TestIngest.Opts ingestOpts = new TestIngest.Opts();
    VerifyIngest.Opts verifyOpts = new VerifyIngest.Opts();
    ingestOpts.setTableName(tableName);
    verifyOpts.setTableName(tableName);
    // Ingest and verify parameters must stay in lock-step.
    ingestOpts.rows = 1000;
    verifyOpts.rows = 1000;
    ingestOpts.cols = 1;
    verifyOpts.cols = 1;
    ingestOpts.random = 56;
    verifyOpts.random = 56;
    assertTrue("Expected one of password or keytab", password != null || keytab != null);
    if (password != null) {
        assertNull("Given password, expected null keytab", keytab);
        Password passwordWrapper = new Password(password);
        ingestOpts.setPrincipal(user);
        ingestOpts.setPassword(passwordWrapper);
        verifyOpts.setPrincipal(user);
        verifyOpts.setPassword(passwordWrapper);
    }
    if (keytab != null) {
        assertNull("Given keytab, expect null password", password);
        ClientConfiguration clientConfig = cluster.getClientConfig();
        ingestOpts.updateKerberosCredentials(clientConfig);
        verifyOpts.updateKerberosCredentials(clientConfig);
    }
    BatchWriterOpts batchWriterOpts = new BatchWriterOpts();
    TestIngest.ingest(c, ingestOpts, batchWriterOpts);
    String[] deleteArgs = null;
    assertTrue("Expected one of password or keytab", password != null || keytab != null);
    if (password != null) {
        assertNull("Given password, expected null keytab", keytab);
        deleteArgs = new String[] { "-u", user, "-p", password, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName };
    }
    if (keytab != null) {
        assertNull("Given keytab, expect null password", password);
        deleteArgs = new String[] { "-u", user, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName, "--keytab", keytab };
    }
    // The delete tool runs as an external process and must exit cleanly.
    assertEquals(0, cluster.getClusterControl().exec(TestRandomDeletes.class, deleteArgs));
    TestIngest.ingest(c, ingestOpts, batchWriterOpts);
    VerifyIngest.verifyIngest(c, verifyOpts, new ScannerOpts());
}
Also used : ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) VerifyIngest(org.apache.accumulo.test.VerifyIngest) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) TestIngest(org.apache.accumulo.test.TestIngest) TestRandomDeletes(org.apache.accumulo.test.TestRandomDeletes) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Password(org.apache.accumulo.core.cli.ClientOpts.Password)

Example 2 with Password

use of org.apache.accumulo.core.cli.ClientOpts.Password in project accumulo by apache.

In the class BulkSplitOptimizationIT, the method testBulkSplitOptimization:

/**
 * Bulk-imports pre-generated RFiles into a table configured not to split,
 * then lowers the split threshold and verifies that tablets split and that
 * the imported files end up distributed across the resulting tablets.
 */
@Test
public void testBulkSplitOptimization() throws Exception {
    final Connector conn = getConnector();
    final String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    // Suppress compactions and splits while the bulk files are loaded.
    conn.tableOperations().setProperty(table, Property.TABLE_MAJC_RATIO.getKey(), "1000");
    conn.tableOperations().setProperty(table, Property.TABLE_FILE_MAX.getKey(), "1000");
    conn.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
    FileSystem fs = cluster.getFileSystem();
    Path testDir = new Path(getUsableDir(), "testmf");
    FunctionalTestUtils.createRFiles(conn, fs, testDir.toString(), ROWS, SPLITS, 8);
    FileStatus[] generated = fs.listStatus(testDir);
    System.out.println("Number of generated files: " + generated.length);
    FunctionalTestUtils.bulkImport(conn, fs, table, testDir.toString());
    // No splits yet, and every file should cover every tablet.
    FunctionalTestUtils.checkSplits(conn, table, 0, 0);
    FunctionalTestUtils.checkRFiles(conn, table, 1, 1, 100, 100);
    // initiate splits
    getConnector().tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");
    sleepUninterruptibly(2, TimeUnit.SECONDS);
    // wait until over split threshold -- should be 78 splits
    while (getConnector().tableOperations().listSplits(table).size() < 75) {
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
    FunctionalTestUtils.checkSplits(conn, table, 50, 100);
    VerifyIngest.Opts verifyOpts = new VerifyIngest.Opts();
    verifyOpts.timestamp = 1;
    verifyOpts.dataSize = 50;
    verifyOpts.random = 56;
    verifyOpts.rows = 100000;
    verifyOpts.startRow = 0;
    verifyOpts.cols = 1;
    verifyOpts.setTableName(table);
    // Pick credentials matching whatever token type the cluster is using.
    AuthenticationToken adminToken = getAdminToken();
    if (adminToken instanceof PasswordToken) {
        String adminPass = new String(((PasswordToken) adminToken).getPassword(), UTF_8);
        verifyOpts.setPassword(new Password(adminPass));
        verifyOpts.setPrincipal(getAdminPrincipal());
    } else if (adminToken instanceof KerberosToken) {
        verifyOpts.updateKerberosCredentials(cluster.getClientConfig());
    } else {
        Assert.fail("Unknown token type");
    }
    VerifyIngest.verifyIngest(conn, verifyOpts, new ScannerOpts());
    // ensure each tablet does not have all map files, should be ~2.5 files per tablet
    FunctionalTestUtils.checkRFiles(conn, table, 50, 100, 1, 4);
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) FileStatus(org.apache.hadoop.fs.FileStatus) AuthenticationToken(org.apache.accumulo.core.client.security.tokens.AuthenticationToken) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) VerifyIngest(org.apache.accumulo.test.VerifyIngest) FileSystem(org.apache.hadoop.fs.FileSystem) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Password(org.apache.accumulo.core.cli.ClientOpts.Password) Test(org.junit.Test)

Example 3 with Password

use of org.apache.accumulo.core.cli.ClientOpts.Password in project accumulo by apache.

In the class CompactionIT, the method test:

/**
 * Bulk-imports many small RFiles, then repeatedly verifies the table from
 * multiple threads while compactions run, and asserts that the overall file
 * count dropped (i.e. compaction actually merged files). Finally stops the
 * cluster to reset its internal state, restarting it for standalone runs.
 */
@Test
public void test() throws Exception {
    final Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Aggressive ratio so compactions kick in as soon as files accumulate.
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
    FileSystem fs = getFileSystem();
    Path root = new Path(cluster.getTemporaryPath(), getClass().getName());
    Path testrf = new Path(root, "testrf");
    FunctionalTestUtils.createRFiles(c, fs, testrf.toString(), 500000, 59, 4);
    FunctionalTestUtils.bulkImport(c, fs, tableName, testrf.toString());
    int beforeCount = countFiles(c);
    final AtomicBoolean fail = new AtomicBoolean(false);
    final ClientConfiguration clientConf = cluster.getClientConfig();
    final int THREADS = 5;
    for (int count = 0; count < THREADS; count++) {
        ExecutorService executor = Executors.newFixedThreadPool(THREADS);
        // Each task verifies one contiguous slice of the 500k-row keyspace.
        final int span = 500000 / 59;
        for (int i = 0; i < 500000; i += span) {
            final int finalI = i;
            Runnable r = new Runnable() {

                @Override
                public void run() {
                    try {
                        VerifyIngest.Opts opts = new VerifyIngest.Opts();
                        opts.startRow = finalI;
                        opts.rows = span;
                        opts.random = 56;
                        opts.dataSize = 50;
                        opts.cols = 1;
                        opts.setTableName(tableName);
                        if (clientConf.hasSasl()) {
                            opts.updateKerberosCredentials(clientConf);
                        } else {
                            opts.setPrincipal(getAdminPrincipal());
                            PasswordToken passwordToken = (PasswordToken) getAdminToken();
                            opts.setPassword(new Password(new String(passwordToken.getPassword(), UTF_8)));
                        }
                        VerifyIngest.verifyIngest(c, opts, new ScannerOpts());
                    } catch (Exception ex) {
                        log.warn("Got exception verifying data", ex);
                        fail.set(true);
                    }
                }
            };
            executor.execute(r);
        }
        executor.shutdown();
        // BUGFIX: the awaitTermination result was previously ignored. On a
        // timeout, tasks could still be running when fail.get() was read,
        // silently masking verification failures. Fail loudly instead.
        assertTrue("Verification threads did not terminate within the timeout",
                executor.awaitTermination(defaultTimeoutSeconds(), TimeUnit.SECONDS));
        assertFalse("Failed to successfully run all threads, Check the test output for error", fail.get());
    }
    int finalCount = countFiles(c);
    assertTrue("Expected compaction to reduce the file count", finalCount < beforeCount);
    try {
        getClusterControl().adminStopAll();
    } finally {
        // Make sure the internal state in the cluster is reset (e.g. processes in MAC)
        getCluster().stop();
        if (ClusterType.STANDALONE == getClusterType()) {
            // Then restart things for the next test if it's a standalone
            getCluster().start();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) VerifyIngest(org.apache.accumulo.test.VerifyIngest) FileSystem(org.apache.hadoop.fs.FileSystem) RawLocalFileSystem(org.apache.hadoop.fs.RawLocalFileSystem) ExecutorService(java.util.concurrent.ExecutorService) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Password(org.apache.accumulo.core.cli.ClientOpts.Password) Test(org.junit.Test)

Example 4 with Password

use of org.apache.accumulo.core.cli.ClientOpts.Password in project accumulo by apache.

In the class CreateToken, the method execute:

/**
 * Interactively builds an {@link AuthenticationToken}, serializes it, and
 * appends a {@code principal:tokenClass:base64Token} line to the chosen
 * token file. Any value not supplied via options is prompted for on the
 * console; property values flagged as masked are read without echo.
 */
@Override
public void execute(String[] args) {
    Opts opts = new Opts();
    opts.parseArgs("accumulo create-token", args);
    // Prefer the plain password option; fall back to the secure variant.
    Password pass = opts.password;
    if (pass == null && opts.securePassword != null) {
        pass = opts.securePassword;
    }
    try {
        String principal = opts.principal;
        if (principal == null) {
            principal = getConsoleReader().readLine("Username (aka principal): ");
        }
        AuthenticationToken token = Class.forName(opts.tokenClassName).asSubclass(AuthenticationToken.class).newInstance();
        Properties props = new Properties();
        for (TokenProperty tp : token.getProperties()) {
            String input;
            if (pass != null && tp.getKey().equals("password")) {
                input = pass.toString();
            } else if (tp.getMask()) {
                // Masked prompt: echo '*' instead of the typed characters.
                input = getConsoleReader().readLine(tp.getDescription() + ": ", '*');
            } else {
                input = getConsoleReader().readLine(tp.getDescription() + ": ");
            }
            props.put(tp.getKey(), input);
        }
        // BUGFIX: init once after ALL properties are collected. The original
        // called token.init(props) on every loop pass with a partially-filled
        // Properties, which can fail for tokens requiring multiple properties.
        token.init(props);
        String tokenBase64 = Base64.getEncoder().encodeToString(AuthenticationTokenSerializer.serialize(token));
        String tokenFile = opts.tokenFile;
        if (tokenFile == null) {
            tokenFile = getConsoleReader().readLine("File to save auth token to: ");
        }
        File tf = new File(tokenFile);
        if (!tf.exists() && !tf.createNewFile()) {
            throw new IOException("Couldn't create " + tf.getCanonicalPath());
        }
        // BUGFIX: try-with-resources — the original leaked the PrintStream
        // (and its underlying file handle) if an exception was thrown.
        try (PrintStream out = new PrintStream(new FileOutputStream(tf, true), true, UTF_8.name())) {
            out.println(principal + ":" + opts.tokenClassName + ":" + tokenBase64);
        }
        System.out.println("Token written to " + tokenFile + ". Remember to upload it to hdfs.");
    } catch (IOException | InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        throw new RuntimeException(e);
    }
}
Also used : PrintStream(java.io.PrintStream) AuthenticationToken(org.apache.accumulo.core.client.security.tokens.AuthenticationToken) TokenProperty(org.apache.accumulo.core.client.security.tokens.AuthenticationToken.TokenProperty) IOException(java.io.IOException) Properties(org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties) FileOutputStream(java.io.FileOutputStream) File(java.io.File) Password(org.apache.accumulo.core.cli.ClientOpts.Password)

Aggregations

Password (org.apache.accumulo.core.cli.ClientOpts.Password)4 ScannerOpts (org.apache.accumulo.core.cli.ScannerOpts)3 ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration)3 VerifyIngest (org.apache.accumulo.test.VerifyIngest)3 Connector (org.apache.accumulo.core.client.Connector)2 AuthenticationToken (org.apache.accumulo.core.client.security.tokens.AuthenticationToken)2 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 Path (org.apache.hadoop.fs.Path)2 Test (org.junit.Test)2 File (java.io.File)1 FileOutputStream (java.io.FileOutputStream)1 IOException (java.io.IOException)1 PrintStream (java.io.PrintStream)1 ExecutorService (java.util.concurrent.ExecutorService)1 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)1 BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts)1 Properties (org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties)1 TokenProperty (org.apache.accumulo.core.client.security.tokens.AuthenticationToken.TokenProperty)1 KerberosToken (org.apache.accumulo.core.client.security.tokens.KerberosToken)1