Search in sources :

Example 66 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

the class AccumuloInputFormatIT method testGetSplits.

/**
 * Tests several different paths through the getSplits() method by setting different properties
 * (ranges, auto-adjust, offline scan, batch scan, isolation, local iterators) and verifying the
 * number and type of splits returned.
 */
@Test
public void testGetSplits() throws Exception {
    Connector conn = getConnector();
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    insertData(table, currentTimeMillis());
    ClientConfiguration clientConf = cluster.getClientConfig();
    AccumuloConfiguration clusterClientConf = new ConfigurationCopy(DefaultConfiguration.getInstance());
    // Pass SSL and CredentialProvider options into the ClientConfiguration given to AccumuloInputFormat
    boolean sslEnabled = Boolean.parseBoolean(clusterClientConf.get(Property.INSTANCE_RPC_SSL_ENABLED));
    if (sslEnabled) {
        ClientProperty[] sslProperties = new ClientProperty[] { ClientProperty.INSTANCE_RPC_SSL_ENABLED, ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH, ClientProperty.RPC_SSL_KEYSTORE_PATH, ClientProperty.RPC_SSL_KEYSTORE_TYPE, ClientProperty.RPC_SSL_KEYSTORE_PASSWORD, ClientProperty.RPC_SSL_TRUSTSTORE_PATH, ClientProperty.RPC_SSL_TRUSTSTORE_TYPE, ClientProperty.RPC_SSL_TRUSTSTORE_PASSWORD, ClientProperty.RPC_USE_JSSE, ClientProperty.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS };
        for (ClientProperty prop : sslProperties) {
            // The default property is returned if it's not in the ClientConfiguration so we don't have to check if the value is actually defined
            clientConf.setProperty(prop, clusterClientConf.get(prop.getKey()));
        }
    }
    Job job = Job.getInstance();
    AccumuloInputFormat.setInputTableName(job, table);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConf);
    AccumuloInputFormat.setConnectorInfo(job, getAdminPrincipal(), getAdminToken());
    // split table into 10 tablets at evenly spaced row boundaries
    TreeSet<Text> splitsToAdd = new TreeSet<>();
    for (int i = 0; i < 10000; i += 1000) splitsToAdd.add(new Text(String.format("%09d", i)));
    conn.tableOperations().addSplits(table, splitsToAdd);
    // wait for splits to be propagated
    sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    // get splits without setting any range
    Collection<Text> actualSplits = conn.tableOperations().listSplits(table);
    // No ranges set on the job so it'll start with -inf, giving one extra split
    List<InputSplit> splits = inputFormat.getSplits(job);
    assertEquals(actualSplits.size() + 1, splits.size());
    // set one range per tablet boundary and get splits
    List<Range> ranges = new ArrayList<>();
    for (Text text : actualSplits) ranges.add(new Range(text));
    AccumuloInputFormat.setRanges(job, ranges);
    splits = inputFormat.getSplits(job);
    assertEquals(actualSplits.size(), splits.size());
    // offline mode: getSplits must fail while the table is still online
    AccumuloInputFormat.setOfflineTableScan(job, true);
    try {
        inputFormat.getSplits(job);
        fail("An exception should have been thrown");
    } catch (IOException expected) {
        // expected: offline scan requires the table to be offline
    }
    conn.tableOperations().offline(table, true);
    splits = inputFormat.getSplits(job);
    assertEquals(actualSplits.size(), splits.size());
    // auto adjust ranges: overlapping ranges get merged down to the tablet count they cover
    ranges = new ArrayList<>();
    for (int i = 0; i < 5; i++) // overlapping ranges
    ranges.add(new Range(String.format("%09d", i), String.format("%09d", i + 2)));
    AccumuloInputFormat.setRanges(job, ranges);
    splits = inputFormat.getSplits(job);
    assertEquals(2, splits.size());
    // without auto-adjust each range becomes its own split, overlaps and all
    AccumuloInputFormat.setAutoAdjustRanges(job, false);
    splits = inputFormat.getSplits(job);
    assertEquals(ranges.size(), splits.size());
    // BatchScan not available for offline scans
    AccumuloInputFormat.setBatchScan(job, true);
    // Reset auto-adjust ranges too
    AccumuloInputFormat.setAutoAdjustRanges(job, true);
    AccumuloInputFormat.setOfflineTableScan(job, true);
    try {
        inputFormat.getSplits(job);
        fail("An exception should have been thrown");
    } catch (IllegalArgumentException expected) {
        // expected: batch scan and offline scan are mutually exclusive
    }
    conn.tableOperations().online(table, true);
    AccumuloInputFormat.setOfflineTableScan(job, false);
    // test for resumption of success
    splits = inputFormat.getSplits(job);
    assertEquals(2, splits.size());
    // BatchScan not available with isolated iterators
    AccumuloInputFormat.setScanIsolation(job, true);
    try {
        inputFormat.getSplits(job);
        fail("An exception should have been thrown");
    } catch (IllegalArgumentException expected) {
        // expected: batch scan and scan isolation are mutually exclusive
    }
    AccumuloInputFormat.setScanIsolation(job, false);
    // test for resumption of success
    splits = inputFormat.getSplits(job);
    assertEquals(2, splits.size());
    // BatchScan not available with local iterators
    AccumuloInputFormat.setLocalIterators(job, true);
    try {
        inputFormat.getSplits(job);
        fail("An exception should have been thrown");
    } catch (IllegalArgumentException expected) {
        // expected: batch scan and local iterators are mutually exclusive
    }
    AccumuloInputFormat.setLocalIterators(job, false);
    // Check we are getting back correct type of split; use assertTrue rather than the `assert`
    // statement so the check runs even when JVM assertions (-ea) are disabled
    conn.tableOperations().online(table);
    splits = inputFormat.getSplits(job);
    for (InputSplit split : splits) assertTrue(split instanceof BatchInputSplit);
    // We should divide along the tablet lines similar to when using `setAutoAdjustRanges(job, true)`
    assertEquals(2, splits.size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) ClientProperty(org.apache.accumulo.core.client.ClientConfiguration.ClientProperty) ArrayList(java.util.ArrayList) BatchInputSplit(org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) Range(org.apache.accumulo.core.data.Range) TreeSet(java.util.TreeSet) Job(org.apache.hadoop.mapreduce.Job) BatchInputSplit(org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit) RangeInputSplit(org.apache.accumulo.core.client.mapreduce.RangeInputSplit) InputSplit(org.apache.hadoop.mapreduce.InputSplit) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) Test(org.junit.Test)

Example 67 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

the class SplitIT method deleteSplit.

@Test
public void deleteSplit() throws Exception {
    // Create a table with a very low split threshold so the delete workload forces splits.
    Connector conn = getConnector();
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    conn.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
    // Choose credentials matching the cluster's authentication mode: keytab under SASL,
    // otherwise the admin password.
    ClientConfiguration clientConfig = getCluster().getClientConfig();
    String password = null;
    String keytab = null;
    if (clientConfig.hasSasl()) {
        keytab = getAdminUser().getKeytab().getAbsolutePath();
    } else {
        password = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
    }
    DeleteIT.deleteTest(conn, getCluster(), getAdminPrincipal(), password, table, keytab);
    conn.tableOperations().flush(table, null, null, true);
    // Poll up to five times, 10 seconds apart, until the table reports more than 20 splits.
    int attempt = 0;
    while (attempt < 5) {
        sleepUninterruptibly(10, TimeUnit.SECONDS);
        if (conn.tableOperations().listSplits(table).size() > 20) {
            break;
        }
        attempt++;
    }
    assertTrue(conn.tableOperations().listSplits(table).size() > 20);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Test(org.junit.Test)

Example 68 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

the class SplitIT method tabletShouldSplit.

/**
 * Ingests enough data into a table with a low split threshold that the tablet server must split
 * it, verifies the split points were shortened in the metadata table, then runs
 * CheckForMetadataProblems to confirm the metadata is consistent.
 */
@Test
public void tabletShouldSplit() throws Exception {
    Connector c = getConnector();
    String table = getUniqueNames(1)[0];
    c.tableOperations().create(table);
    // Low split threshold + small compressed block size so 100k rows forces many splits.
    c.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
    c.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
    TestIngest.Opts opts = new TestIngest.Opts();
    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
    opts.rows = 100000;
    opts.setTableName(table);
    // Credentials depend on whether the cluster runs with SASL (Kerberos) enabled.
    ClientConfiguration clientConfig = cluster.getClientConfig();
    if (clientConfig.hasSasl()) {
        opts.updateKerberosCredentials(clientConfig);
        vopts.updateKerberosCredentials(clientConfig);
    } else {
        opts.setPrincipal(getAdminPrincipal());
        vopts.setPrincipal(getAdminPrincipal());
    }
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    vopts.rows = opts.rows;
    vopts.setTableName(table);
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
    // Wait for the tablet server to split the table at least 10 ways.
    while (c.tableOperations().listSplits(table).size() < 10) {
        sleepUninterruptibly(15, TimeUnit.SECONDS);
    }
    Table.ID id = Table.ID.of(c.tableOperations().tableIdMap().get(table));
    try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        KeyExtent extent = new KeyExtent(id, null, null);
        s.setRange(extent.toMetadataRange());
        MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
        int count = 0;
        int shortened = 0;
        for (Entry<Key, Value> entry : s) {
            extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
            // Split points shorter than a full 14-char row indicate the server shortened them.
            if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14)
                shortened++;
            count++;
        }
        assertTrue("Shortened should be greater than zero: " + shortened, shortened > 0);
        assertTrue("Count should be greater than 10: " + count, count > 10);
    }
    String[] args;
    if (clientConfig.hasSasl()) {
        ClusterUser rootUser = getAdminUser();
        args = new String[] { "-i", cluster.getInstanceName(), "-u", rootUser.getPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-z", cluster.getZooKeepers() };
    } else {
        // NOTE(review): the SASL branch uses rootUser.getPrincipal() but this branch hard-codes
        // "root" — presumably equivalent on MiniAccumuloCluster; confirm before generalizing.
        PasswordToken token = (PasswordToken) getAdminToken();
        args = new String[] { "-i", cluster.getInstanceName(), "-u", "root", "-p", new String(token.getPassword(), UTF_8), "-z", cluster.getZooKeepers() };
    }
    assertEquals(0, getCluster().getClusterControl().exec(CheckForMetadataProblems.class, args));
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) CheckForMetadataProblems(org.apache.accumulo.server.util.CheckForMetadataProblems) TestIngest(org.apache.accumulo.test.TestIngest) VerifyIngest(org.apache.accumulo.test.VerifyIngest) Value(org.apache.accumulo.core.data.Value) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) ClusterUser(org.apache.accumulo.cluster.ClusterUser) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 69 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

the class ClientOpts method getClientConfiguration.

/**
 * Builds (and caches) the {@code ClientConfiguration} for these command-line options.
 *
 * Sources, in order: the explicit client config file if given (else the default load path),
 * then overrides from the --ssl/--sasl flags, then — if a site file was supplied — instance id,
 * ZooKeeper hosts, and SSL setting derived from that site configuration.
 *
 * @return the resolved client configuration (memoized in {@code cachedClientConfig})
 * @throws IllegalArgumentException if loading the client config file or site file fails
 */
public ClientConfiguration getClientConfiguration() throws IllegalArgumentException {
    // Return the previously built configuration if one exists.
    if (cachedClientConfig != null)
        return cachedClientConfig;
    ClientConfiguration clientConfig;
    try {
        if (clientConfigFile == null)
            clientConfig = ClientConfiguration.loadDefault();
        else
            clientConfig = ClientConfiguration.fromFile(new File(clientConfigFile));
    } catch (Exception e) {
        // Wrap any load failure (missing/unreadable/invalid file) as IllegalArgumentException.
        throw new IllegalArgumentException(e);
    }
    // Command-line flags override whatever the config file said.
    if (sslEnabled)
        clientConfig.setProperty(ClientProperty.INSTANCE_RPC_SSL_ENABLED, "true");
    if (saslEnabled)
        clientConfig.setProperty(ClientProperty.INSTANCE_RPC_SASL_ENABLED, "true");
    if (siteFile != null) {
        // Anonymous AccumuloConfiguration layering the site XML over the defaults:
        // XML wins where a property is set, defaults fill the rest.
        AccumuloConfiguration config = new AccumuloConfiguration() {

            Configuration xml = new Configuration();

            {
                xml.addResource(new Path(siteFile));
            }

            @Override
            public void getProperties(Map<String, String> props, Predicate<String> filter) {
                // Defaults first, then XML entries, so XML values overwrite defaults.
                for (Entry<String, String> prop : DefaultConfiguration.getInstance()) if (filter.test(prop.getKey()))
                    props.put(prop.getKey(), prop.getValue());
                for (Entry<String, String> prop : xml) if (filter.test(prop.getKey()))
                    props.put(prop.getKey(), prop.getValue());
            }

            @Override
            public String get(Property property) {
                // Prefer the site XML value; fall back to the compiled-in default.
                String value = xml.get(property.getKey());
                if (value != null)
                    return value;
                return DefaultConfiguration.getInstance().get(property);
            }
        };
        // Derive connection details from the site config rather than the command line.
        this.zookeepers = config.get(Property.INSTANCE_ZK_HOST);
        String volDir = VolumeConfiguration.getVolumeUris(config)[0];
        Path instanceDir = new Path(volDir, "instance_id");
        // Read the instance UUID out of HDFS under the first configured volume.
        String instanceIDFromFile = ZooUtil.getInstanceIDFromHdfs(instanceDir, config);
        if (config.getBoolean(Property.INSTANCE_RPC_SSL_ENABLED))
            clientConfig.setProperty(ClientProperty.INSTANCE_RPC_SSL_ENABLED, "true");
        return cachedClientConfig = clientConfig.withInstance(UUID.fromString(instanceIDFromFile)).withZkHosts(zookeepers);
    }
    // No site file: use the instance name and ZooKeeper hosts from the command-line options.
    return cachedClientConfig = clientConfig.withInstance(instance).withZkHosts(zookeepers);
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DefaultConfiguration(org.apache.accumulo.core.conf.DefaultConfiguration) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) VolumeConfiguration(org.apache.accumulo.core.volume.VolumeConfiguration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) File(java.io.File) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) ClientProperty(org.apache.accumulo.core.client.ClientConfiguration.ClientProperty) Property(org.apache.accumulo.core.conf.Property) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) Predicate(java.util.function.Predicate)

Example 70 with ClientConfiguration

use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.

the class ClientOpts method updateKerberosCredentials.

/**
 * Automatically update the options to use a KerberosToken when SASL is enabled for RPCs. Don't
 * overwrite the options if the user has provided something specifically.
 */
public void updateKerberosCredentials() {
    // Load the client configuration from the explicit file when one was given,
    // otherwise from the default search path.
    ClientConfiguration loadedConfig;
    try {
        loadedConfig = (clientConfigFile == null) ? ClientConfiguration.loadDefault()
                : ClientConfiguration.fromFile(new File(clientConfigFile));
    } catch (Exception e) {
        // Surface any load failure as an IllegalArgumentException, preserving the cause.
        throw new IllegalArgumentException(e);
    }
    updateKerberosCredentials(loadedConfig);
}
Also used : File(java.io.File) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException)

Aggregations

ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration)79 Test (org.junit.Test)40 Connector (org.apache.accumulo.core.client.Connector)28 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)28 IOException (java.io.IOException)16 TestIngest (org.apache.accumulo.test.TestIngest)15 BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts)13 ScannerOpts (org.apache.accumulo.core.cli.ScannerOpts)12 KerberosToken (org.apache.accumulo.core.client.security.tokens.KerberosToken)12 AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)11 VerifyIngest (org.apache.accumulo.test.VerifyIngest)11 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)10 ClusterUser (org.apache.accumulo.cluster.ClusterUser)9 ZooKeeperInstance (org.apache.accumulo.core.client.ZooKeeperInstance)9 Map (java.util.Map)7 AccumuloException (org.apache.accumulo.core.client.AccumuloException)7 AuthenticationToken (org.apache.accumulo.core.client.security.tokens.AuthenticationToken)7 Instance (org.apache.accumulo.core.client.Instance)6 Authorizations (org.apache.accumulo.core.security.Authorizations)6 Path (org.apache.hadoop.fs.Path)6