Search in sources :

Example 71 with AccumuloClient

use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class ShellCreateTableIT defines the method testCreateTableWithBinarySplitsFile7.

/**
 * Use shell to create a table with a supplied file containing splits.
 *
 * The splits will be contained in a file, sorted and encoded with a blank line and repeats.
 *
 * @throws IOException if the splits file cannot be written or read
 */
@Test
public void testCreateTableWithBinarySplitsFile7() throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
    String splitsFile = System.getProperty("user.dir") + "/target/splitFile";
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        generateSplitsFile(splitsFile, 100, 12, true, true, true, true, true);
        SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile);
        final String tableName = getUniqueNames(1)[0];
        // Create the table through the shell, supplying the splits file via -sf
        ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
        Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
        assertEquals(expectedSplits, new TreeSet<>(createdSplits));
    } finally {
        // deleteIfExists: if generateSplitsFile failed before creating the file, a plain
        // Files.delete would throw NoSuchFileException here and mask the real test failure
        Files.deleteIfExists(Paths.get(splitsFile));
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Text(org.apache.hadoop.io.Text) Test(org.junit.Test)

Example 72 with AccumuloClient

use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class ShellServerIT defines the method setIterOptionPrompt.

/**
 * Exercises the shell's setiter command: a non-OptionDescriber iterator requires an explicit
 * name (either via -name or interactive input), and iterator options supplied interactively
 * must land in the table configuration under the expected property keys.
 */
@Test
public void setIterOptionPrompt() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        final TableOperations tableOps = client.tableOperations();
        final String[] names = getUniqueNames(4);

        // Table 0: iterator named explicitly on the command line.
        ts.exec("createtable " + names[0]);
        ts.input.set("\n\n");
        // A non-OptionDescriber class with no name supplied must be rejected
        ts.exec("setiter -scan -class " + COLUMN_FAMILY_COUNTER_ITERATOR + " -p 30", false);
        // Supplying -name makes the same command succeed
        ts.exec("setiter -scan -class " + COLUMN_FAMILY_COUNTER_ITERATOR + " -p 30 -name cfcounter", true);
        checkTableForProperty(tableOps, names[0], "table.iterator.scan.cfcounter", "30," + COLUMN_FAMILY_COUNTER_ITERATOR);
        ts.exec("deletetable " + names[0], true);

        // Table 1: name taken from interactive input instead of the CLI.
        ts.exec("createtable " + names[1], true);
        ts.input.set("customcfcounter\n\n");
        ts.exec("setiter -scan -class " + COLUMN_FAMILY_COUNTER_ITERATOR + " -p 30", true);
        checkTableForProperty(tableOps, names[1], "table.iterator.scan.customcfcounter", "30," + COLUMN_FAMILY_COUNTER_ITERATOR);
        ts.exec("deletetable " + names[1], true);

        // Table 2: interactive name plus two iterator options.
        ts.exec("createtable " + names[2], true);
        ts.input.set("customcfcounter\nname1 value1\nname2 value2\n\n");
        ts.exec("setiter -scan -class " + COLUMN_FAMILY_COUNTER_ITERATOR + " -p 30", true);
        checkTableForProperty(tableOps, names[2], "table.iterator.scan.customcfcounter", "30," + COLUMN_FAMILY_COUNTER_ITERATOR);
        checkTableForProperty(tableOps, names[2], "table.iterator.scan.customcfcounter.opt.name1", "value1");
        checkTableForProperty(tableOps, names[2], "table.iterator.scan.customcfcounter.opt.name2", "value2");
        ts.exec("deletetable " + names[2], true);

        // Table 3: CLI -name overrides interactive input; options still come from input.
        ts.exec("createtable " + names[3], true);
        ts.input.set("\nname1 value1.1,value1.2,value1.3\nname2 value2\n\n");
        ts.exec("setiter -scan -class " + COLUMN_FAMILY_COUNTER_ITERATOR + " -p 30 -name cfcounter", true);
        checkTableForProperty(tableOps, names[3], "table.iterator.scan.cfcounter", "30," + COLUMN_FAMILY_COUNTER_ITERATOR);
        checkTableForProperty(tableOps, names[3], "table.iterator.scan.cfcounter.opt.name1", "value1.1,value1.2,value1.3");
        checkTableForProperty(tableOps, names[3], "table.iterator.scan.cfcounter.opt.name2", "value2");
        ts.exec("deletetable " + names[3], true);
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) Test(org.junit.Test)

Example 73 with AccumuloClient

use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class StatusCombinerMacIT defines the method testCombinerSetOnMetadata.

/**
 * Verifies that the replication status combiner is configured on the metadata table for all
 * three iterator scopes (scan, minc, majc) and that each scope's "columns" option targets the
 * replication column family.
 */
@Test
public void testCombinerSetOnMetadata() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        TableOperations tops = client.tableOperations();
        Map<String, EnumSet<IteratorScope>> iterators = tops.listIterators(MetadataTable.NAME);
        assertTrue(iterators.containsKey(ReplicationTableUtil.COMBINER_NAME));
        EnumSet<IteratorScope> scopes = iterators.get(ReplicationTableUtil.COMBINER_NAME);
        // The combiner must be installed for every scope
        assertEquals(3, scopes.size());
        assertTrue(scopes.contains(IteratorScope.scan));
        assertTrue(scopes.contains(IteratorScope.minc));
        assertTrue(scopes.contains(IteratorScope.majc));
        // Read the configuration directly; the previous Map.copyOf was a redundant defensive
        // copy of a map that is only read locally (and Map.copyOf would NPE on null values)
        Map<String, String> properties = tops.getConfiguration(MetadataTable.NAME);
        for (IteratorScope scope : scopes) {
            String key = Property.TABLE_ITERATOR_PREFIX.getKey() + scope.name() + "." + ReplicationTableUtil.COMBINER_NAME + ".opt.columns";
            assertTrue("Properties did not contain key : " + key, properties.containsKey(key));
            assertEquals(ReplicationSection.COLF.toString(), properties.get(key));
        }
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) EnumSet(java.util.EnumSet) IteratorScope(org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope) Test(org.junit.Test)

Example 74 with AccumuloClient

use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class ShellCreateTableIT defines the method testCreateTableWithSplitsFile2.

/**
 * Use shell to create a table with a supplied file containing splits.
 *
 * The splits will be contained in a file, unsorted and un-encoded with no repeats or blank lines.
 *
 * @throws IOException if the splits file cannot be written or read
 */
@Test
public void testCreateTableWithSplitsFile2() throws IOException, AccumuloSecurityException, TableNotFoundException, AccumuloException {
    String splitsFile = System.getProperty("user.dir") + "/target/splitFile";
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        generateSplitsFile(splitsFile, 300, 12, false, false, false, false, false);
        SortedSet<Text> expectedSplits = readSplitsFromFile(splitsFile);
        final String tableName = getUniqueNames(1)[0];
        // Create the table through the shell, supplying the splits file via -sf
        ts.exec("createtable " + tableName + " -sf " + splitsFile, true);
        Collection<Text> createdSplits = client.tableOperations().listSplits(tableName);
        assertEquals(expectedSplits, new TreeSet<>(createdSplits));
    } finally {
        // deleteIfExists: if generateSplitsFile failed before creating the file, a plain
        // Files.delete would throw NoSuchFileException here and mask the real test failure
        Files.deleteIfExists(Paths.get(splitsFile));
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Text(org.apache.hadoop.io.Text) Test(org.junit.Test)

Example 75 with AccumuloClient

use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class AccumuloInputFormatIT defines the method testGetSplitsNoReadPermission.

/**
 * Verifies that AccumuloInputFormat.getSplits fails with an IOException when the connecting
 * user lacks READ permission on the input table. Uses the deprecated mapreduce API via
 * fully-qualified names on purpose.
 */
@Test
public void testGetSplitsNoReadPermission() throws Exception {
    Job job = Job.getInstance();
    String tableName = getUniqueNames(1)[0];
    Authorizations scanAuths = new Authorizations("foo");
    Pair<Text, Text> column = new Pair<>(new Text("foo"), new Text("bar"));
    Collection<Pair<Text, Text>> columnsToFetch = Collections.singleton(column);
    boolean useIsolatedScanners = true;
    boolean useLocalIterators = true;
    Level logLevel = Level.WARN;
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        // Create the table, then strip our own READ permission so getSplits must fail
        client.tableOperations().create(tableName);
        client.securityOperations().revokeTablePermission(client.whoami(), tableName, TablePermission.READ);
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setZooKeeperInstance(job, cluster.getClientConfig());
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setConnectorInfo(job, getAdminPrincipal(), getAdminToken());
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setInputTableName(job, tableName);
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setScanAuthorizations(job, scanAuths);
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setScanIsolation(job, useIsolatedScanners);
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setLocalIterators(job, useLocalIterators);
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.fetchColumns(job, columnsToFetch);
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat.setLogLevel(job, logLevel);
        org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat inputFormat = new org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat();
        assertThrows(IOException.class, () -> inputFormat.getSplits(job));
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Authorizations(org.apache.accumulo.core.security.Authorizations) Text(org.apache.hadoop.io.Text) Level(org.apache.log4j.Level) Job(org.apache.hadoop.mapreduce.Job) Pair(org.apache.accumulo.core.util.Pair) Test(org.junit.Test)

Aggregations

AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)500 Test (org.junit.Test)411 BatchWriter (org.apache.accumulo.core.client.BatchWriter)149 Text (org.apache.hadoop.io.Text)143 Mutation (org.apache.accumulo.core.data.Mutation)138 Scanner (org.apache.accumulo.core.client.Scanner)122 Value (org.apache.accumulo.core.data.Value)118 Key (org.apache.accumulo.core.data.Key)108 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)91 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)64 HashMap (java.util.HashMap)61 Range (org.apache.accumulo.core.data.Range)51 TreeSet (java.util.TreeSet)50 ArrayList (java.util.ArrayList)47 Entry (java.util.Map.Entry)41 Path (org.apache.hadoop.fs.Path)39 CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig)34 Authorizations (org.apache.accumulo.core.security.Authorizations)34 BatchScanner (org.apache.accumulo.core.client.BatchScanner)32 HashSet (java.util.HashSet)31