
Example 21 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class UserCompactionStrategyIT method testIterators.

@Test
public void testIterators() throws Exception {
    // test compaction strategy + iterators
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    writeFlush(c, tableName, "a");
    writeFlush(c, tableName, "b");
    // create a file that starts with A containing rows 'a' and 'b'
    c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
    writeFlush(c, tableName, "c");
    writeFlush(c, tableName, "d");
    Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
    // select only the flush files (names starting with F) as compaction inputs; the A file from the earlier compaction is left alone
    CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
    csConfig.setOptions(ImmutableMap.of("inputPrefix", "F"));
    IteratorSetting iterConf = new IteratorSetting(21, "myregex", RegExFilter.class);
    RegExFilter.setRegexs(iterConf, "a|c", null, null, null, false);
    c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig).setIterators(Arrays.asList(iterConf)));
    // the iterator should only run over the files selected by the strategy; if it ran over the A file as well, row 'b' would be dropped by the filter
    Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
    Assert.assertEquals(2, FunctionalTestUtils.countRFiles(c, tableName));
    c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
    // ensure the iterator from the previous compaction is not applied to this one
    Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
    Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
}
Also used : CompactionStrategyConfig(org.apache.accumulo.core.client.admin.CompactionStrategyConfig) Connector(org.apache.accumulo.core.client.Connector) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Test(org.junit.Test)
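
The writeFlush helper invoked above belongs to UserCompactionStrategyIT but is not reproduced on this page. A minimal sketch of what such a helper might look like, assuming the usual BatchWriter, BatchWriterConfig, and Mutation imports from org.apache.accumulo.core.client and org.apache.accumulo.core.data; the empty column names are placeholders:

private static void writeFlush(Connector conn, String tableName, String row) throws Exception {
    // write a single small mutation for the given row
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    // force a minor compaction so the data lands in its own F-prefixed rfile
    conn.tableOperations().flush(tableName, null, null, true);
}

Each call therefore produces one flushed file, which is why the test expects three rfiles after two flushes plus the earlier full compaction.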

Example 22 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class UserCompactionStrategyIT method testFileSize.

@Test
public void testFileSize() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // write random data because it is very unlikely to compress
    writeRandomValue(c, tableName, 1 << 16);
    writeRandomValue(c, tableName, 1 << 16);
    writeRandomValue(c, tableName, 1 << 9);
    writeRandomValue(c, tableName, 1 << 7);
    writeRandomValue(c, tableName, 1 << 6);
    Assert.assertEquals(5, FunctionalTestUtils.countRFiles(c, tableName));
    CompactionStrategyConfig csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
    csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 15)));
    c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
    Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
    csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
    csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 17)));
    c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
    Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
}
Also used : CompactionStrategyConfig(org.apache.accumulo.core.client.admin.CompactionStrategyConfig) Connector(org.apache.accumulo.core.client.Connector) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Test(org.junit.Test)
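
writeRandomValue is another helper from the same test that is not shown here. A plausible sketch, assuming it writes a single value of the requested size filled with random bytes and then flushes, so each call yields one rfile of roughly that size (the row id and column names are placeholders):

private static void writeRandomValue(Connector c, String tableName, int size) throws Exception {
    Random rand = new Random();
    byte[] data = new byte[size];
    // random bytes are effectively incompressible, so the rfile size tracks the value size
    rand.nextBytes(data);
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("r" + Math.abs(rand.nextLong()));
    m.put("cf", "cq", new Value(data));
    bw.addMutation(m);
    bw.close();
    c.tableOperations().flush(tableName, null, null, true);
}

Assuming SizeCompactionStrategy selects files smaller than its configured size, the arithmetic matches the assertions: with size = 1 << 15 (32 KiB) only the 512-, 128-, and 64-byte files qualify and are merged into one, leaving 3 rfiles; with size = 1 << 17 (128 KiB) the two 64 KiB files and the merged small file all qualify, leaving a single rfile.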

Example 23 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class UsersIT method testCreateExistingUser.

@Test
public void testCreateExistingUser() throws Exception {
    ClusterUser user0 = getUser(0);
    Connector conn = getConnector();
    Set<String> currentUsers = conn.securityOperations().listLocalUsers();
    // Ensure that the user exists
    if (!currentUsers.contains(user0.getPrincipal())) {
        PasswordToken token = null;
        if (!getCluster().getClientConfig().hasSasl()) {
            token = new PasswordToken(user0.getPassword());
        }
        conn.securityOperations().createLocalUser(user0.getPrincipal(), token);
    }
    try {
        conn.securityOperations().createLocalUser(user0.getPrincipal(), new PasswordToken("better_fail"));
        fail("Creating a user that already exists should throw an exception");
    } catch (AccumuloSecurityException e) {
        assertTrue("Expected USER_EXISTS error", SecurityErrorCode.USER_EXISTS == e.getSecurityErrorCode());
        String msg = e.getMessage();
        assertTrue("Error message didn't contain principal: '" + msg + "'", msg.contains(user0.getPrincipal()));
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) ClusterUser(org.apache.accumulo.cluster.ClusterUser) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) Test(org.junit.Test)
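
Every example on this page obtains its Connector from the test harness via getConnector(). Outside the integration-test framework, a Connector for a 1.x instance is typically built from an Instance. A minimal sketch with placeholder instance name, ZooKeeper hosts, and credentials, exercising the same SecurityOperations calls as the test above:

// connect to a running instance (instance name, ZooKeeper quorum, and credentials are placeholders)
Instance inst = new ZooKeeperInstance("myInstance", "zkhost1:2181,zkhost2:2181");
Connector conn = inst.getConnector("root", new PasswordToken("rootPassword"));

// the user-management calls used in testCreateExistingUser
conn.securityOperations().createLocalUser("alice", new PasswordToken("alicePassword"));
Set<String> users = conn.securityOperations().listLocalUsers();
System.out.println(users);
// a second createLocalUser for "alice" would now fail with SecurityErrorCode.USER_EXISTS
conn.securityOperations().dropLocalUser("alice");

Note that on a SASL/Kerberos-enabled cluster the test passes a null token, mirroring the hasSasl() check above.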

Example 24 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class VerifySerialRecoveryIT method testSerializedRecovery.

@Test(timeout = 4 * 60 * 1000)
public void testSerializedRecovery() throws Exception {
    // make a table with many splits
    String tableName = getUniqueNames(1)[0];
    Connector c = getConnector();
    c.tableOperations().create(tableName);
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = 0; i < 200; i++) {
        splits.add(new Text(randomHex(8)));
    }
    c.tableOperations().addSplits(tableName, splits);
    // load data to give the recovery something to do
    BatchWriter bw = c.createBatchWriter(tableName, null);
    for (int i = 0; i < 50000; i++) {
        Mutation m = new Mutation(randomHex(8));
        m.put("", "", "");
        bw.addMutation(m);
    }
    bw.close();
    // kill the tserver
    for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
        getCluster().killProcess(ServerType.TABLET_SERVER, ref);
    }
    final Process ts = cluster.exec(TabletServer.class);
    // wait for recovery
    Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    ts.waitFor();
    String result = FunctionalTestUtils.readAll(cluster, TabletServer.class, ts);
    for (String line : result.split("\n")) {
        System.out.println(line);
    }
    // walk through the output, verifying that only a single normal recovery was running at one time
    boolean started = false;
    int recoveries = 0;
    for (String line : result.split("\n")) {
        // ignore metadata tables
        if (line.contains("!0") || line.contains("+r"))
            continue;
        if (line.contains("Starting Write-Ahead Log")) {
            assertFalse(started);
            started = true;
            recoveries++;
        }
        if (line.contains("Write-Ahead Log recovery complete")) {
            assertTrue(started);
            started = false;
        }
    }
    assertFalse(started);
    assertTrue(recoveries > 0);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ProcessReference(org.apache.accumulo.minicluster.impl.ProcessReference) TreeSet(java.util.TreeSet) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
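
randomHex is a small helper in VerifySerialRecoveryIT that is not shown on this page. A reasonable sketch that returns a random hexadecimal string of the requested length, which is all the test needs for its split points and row ids:

private static final Random random = new Random();

private static String randomHex(int len) {
    StringBuilder sb = new StringBuilder(len);
    for (int i = 0; i < len; i++) {
        // one random hex digit per character
        sb.append(Integer.toHexString(random.nextInt(16)));
    }
    return sb.toString();
}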

Example 25 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class AddSplitIT method addSplitTest.

@Test
public void addSplitTest() throws Exception {
    String tableName = getUniqueNames(1)[0];
    Connector c = getConnector();
    c.tableOperations().create(tableName);
    insertData(tableName, 1L);
    TreeSet<Text> splits = new TreeSet<>();
    splits.add(new Text(String.format("%09d", 333)));
    splits.add(new Text(String.format("%09d", 666)));
    c.tableOperations().addSplits(tableName, splits);
    sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    Collection<Text> actualSplits = c.tableOperations().listSplits(tableName);
    if (!splits.equals(new TreeSet<>(actualSplits))) {
        throw new Exception(splits + " != " + actualSplits);
    }
    verifyData(tableName, 1L);
    insertData(tableName, 2L);
    // the splits set is intentionally not cleared; addSplits should ignore the existing split points
    // and still create the three new ones
    splits.add(new Text(String.format("%09d", 200)));
    splits.add(new Text(String.format("%09d", 500)));
    splits.add(new Text(String.format("%09d", 800)));
    c.tableOperations().addSplits(tableName, splits);
    sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    actualSplits = c.tableOperations().listSplits(tableName);
    if (!splits.equals(new TreeSet<>(actualSplits))) {
        throw new Exception(splits + " != " + actualSplits);
    }
    verifyData(tableName, 2L);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) TreeSet(java.util.TreeSet) Text(org.apache.hadoop.io.Text) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) Test(org.junit.Test)
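
insertData and verifyData are helpers in AddSplitIT whose bodies are not included here. A hedged sketch of the write side, assuming rows are zero-padded with %09d so they interleave with the split points above and every cell carries the supplied timestamp so verifyData can check it; the row count and column names are guesses, and UTF_8 is java.nio.charset.StandardCharsets.UTF_8:

private void insertData(String tableName, long ts) throws Exception {
    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 0; i < 10000; i++) {
        String row = String.format("%09d", i);
        Mutation m = new Mutation(new Text(row));
        m.put(new Text("cf1"), new Text("cq1"), ts, new Value(Integer.toString(i).getBytes(UTF_8)));
        bw.addMutation(m);
    }
    bw.close();
}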

Aggregations

Connector (org.apache.accumulo.core.client.Connector): 622 usages
Test (org.junit.Test): 415 usages
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 171 usages
Value (org.apache.accumulo.core.data.Value): 162 usages
Text (org.apache.hadoop.io.Text): 160 usages
Scanner (org.apache.accumulo.core.client.Scanner): 158 usages
Mutation (org.apache.accumulo.core.data.Mutation): 152 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 143 usages
Key (org.apache.accumulo.core.data.Key): 139 usages
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 101 usages
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 87 usages
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 83 usages
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 75 usages
Range (org.apache.accumulo.core.data.Range): 74 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 65 usages
Authorizations (org.apache.accumulo.core.security.Authorizations): 60 usages
HashSet (java.util.HashSet): 57 usages
Instance (org.apache.accumulo.core.client.Instance): 55 usages
ArrayList (java.util.ArrayList): 53 usages
Entry (java.util.Map.Entry): 53 usages
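
The aggregation above shows Connector appearing most often alongside BatchWriter, Mutation, Scanner, Key, and Value, which reflects the canonical write-then-scan pattern. A minimal sketch of that pattern, assuming a Connector obtained as in the earlier sketch, a hypothetical table name, and the usual client imports (plus java.nio.charset.StandardCharsets and java.util.Map):

String table = "example_table";
if (!conn.tableOperations().exists(table)) {
    conn.tableOperations().create(table);
}

// write one cell
BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
Mutation m = new Mutation("row1");
m.put("cf", "cq", new Value("value1".getBytes(StandardCharsets.UTF_8)));
bw.addMutation(m);
bw.close();

// read it back
Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
for (Map.Entry<Key,Value> entry : scanner) {
    System.out.println(entry.getKey() + " -> " + entry.getValue());
}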