
Example 11 with Connector

Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

From the class ConcurrentDeleteTableIT, method testConcurrentDeleteTablesOps:

@Test
public void testConcurrentDeleteTablesOps() throws Exception {
    final Connector c = getConnector();
    String[] tables = getUniqueNames(2);
    TreeSet<Text> splits = createSplits();
    ExecutorService es = Executors.newFixedThreadPool(20);
    int count = 0;
    for (final String table : tables) {
        c.tableOperations().create(table);
        c.tableOperations().addSplits(table, splits);
        writeData(c, table);
        if (count == 1) {
            c.tableOperations().flush(table, null, null, true);
        }
        count++;
        int numDeleteOps = 20;
        final CountDownLatch cdl = new CountDownLatch(numDeleteOps);
        List<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < numDeleteOps; i++) {
            Future<?> future = es.submit(new Runnable() {

                @Override
                public void run() {
                    try {
                        cdl.countDown();
                        cdl.await();
                        c.tableOperations().delete(table);
                    } catch (TableNotFoundException e) {
                        // expected: the table was already deleted by another thread
                    } catch (InterruptedException | AccumuloException | AccumuloSecurityException e) {
                        throw new RuntimeException(e);
                    }
                }
            });
            futures.add(future);
        }
        for (Future<?> future : futures) {
            future.get();
        }
        try {
            c.createScanner(table, Authorizations.EMPTY);
            Assert.fail("Expected table " + table + " to be gone.");
        } catch (TableNotFoundException tnfe) {
            // expected: the table was deleted, so the scanner cannot be created
        }
        FunctionalTestUtils.assertNoDanglingFateLocks(getConnector().getInstance(), getCluster());
    }
    es.shutdown();
}
Also used: Connector (org.apache.accumulo.core.client.Connector), ArrayList (java.util.ArrayList), Text (org.apache.hadoop.io.Text), CountDownLatch (java.util.concurrent.CountDownLatch), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), Test (org.junit.Test)
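
The countDown()/await() pair turns the CountDownLatch into a start barrier: each worker announces itself, then blocks until all 20 have been submitted and reached the latch, so the concurrent delete calls hit the server at nearly the same moment. A minimal standalone sketch of that pattern, with a hypothetical print statement in place of the table operation:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class LatchBarrierSketch {
    public static void main(String[] args) {
        final int workers = 20;
        final CountDownLatch startLine = new CountDownLatch(workers);
        ExecutorService es = Executors.newFixedThreadPool(workers);
        for (int i = 0; i < workers; i++) {
            final int id = i;
            es.submit(() -> {
                startLine.countDown(); // announce arrival
                try {
                    startLine.await(); // block until all workers have arrived
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                // every worker passes this point at roughly the same instant
                System.out.println("worker " + id + " released");
            });
        }
        es.shutdown();
    }
}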

Example 12 with Connector

Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

From the class ConfigurableCompactionIT, method test:

@Test
public void test() throws Exception {
    final Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), SimpleCompactionStrategy.class.getName());
    runTest(c, tableName, 3);
    c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey() + "count", "" + 5);
    runTest(c, tableName, 5);
}
Also used: Connector (org.apache.accumulo.core.client.Connector), Test (org.junit.Test)
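
Property.TABLE_COMPACTION_STRATEGY selects the strategy class, while keys built from Property.TABLE_COMPACTION_STRATEGY_PREFIX pass options ("count" here) through to that strategy. A hedged sketch of setting and reading back such a per-table property with the same 1.x client API (the table name is a placeholder, and an existing Connector c is assumed):

// "myTable" is hypothetical; the table must already exist.
String table = "myTable";
String key = Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey() + "count";
c.tableOperations().setProperty(table, key, "5");
// Read the effective table configuration back to confirm the setting took.
for (Map.Entry<String, String> prop : c.tableOperations().getProperties(table)) {
    if (prop.getKey().equals(key)) {
        System.out.println(prop.getKey() + " = " + prop.getValue());
    }
}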

Example 13 with Connector

Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

From the class VolumeChooserIT, method twoTablesRandomVolumeChooser:

// Test that uses two tables with 10 split points each. They each use the RandomVolumeChooser to choose volumes.
@Test
public void twoTablesRandomVolumeChooser() throws Exception {
    log.info("Starting twoTablesRandomVolumeChooser()");
    // Create namespace
    Connector connector = getConnector();
    connector.namespaceOperations().create(namespace1);
    // Set properties on the namespace
    connector.namespaceOperations().setProperty(namespace1, PerTableVolumeChooser.TABLE_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
    // Create table1 on namespace1
    String tableName = namespace1 + ".1";
    connector.tableOperations().create(tableName);
    Table.ID tableID = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    // Add 10 splits to the table
    addSplits(connector, tableName);
    // Write some data to the table
    writeAndReadData(connector, tableName);
    // Verify the new files are written to the Volumes specified
    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
    connector.namespaceOperations().create(namespace2);
    // Set properties on the namespace
    connector.namespaceOperations().setProperty(namespace2, PerTableVolumeChooser.TABLE_VOLUME_CHOOSER, RandomVolumeChooser.class.getName());
    // Create table2 on namespace2
    String tableName2 = namespace2 + ".1";
    connector.tableOperations().create(tableName2);
    Table.ID tableID2 = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName2));
    // Add 10 splits to the table
    addSplits(connector, tableName2);
    // Write some data to the table
    writeAndReadData(connector, tableName2);
    // Verify the new files are written to the Volumes specified
    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), v1.toString() + "," + v2.toString() + "," + v4.toString());
}
Also used: RandomVolumeChooser (org.apache.accumulo.server.fs.RandomVolumeChooser), Connector (org.apache.accumulo.core.client.Connector), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), Table (org.apache.accumulo.core.client.impl.Table), Test (org.junit.Test)
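
The addSplits(connector, tableName) helper is not shown in this excerpt. A plausible reconstruction, assuming ten single-character split points (the actual values used by VolumeChooserIT may differ):

// Hypothetical sketch of the helper; the split values are an assumption.
private void addSplits(Connector connector, String tableName) throws Exception {
    SortedSet<Text> splits = new TreeSet<>();
    for (String s : "b,e,g,j,l,o,q,t,v,y".split(",")) {
        splits.add(new Text(s)); // ten splits -> eleven tablets
    }
    connector.tableOperations().addSplits(tableName, splits);
}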

Example 14 with Connector

Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

From the class VolumeIT, method testRelativePaths:

@Test
public void testRelativePaths() throws Exception {
    List<String> expected = new ArrayList<>();
    Connector connector = getConnector();
    String tableName = getUniqueNames(1)[0];
    connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
    Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    SortedSet<Text> partitions = new TreeSet<>();
    // with some splits
    for (String s : "c,g,k,p,s,v".split(",")) partitions.add(new Text(s));
    connector.tableOperations().addSplits(tableName, partitions);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    // create two files in each tablet
    String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
    for (String s : rows) {
        Mutation m = new Mutation(s);
        m.put("cf1", "cq1", "1");
        bw.addMutation(m);
        expected.add(s + ":cf1:cq1:1");
    }
    bw.flush();
    connector.tableOperations().flush(tableName, null, null, true);
    for (String s : rows) {
        Mutation m = new Mutation(s);
        m.put("cf1", "cq1", "2");
        bw.addMutation(m);
        expected.add(s + ":cf1:cq1:2");
    }
    bw.close();
    connector.tableOperations().flush(tableName, null, null, true);
    verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
    connector.tableOperations().offline(tableName, true);
    connector.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    try (Scanner metaScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        BatchWriter mbw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        for (Entry<Key, Value> entry : metaScanner) {
            String cq = entry.getKey().getColumnQualifier().toString();
            if (cq.startsWith(v1.toString())) {
                Path path = new Path(cq);
                String relPath = "/" + path.getParent().getName() + "/" + path.getName();
                Mutation fileMut = new Mutation(entry.getKey().getRow());
                fileMut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
                fileMut.put(entry.getKey().getColumnFamily().toString(), relPath, entry.getValue().toString());
                mbw.addMutation(fileMut);
            }
        }
        mbw.close();
        connector.tableOperations().online(tableName, true);
        verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
        connector.tableOperations().compact(tableName, null, null, true, true);
        verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
        for (Entry<Key, Value> entry : metaScanner) {
            String cq = entry.getKey().getColumnQualifier().toString();
            Path path = new Path(cq);
            Assert.assertTrue("relative path not deleted " + path.toString(), path.depth() > 2);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), RootTable (org.apache.accumulo.core.metadata.RootTable), Table (org.apache.accumulo.core.client.impl.Table), ArrayList (java.util.ArrayList), Text (org.apache.hadoop.io.Text), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration), TreeSet (java.util.TreeSet), Value (org.apache.accumulo.core.data.Value), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Key (org.apache.accumulo.core.data.Key), Test (org.junit.Test)
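
The expected entries are built as "row:cf:cq:value" strings, so verifyData presumably scans the table and compares the same encoding. A hedged sketch of what that helper could look like (VolumeIT's real implementation may differ):

// Hypothetical reconstruction of verifyData, matching the "row:cf:cq:value" encoding above.
private void verifyData(List<String> expected, Scanner scanner) {
    List<String> actual = new ArrayList<>();
    for (Map.Entry<Key, Value> entry : scanner) {
        Key k = entry.getKey();
        actual.add(k.getRow() + ":" + k.getColumnFamily() + ":" + k.getColumnQualifier() + ":" + entry.getValue());
    }
    scanner.close();
    // Sort copies so the comparison is order-independent and the caller's list is untouched.
    List<String> exp = new ArrayList<>(expected);
    Collections.sort(exp);
    Collections.sort(actual);
    Assert.assertEquals(exp, actual);
}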

Example 15 with Connector

Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

From the class VolumeIT, method verifyVolumesUsed:

private void verifyVolumesUsed(String tableName, boolean shouldExist, Path... paths) throws Exception {
    Connector conn = getConnector();
    List<String> expected = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        String row = String.format("%06d", i * 100 + 3);
        expected.add(row + ":cf1:cq1:1");
    }
    if (!conn.tableOperations().exists(tableName)) {
        Assert.assertFalse(shouldExist);
        writeData(tableName, conn);
        verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
        conn.tableOperations().flush(tableName, null, null, true);
    }
    verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    try (Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(metaScanner);
        metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        int[] counts = new int[paths.length];
        outer: for (Entry<Key, Value> entry : metaScanner) {
            String cf = entry.getKey().getColumnFamily().toString();
            String cq = entry.getKey().getColumnQualifier().toString();
            String path;
            if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME.toString()))
                path = cq;
            else
                path = entry.getValue().toString();
            for (int i = 0; i < paths.length; i++) {
                if (path.startsWith(paths[i].toString())) {
                    counts[i]++;
                    continue outer;
                }
            }
            Assert.fail("Unexpected volume " + path);
        }
        // keep retrying until WAL state information in ZooKeeper stabilizes or until test times out
        retry: while (true) {
            Instance i = conn.getInstance();
            ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
            WalStateManager wals = new WalStateManager(i, zk);
            try {
                outer: for (Entry<Path, WalState> entry : wals.getAllState().entrySet()) {
                    for (Path path : paths) {
                        if (entry.getKey().toString().startsWith(path.toString())) {
                            continue outer;
                        }
                    }
                    log.warn("Unexpected volume " + entry.getKey() + " (" + entry.getValue() + ")");
                    continue retry;
                }
            } catch (WalMarkerException e) {
                Throwable cause = e.getCause();
                if (cause instanceof NoNodeException) {
                    // ignore WALs being cleaned up
                    continue retry;
                }
                throw e;
            }
            break;
        }
        // if a volume is chosen randomly for each tablet, then the probability that a volume will not be chosen for any tablet is ((num_volumes -
        // 1)/num_volumes)^num_tablets. For 100 tablets and 3 volumes the probability that only 2 volumes would be chosen is 2.46e-18
        int sum = 0;
        for (int count : counts) {
            Assert.assertTrue(count > 0);
            sum += count;
        }
        Assert.assertEquals(200, sum);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), MetadataTable (org.apache.accumulo.core.metadata.MetadataTable), RootTable (org.apache.accumulo.core.metadata.RootTable), Table (org.apache.accumulo.core.client.impl.Table), NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException), Instance (org.apache.accumulo.core.client.Instance), ZooKeeperInstance (org.apache.accumulo.core.client.ZooKeeperInstance), ArrayList (java.util.ArrayList), ZooReaderWriter (org.apache.accumulo.server.zookeeper.ZooReaderWriter), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), Entry (java.util.Map.Entry), WalStateManager (org.apache.accumulo.server.log.WalStateManager), WalState (org.apache.accumulo.server.log.WalStateManager.WalState), WalMarkerException (org.apache.accumulo.server.log.WalStateManager.WalMarkerException)
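
The writeData(tableName, conn) helper is also not shown; the expected list above implies 100 rows of the form String.format("%06d", i * 100 + 3) with cf1:cq1 set to "1". A hedged reconstruction under that assumption:

// Hypothetical sketch of writeData, inferred from the expected entries built in verifyVolumesUsed.
private void writeData(String tableName, Connector conn) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 0; i < 100; i++) {
        Mutation m = new Mutation(String.format("%06d", i * 100 + 3));
        m.put("cf1", "cq1", "1");
        bw.addMutation(m);
    }
    bw.close();
}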

Aggregations

Connector (org.apache.accumulo.core.client.Connector): 622
Test (org.junit.Test): 415
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 171
Value (org.apache.accumulo.core.data.Value): 162
Text (org.apache.hadoop.io.Text): 160
Scanner (org.apache.accumulo.core.client.Scanner): 158
Mutation (org.apache.accumulo.core.data.Mutation): 152
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 143
Key (org.apache.accumulo.core.data.Key): 139
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 101
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 87
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 83
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 75
Range (org.apache.accumulo.core.data.Range): 74
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 65
Authorizations (org.apache.accumulo.core.security.Authorizations): 60
HashSet (java.util.HashSet): 57
Instance (org.apache.accumulo.core.client.Instance): 55
ArrayList (java.util.ArrayList): 53
Entry (java.util.Map.Entry): 53