Search in sources :

Example 1 with AccumuloCluster

use of org.apache.accumulo.cluster.AccumuloCluster in project accumulo by apache.

The class CloneTestIT, method testDeleteClone.

@Test
public void testDeleteClone() throws Exception {
    // Three unique names: a source table, its clone, and a throwaway table
    // used only to check that delete removes files from disk.
    String[] names = getUniqueNames(3);
    String sourceTable = names[0];
    String cloneTable = names[1];
    String throwawayTable = names[2];
    Connector conn = getConnector();
    AccumuloCluster cluster = getCluster();
    // Direct filesystem inspection only works against a mini cluster.
    Assume.assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    String rootPath = mac.getConfig().getDir().getAbsolutePath();
    // Verify that deleting a freshly written table removes its files.
    conn.tableOperations().create(throwawayTable);
    writeData(throwawayTable, conn).close();
    conn.tableOperations().flush(throwawayTable, null, null, true);
    // After the flush there must be at least one file in the table directory.
    FileSystem fs = getCluster().getFileSystem();
    String tableId = conn.tableOperations().tableIdMap().get(throwawayTable);
    Path tableDir = new Path(rootPath + "/accumulo/tables/" + tableId);
    FileStatus[] status = fs.listStatus(tableDir);
    assertTrue(status.length > 0);
    // Reported disk usage should reflect the data just written.
    List<DiskUsage> diskUsage = conn.tableOperations().getDiskUsage(Collections.singleton(throwawayTable));
    assertEquals(1, diskUsage.size());
    assertTrue(diskUsage.get(0).getUsage() > 100);
    conn.tableOperations().delete(throwawayTable);
    // After deletion the table directory is either gone entirely or empty.
    if (fs.exists(tableDir)) {
        status = fs.listStatus(tableDir);
        assertTrue(status == null || status.length == 0);
    }
    // Now exercise clone followed by deletion of the source table.
    conn.tableOperations().create(sourceTable);
    BatchWriter writer = writeData(sourceTable, conn);
    Map<String, String> props = new HashMap<>();
    props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
    Set<String> exclude = new HashSet<>();
    exclude.add(Property.TABLE_FILE_MAX.getKey());
    conn.tableOperations().clone(sourceTable, cloneTable, true, props, exclude);
    // Mutations written after the clone point must not appear in the clone.
    Mutation extra = new Mutation("009");
    extra.put("data", "x", "1");
    extra.put("data", "y", "2");
    writer.addMutation(extra);
    writer.close();
    // Deleting the source table must not affect the clone's data,
    // even after the clone is compacted.
    conn.tableOperations().delete(sourceTable);
    checkData(cloneTable, conn);
    conn.tableOperations().compact(cloneTable, null, null, true, true);
    checkData(cloneTable, conn);
    conn.tableOperations().delete(cloneTable);
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) AccumuloCluster(org.apache.accumulo.cluster.AccumuloCluster) DiskUsage(org.apache.accumulo.core.client.admin.DiskUsage) FileSystem(org.apache.hadoop.fs.FileSystem) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 2 with AccumuloCluster

use of org.apache.accumulo.cluster.AccumuloCluster in project accumulo by apache.

The class TableIT, method test.

@Test
public void test() throws Exception {
    // Filesystem-level checks below require a mini cluster we can inspect directly.
    Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
    AccumuloCluster cluster = getCluster();
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    String rootPath = mac.getConfig().getDir().getAbsolutePath();
    Connector conn = getConnector();
    TableOperations ops = conn.tableOperations();
    String tableName = getUniqueNames(1)[0];
    ops.create(tableName);
    TestIngest.Opts ingestOpts = new TestIngest.Opts();
    VerifyIngest.Opts verifyOpts = new VerifyIngest.Opts();
    ClientConfiguration clientConfig = getCluster().getClientConfig();
    // Pick credentials appropriate for the cluster's authentication mode.
    if (clientConfig.hasSasl()) {
        ingestOpts.updateKerberosCredentials(clientConfig);
        verifyOpts.updateKerberosCredentials(clientConfig);
    } else {
        ingestOpts.setPrincipal(getAdminPrincipal());
        verifyOpts.setPrincipal(getAdminPrincipal());
    }
    // Ingest, flush to disk, then verify the data came back intact.
    ingestOpts.setTableName(tableName);
    TestIngest.ingest(conn, ingestOpts, new BatchWriterOpts());
    ops.flush(tableName, null, null, true);
    verifyOpts.setTableName(tableName);
    VerifyIngest.verifyIngest(conn, verifyOpts, new ScannerOpts());
    Table.ID tableId = Table.ID.of(ops.tableIdMap().get(tableName));
    try (Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        // The flushed table must be referenced by at least one metadata file entry
        // and must have at least one file in its directory on disk.
        assertTrue(Iterators.size(metaScanner.iterator()) > 0);
        FileSystem fs = getCluster().getFileSystem();
        Path tableDir = new Path(rootPath + "/accumulo/tables/" + tableId);
        assertTrue(fs.listStatus(tableDir).length > 0);
        ops.delete(tableName);
        // After deletion: no metadata entries, and no files (directory may be gone).
        assertEquals(0, Iterators.size(metaScanner.iterator()));
        try {
            assertEquals(0, fs.listStatus(tableDir).length);
        } catch (FileNotFoundException ex) {
        // that's fine, too
        }
        assertNull(ops.tableIdMap().get(tableName));
        // The name is immediately reusable: recreate, ingest, verify, clean up.
        ops.create(tableName);
        TestIngest.ingest(conn, ingestOpts, new BatchWriterOpts());
        VerifyIngest.verifyIngest(conn, verifyOpts, new ScannerOpts());
        ops.delete(tableName);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) AccumuloCluster(org.apache.accumulo.cluster.AccumuloCluster) FileNotFoundException(java.io.FileNotFoundException) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) TestIngest(org.apache.accumulo.test.TestIngest) VerifyIngest(org.apache.accumulo.test.VerifyIngest) FileSystem(org.apache.hadoop.fs.FileSystem) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Test(org.junit.Test)

Example 3 with AccumuloCluster

use of org.apache.accumulo.cluster.AccumuloCluster in project accumulo by apache.

The class ConditionalWriterIT, method testTrace.

@Test
public void testTrace() throws Exception {
    // Need to add a getClientConfig() to AccumuloCluster
    Assume.assumeTrue(getClusterType() == ClusterType.MINI);
    Process tracer = null;
    Connector conn = getConnector();
    AccumuloCluster cluster = getCluster();
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    // Start a tracer process if the trace table does not exist yet, and wait for it.
    if (!conn.tableOperations().exists("trace")) {
        tracer = mac.exec(TraceServer.class);
        while (!conn.tableOperations().exists("trace")) {
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
    }
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    DistributedTrace.enable("localhost", "testTrace", mac.getClientConfig());
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    // Perform one traced conditional write under the root span.
    Span root = Trace.on("traceTest");
    try (ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig())) {
        // mutation conditional on column tx:seq not exiting
        ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
        cm0.put("name", "last", "doe");
        cm0.put("name", "first", "john");
        cm0.put("tx", "seq", "1");
        Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
        root.stop();
    }
    // Trace data is flushed asynchronously; poll the trace table until the full
    // expected span sequence shows up, re-reading it after each miss.
    try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
        scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
        loop: while (true) {
            final StringBuilder finalBuffer = new StringBuilder();
            int traceCount = TraceDump.printTrace(scanner, new Printer() {

                @Override
                public void print(final String line) {
                    try {
                        finalBuffer.append(line).append("\n");
                    } catch (Exception ex) {
                        throw new RuntimeException(ex);
                    }
                }
            });
            String traceOutput = finalBuffer.toString();
            log.info("Trace output:" + traceOutput);
            if (traceCount > 0) {
                int lastPos = 0;
                // The spans must all be present, in this order.
                for (String part : "traceTest, startScan,startConditionalUpdate,conditionalUpdate,Check conditions,apply conditional mutations".split(",")) {
                    log.info("Looking in trace output for '" + part + "'");
                    int pos = traceOutput.indexOf(part);
                    if (-1 == pos) {
                        log.info("Trace output doesn't contain '" + part + "'");
                        Thread.sleep(1000);
                        // BUGFIX: was `break loop`, which exited the retry loop and let the
                        // test pass without ever verifying the trace. Retry instead, so the
                        // sleep above actually gives the tracer time to flush more spans.
                        continue loop;
                    }
                    assertTrue("Did not find '" + part + "' in output", pos > 0);
                    assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
                    lastPos = pos;
                }
                // Every span was found in order — done.
                break;
            } else {
                log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
                Thread.sleep(1000);
            }
        }
        if (tracer != null) {
            tracer.destroy();
        }
    }
}
Also used : Condition(org.apache.accumulo.core.data.Condition) Connector(org.apache.accumulo.core.client.Connector) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloCluster(org.apache.accumulo.cluster.AccumuloCluster) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) Printer(org.apache.accumulo.tracer.TraceDump.Printer) Span(org.apache.accumulo.core.trace.Span) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) TableExistsException(org.apache.accumulo.core.client.TableExistsException) TableDeletedException(org.apache.accumulo.core.client.TableDeletedException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ConditionalWriter(org.apache.accumulo.core.client.ConditionalWriter) ConditionalMutation(org.apache.accumulo.core.data.ConditionalMutation) TraceServer(org.apache.accumulo.tracer.TraceServer) ConditionalWriterConfig(org.apache.accumulo.core.client.ConditionalWriterConfig) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) Test(org.junit.Test)

Aggregations

AccumuloCluster (org.apache.accumulo.cluster.AccumuloCluster)3 Connector (org.apache.accumulo.core.client.Connector)3 MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl)3 Test (org.junit.Test)3 Scanner (org.apache.accumulo.core.client.Scanner)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 Path (org.apache.hadoop.fs.Path)2 FileNotFoundException (java.io.FileNotFoundException)1 IOException (java.io.IOException)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts)1 ScannerOpts (org.apache.accumulo.core.cli.ScannerOpts)1 AccumuloException (org.apache.accumulo.core.client.AccumuloException)1 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)1 BatchWriter (org.apache.accumulo.core.client.BatchWriter)1 ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration)1 ConditionalWriter (org.apache.accumulo.core.client.ConditionalWriter)1 ConditionalWriterConfig (org.apache.accumulo.core.client.ConditionalWriterConfig)1 IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner)1