Example 16 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class VolumeIT, method writeData:

private void writeData(String tableName, Connector conn) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException {
    // Pre-split the table at rows 000100, 000200, ..., 009900.
    TreeSet<Text> splits = new TreeSet<>();
    for (int i = 1; i < 100; i++) {
        splits.add(new Text(String.format("%06d", i * 100)));
    }
    conn.tableOperations().create(tableName);
    conn.tableOperations().addSplits(tableName, splits);
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    // Write one entry into each tablet: rows 000003, 000103, ..., 009903.
    for (int i = 0; i < 100; i++) {
        String row = String.format("%06d", i * 100 + 3);
        Mutation m = new Mutation(row);
        m.put("cf1", "cq1", "1");
        bw.addMutation(m);
    }
    bw.close();
}
Also used: TreeSet (java.util.TreeSet), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), Text (org.apache.hadoop.io.Text), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation)
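
For contrast, the same write pattern as a minimal try-with-resources sketch; this assumes a client version in which BatchWriter implements AutoCloseable (true in recent Accumulo releases), so close(), which flushes any buffered mutations, runs even if addMutation throws:

try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
    Mutation m = new Mutation("000042"); // arbitrary illustrative row key
    m.put("cf1", "cq1", "1");
    bw.addMutation(m);
} // close() happens here, even on failure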

Example 17 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class TracerRecoversAfterOfflineTableIT, method test:

@Test
public void test() throws Exception {
    Process tracer = null;
    Connector conn = getConnector();
    // Start a TraceServer if the trace table does not exist yet, and wait for it to be created.
    if (!conn.tableOperations().exists("trace")) {
        MiniAccumuloClusterImpl mac = cluster;
        tracer = mac.exec(TraceServer.class);
        while (!conn.tableOperations().exists("trace")) {
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
        sleepUninterruptibly(5, TimeUnit.SECONDS);
    }
    log.info("Taking table offline");
    conn.tableOperations().offline("trace", true);
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    log.info("Start a distributed trace span");
    DistributedTrace.enable("localhost", "testTrace", getClientConfig());
    Span root = Trace.on("traceTest");
    BatchWriter bw = conn.createBatchWriter(tableName, null);
    Mutation m = new Mutation("m");
    m.put("a", "b", "c");
    bw.addMutation(m);
    bw.close();
    root.stop();
    log.info("Bringing trace table back online");
    conn.tableOperations().online("trace", true);
    log.info("Trace table is online, should be able to find trace");
    try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
        scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
        // Poll until the trace appears; printTrace returns the number of spans it printed.
        while (true) {
            final StringBuilder finalBuffer = new StringBuilder();
            int traceCount = TraceDump.printTrace(scanner, new Printer() {

                @Override
                public void print(final String line) {
                    try {
                        finalBuffer.append(line).append("\n");
                    } catch (Exception ex) {
                        throw new RuntimeException(ex);
                    }
                }
            });
            String traceOutput = finalBuffer.toString();
            log.info("Trace output:{}", traceOutput);
            if (traceCount > 0) {
                int lastPos = 0;
                for (String part : "traceTest,close,binMutations".split(",")) {
                    log.info("Looking in trace output for '{}'", part);
                    int pos = traceOutput.indexOf(part);
                    assertTrue("Did not find '" + part + "' in output", pos > 0);
                    assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
                    lastPos = pos;
                }
                break;
            } else {
                log.info("Ignoring trace output as traceCount not greater than zero: {}", traceCount);
                Thread.sleep(1000);
            }
        }
        if (tracer != null) {
            tracer.destroy();
        }
    }
}
Also used: Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), Text (org.apache.hadoop.io.Text), Range (org.apache.accumulo.core.data.Range), Printer (org.apache.accumulo.tracer.TraceDump.Printer), Span (org.apache.accumulo.core.trace.Span), TraceServer (org.apache.accumulo.tracer.TraceServer), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl), Test (org.junit.Test)
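
Stripped of the test scaffolding, the tracing shape this test relies on is small: Trace.on returns the root Span, and stop() must run even when the traced work fails, so a try/finally is the usual form ("myOperation" below is an arbitrary span name):

Span root = Trace.on("myOperation");
try {
    // ... traced client calls, e.g. BatchWriter writes ...
} finally {
    root.stop();
}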

Example 18 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class UserCompactionStrategyIT, method writeFlush:

private void writeFlush(Connector conn, String tablename, String row) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    conn.tableOperations().flush(tablename, null, null, true);
}
Also used: BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation)
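
The flush call at the end is what makes this helper useful for compaction tests: flush(tablename, null, null, true) covers the whole table (null start and end rows) and blocks until the minor compaction finishes, so each call leaves exactly one new flushed file behind. A hypothetical call sequence (conn and tableName stand in for values from the calling test) that gives a compaction strategy three small files to work on:

writeFlush(conn, tableName, "row1");
writeFlush(conn, tableName, "row2");
writeFlush(conn, tableName, "row3");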

Example 19 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class VerifySerialRecoveryIT, method testSerializedRecovery:

@Test(timeout = 4 * 60 * 1000)
public void testSerializedRecovery() throws Exception {
    // make a table with many splits
    String tableName = getUniqueNames(1)[0];
    Connector c = getConnector();
    c.tableOperations().create(tableName);
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = 0; i < 200; i++) {
        splits.add(new Text(randomHex(8)));
    }
    c.tableOperations().addSplits(tableName, splits);
    // load data to give the recovery something to do
    BatchWriter bw = c.createBatchWriter(tableName, null);
    for (int i = 0; i < 50000; i++) {
        Mutation m = new Mutation(randomHex(8));
        m.put("", "", "");
        bw.addMutation(m);
    }
    bw.close();
    // kill the tserver
    for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
        getCluster().killProcess(ServerType.TABLET_SERVER, ref);
    }
    final Process ts = cluster.exec(TabletServer.class);
    // wait for recovery
    Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
    ts.waitFor();
    String result = FunctionalTestUtils.readAll(cluster, TabletServer.class, ts);
    for (String line : result.split("\n")) {
        System.out.println(line);
    }
    // walk through the output, verifying that only a single normal recovery was running at one time
    boolean started = false;
    int recoveries = 0;
    for (String line : result.split("\n")) {
        // ignore metadata tables
        if (line.contains("!0") || line.contains("+r"))
            continue;
        if (line.contains("Starting Write-Ahead Log")) {
            assertFalse(started);
            started = true;
            recoveries++;
        }
        if (line.contains("Write-Ahead Log recovery complete")) {
            assertTrue(started);
            started = false;
        }
    }
    assertFalse(started);
    assertTrue(recoveries > 0);
}
Also used: Connector (org.apache.accumulo.core.client.Connector), ProcessReference (org.apache.accumulo.minicluster.impl.ProcessReference), TreeSet (java.util.TreeSet), Text (org.apache.hadoop.io.Text), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Test (org.junit.Test)
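
The line under "// wait for recovery" is a common trick worth calling out: a tablet does not come online until its write-ahead log recovery completes, and a full scan forces every tablet online, so draining a scanner doubles as a recovery barrier. The same idea as a standalone sketch:

// Blocks until all tablets are online, i.e. until log recovery has finished.
try (Scanner s = c.createScanner(tableName, Authorizations.EMPTY)) {
    Iterators.size(s.iterator()); // the entry count itself is discarded
}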

Example 20 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class BadIteratorMincIT, method test:

@Test
public void test() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Attach an iterator that always throws, scoped to minor compactions only.
    IteratorSetting is = new IteratorSetting(30, BadIterator.class);
    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("r1"));
    m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.close();
    c.tableOperations().flush(tableName, null, null, false);
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    // minc should fail, so there should be no files
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
    // try to scan table
    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
        int count = Iterators.size(scanner.iterator());
        assertEquals("Did not see expected # entries " + count, 1, count);
        // remove the bad iterator
        c.tableOperations().removeIterator(tableName, BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
        sleepUninterruptibly(5, TimeUnit.SECONDS);
        // minc should complete
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
        count = Iterators.size(scanner.iterator());
        if (count != 1)
            throw new Exception("Did not see expected # entries " + count);
        // now try putting bad iterator back and deleting the table
        c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
        bw = c.createBatchWriter(tableName, new BatchWriterConfig());
        m = new Mutation(new Text("r2"));
        m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
        bw.addMutation(m);
        bw.close();
        // make sure property is given time to propagate
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
        c.tableOperations().flush(tableName, null, null, false);
        // make sure the flush has time to start
        sleepUninterruptibly(1, TimeUnit.SECONDS);
        // this should not hang
        c.tableOperations().delete(tableName);
    }
}
Also used: Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), IteratorSetting (org.apache.accumulo.core.client.IteratorSetting), Value (org.apache.accumulo.core.data.Value), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), Text (org.apache.hadoop.io.Text), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), Test (org.junit.Test)
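
A closing note on error handling that these tests skip: close() flushes any buffered mutations, so it is where a MutationsRejectedException typically surfaces. A minimal sketch of catching it (row and column values are arbitrary; ConstraintViolationSummary is org.apache.accumulo.core.data.ConstraintViolationSummary):

try {
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("r3");
    m.put("acf", "x", "1");
    bw.addMutation(m);
    bw.close();
} catch (MutationsRejectedException e) {
    // Lists any constraint violations behind the rejection.
    for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries())
        System.err.println("rejected: " + cvs);
}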

Aggregations

BatchWriter (org.apache.accumulo.core.client.BatchWriter): 402
Mutation (org.apache.accumulo.core.data.Mutation): 360
Test (org.junit.Test): 264
Value (org.apache.accumulo.core.data.Value): 250
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 246
Text (org.apache.hadoop.io.Text): 194
Key (org.apache.accumulo.core.data.Key): 179
Scanner (org.apache.accumulo.core.client.Scanner): 174
Connector (org.apache.accumulo.core.client.Connector): 169
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 81
Authorizations (org.apache.accumulo.core.security.Authorizations): 68
Range (org.apache.accumulo.core.data.Range): 61
Entry (java.util.Map.Entry): 51
Map (java.util.Map): 50
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 46
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 44
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 40
HashMap (java.util.HashMap): 38
ArrayList (java.util.ArrayList): 36
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 32