
Example 1 with ClientOnDefaultTable

Use of org.apache.accumulo.core.cli.ClientOnDefaultTable in project accumulo by apache.

The main method of the class TestRandomDeletes:

public static void main(String[] args) {
    ClientOnDefaultTable opts = new ClientOnDefaultTable("test_ingest");
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(TestRandomDeletes.class.getName(), args, scanOpts, bwOpts);
    log.info("starting random delete test");
    try {
        long deleted = 0;
        String tableName = opts.getTableName();
        TreeSet<RowColumn> doomed = scanAll(opts, scanOpts, tableName);
        log.info("Got {} rows", doomed.size());
        long startTime = System.currentTimeMillis();
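        // keep deleting roughly half of the remaining rows (and verifying the result)
        // until a pass deletes nothing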
        while (true) {
            long half = scrambleDeleteHalfAndCheck(opts, scanOpts, bwOpts, tableName, doomed);
            deleted += half;
            if (half == 0)
                break;
        }
        long stopTime = System.currentTimeMillis();
        long elapsed = (stopTime - startTime) / 1000;
        log.info("deleted {} values in {} seconds", deleted, elapsed);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Also used: ScannerOpts (org.apache.accumulo.core.cli.ScannerOpts), ClientOnDefaultTable (org.apache.accumulo.core.cli.ClientOnDefaultTable), BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts)
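
For orientation, here is a minimal sketch of the pattern the example above follows: construct ClientOnDefaultTable with a default table name, parse the command line together with the helper option beans, then read the resolved table name and connector back from the options. It assumes the same 1.x client API used above; the class name ClientOnDefaultTableSketch, the default table "my_table", and the use of the scanBatchSize option from ScannerOpts are illustrative additions, not part of the original example.

import java.util.Map.Entry;

import org.apache.accumulo.core.cli.BatchWriterOpts;
import org.apache.accumulo.core.cli.ClientOnDefaultTable;
import org.apache.accumulo.core.cli.ScannerOpts;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class ClientOnDefaultTableSketch {
    public static void main(String[] args) throws Exception {
        // "my_table" is only the fallback; the parsed command line can point the client at another table.
        ClientOnDefaultTable opts = new ClientOnDefaultTable("my_table");
        ScannerOpts scanOpts = new ScannerOpts();
        BatchWriterOpts bwOpts = new BatchWriterOpts();
        // parseArgs folds the extra option beans into the same command-line parse.
        opts.parseArgs(ClientOnDefaultTableSketch.class.getName(), args, scanOpts, bwOpts);
        // Connection details and the effective table name both come from the parsed options.
        Connector conn = opts.getConnector();
        Scanner scanner = conn.createScanner(opts.getTableName(), Authorizations.EMPTY);
        scanner.setBatchSize(scanOpts.scanBatchSize);
        for (Entry<Key,Value> entry : scanner) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
        opts.stopTracing();
    }
}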

Example 2 with ClientOnDefaultTable

Use of org.apache.accumulo.core.cli.ClientOnDefaultTable in project accumulo by apache.

The main method of the class ContinuousIngest:

public static void main(String[] args) throws Exception {
    ContinuousOpts opts = new ContinuousOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    ClientOnDefaultTable clientOpts = new ClientOnDefaultTable("ci");
    clientOpts.parseArgs(ContinuousIngest.class.getName(), args, bwOpts, opts);
    initVisibilities(opts);
    if (opts.min < 0 || opts.max < 0 || opts.max <= opts.min) {
        throw new IllegalArgumentException("bad min and max");
    }
    Connector conn = clientOpts.getConnector();
    if (!conn.tableOperations().exists(clientOpts.getTableName())) {
        throw new TableNotFoundException(null, clientOpts.getTableName(), "Consult the README and create the table before starting ingest.");
    }
    BatchWriter bw = conn.createBatchWriter(clientOpts.getTableName(), bwOpts.getBatchWriterConfig());
    bw = Trace.wrapAll(bw, new CountSampler(1024));
    Random r = new Random();
    byte[] ingestInstanceId = UUID.randomUUID().toString().getBytes(UTF_8);
    System.out.printf("UUID %d %s%n", System.currentTimeMillis(), new String(ingestInstanceId, UTF_8));
    long count = 0;
    final int flushInterval = 1000000;
    final int maxDepth = 25;
    // always want to point back to flushed data. This way the previous item should
    // always exist in accumulo when verifying data. To do this make insert N point
    // back to the row from insert (N - flushInterval). The array below is used to keep
    // track of this.
    long[] prevRows = new long[flushInterval];
    long[] firstRows = new long[flushInterval];
    int[] firstColFams = new int[flushInterval];
    int[] firstColQuals = new int[flushInterval];
    long lastFlushTime = System.currentTimeMillis();
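    // each pass creates flushInterval new chains, extends them to maxDepth nodes,
    // then links the chains together before checking whether opts.num entries have been written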
    out: while (true) {
        // generate first set of nodes
        ColumnVisibility cv = getVisibility(r);
        for (int index = 0; index < flushInterval; index++) {
            long rowLong = genLong(opts.min, opts.max, r);
            prevRows[index] = rowLong;
            firstRows[index] = rowLong;
            int cf = r.nextInt(opts.maxColF);
            int cq = r.nextInt(opts.maxColQ);
            firstColFams[index] = cf;
            firstColQuals[index] = cq;
            Mutation m = genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, r, opts.checksum);
            count++;
            bw.addMutation(m);
        }
        lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
        if (count >= opts.num)
            break out;
        // generate subsequent sets of nodes that link to previous set of nodes
        for (int depth = 1; depth < maxDepth; depth++) {
            for (int index = 0; index < flushInterval; index++) {
                long rowLong = genLong(opts.min, opts.max, r);
                byte[] prevRow = genRow(prevRows[index]);
                prevRows[index] = rowLong;
                Mutation m = genMutation(rowLong, r.nextInt(opts.maxColF), r.nextInt(opts.maxColQ), cv, ingestInstanceId, count, prevRow, r, opts.checksum);
                count++;
                bw.addMutation(m);
            }
            lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
            if (count >= opts.num)
                break out;
        }
        // create one big linked list: rewrite the first insert of each chain to point at the
        // last row of the next chain, so that every first insert points to something
        for (int index = 0; index < flushInterval - 1; index++) {
            Mutation m = genMutation(firstRows[index], firstColFams[index], firstColQuals[index], cv, ingestInstanceId, count, genRow(prevRows[index + 1]), r, opts.checksum);
            count++;
            bw.addMutation(m);
        }
        lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
        if (count >= opts.num)
            break out;
    }
    bw.close();
    clientOpts.stopTracing();
}
Also used: Connector (org.apache.accumulo.core.client.Connector), ClientOnDefaultTable (org.apache.accumulo.core.cli.ClientOnDefaultTable), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), CountSampler (org.apache.accumulo.core.trace.CountSampler), Random (java.util.Random), BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts), BatchWriter (org.apache.accumulo.core.client.BatchWriter), ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility), Mutation (org.apache.accumulo.core.data.Mutation)
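
Stripped of the linked-list bookkeeping, the ingest skeleton that ContinuousIngest layers on is: resolve the table with ClientOnDefaultTable, build a BatchWriter from the parsed BatchWriterOpts, add mutations, and close. The sketch below assumes the same 1.x client API as the example; the class name SimpleIngestSketch, the row and column literals, and the fixed loop bound are illustrative, and no error handling is shown.

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.cli.BatchWriterOpts;
import org.apache.accumulo.core.cli.ClientOnDefaultTable;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.security.ColumnVisibility;

public class SimpleIngestSketch {
    public static void main(String[] args) throws Exception {
        // Default to the same "ci" table as ContinuousIngest; the command line can override it.
        ClientOnDefaultTable clientOpts = new ClientOnDefaultTable("ci");
        BatchWriterOpts bwOpts = new BatchWriterOpts();
        clientOpts.parseArgs(SimpleIngestSketch.class.getName(), args, bwOpts);
        Connector conn = clientOpts.getConnector();
        // BatchWriterOpts turns its parsed options into a BatchWriterConfig for the writer.
        BatchWriter bw = conn.createBatchWriter(clientOpts.getTableName(), bwOpts.getBatchWriterConfig());
        // Empty visibility: the entries are readable without any special authorizations.
        ColumnVisibility cv = new ColumnVisibility();
        for (int i = 0; i < 1000; i++) {
            Mutation m = new Mutation(String.format("row_%06d", i).getBytes(UTF_8));
            m.put("cf".getBytes(UTF_8), "cq".getBytes(UTF_8), cv, ("value_" + i).getBytes(UTF_8));
            bw.addMutation(m);
        }
        // close() flushes any buffered mutations and surfaces rejected ones.
        bw.close();
        clientOpts.stopTracing();
    }
}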

Aggregations

BatchWriterOpts (org.apache.accumulo.core.cli.BatchWriterOpts): 2 uses
ClientOnDefaultTable (org.apache.accumulo.core.cli.ClientOnDefaultTable): 2 uses
Random (java.util.Random): 1 use
ScannerOpts (org.apache.accumulo.core.cli.ScannerOpts): 1 use
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 1 use
Connector (org.apache.accumulo.core.client.Connector): 1 use
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 1 use
Mutation (org.apache.accumulo.core.data.Mutation): 1 use
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility): 1 use
CountSampler (org.apache.accumulo.core.trace.CountSampler): 1 use