Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class WriteAheadLogIT, method test().
@Test
public void test() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // Keep the split threshold low so the ingest below forces splits and write-ahead log activity.
  c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "750K");
  TestIngest.Opts opts = new TestIngest.Opts();
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  opts.setTableName(tableName);
  ClientConfiguration clientConfig = cluster.getClientConfig();
  if (clientConfig.hasSasl()) {
    opts.updateKerberosCredentials(clientConfig);
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(getAdminPrincipal());
    vopts.setPrincipal(getAdminPrincipal());
  }
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  vopts.setTableName(tableName);
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
  // Restart all tablet servers, then verify again: the data must be recovered from the write-ahead log.
  getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
  getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
}
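For context on the third argument: TestIngest receives the BatchWriterOpts holder and presumably converts it with getBatchWriterConfig(), the same conversion the ContinuousIngest and RandomWriter examples below call directly. A minimal sketch of opening and using a writer that way; the class name, row, and cell values here are placeholders, not part of the test:

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.accumulo.core.cli.BatchWriterOpts;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;

public class BatchWriterOptsSketch {
  // Open a BatchWriter from the options holder, write a single cell, and
  // close; close() flushes any buffered mutations.
  static void writeOne(Connector conn, String table, BatchWriterOpts bwOpts)
      throws TableNotFoundException, MutationsRejectedException {
    BatchWriter bw = conn.createBatchWriter(table, bwOpts.getBatchWriterConfig());
    Mutation m = new Mutation("row_0001");
    m.put("colf", "colq", new Value("value".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.close();
  }
}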
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class WriteLotsIT, method writeLots().
@Test
public void writeLots() throws Exception {
  final Connector c = getConnector();
  final String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  final AtomicReference<Exception> ref = new AtomicReference<>();
  final ClientConfiguration clientConfig = getCluster().getClientConfig();
  final int THREADS = 5;
  ThreadPoolExecutor tpe = new ThreadPoolExecutor(0, THREADS, 0, TimeUnit.SECONDS, new ArrayBlockingQueue<>(THREADS));
  for (int i = 0; i < THREADS; i++) {
    final int index = i;
    Runnable r = new Runnable() {
      @Override
      public void run() {
        try {
          // Each thread ingests a disjoint range of 10,000 rows.
          TestIngest.Opts opts = new TestIngest.Opts();
          opts.startRow = index * 10000;
          opts.rows = 10000;
          opts.setTableName(tableName);
          if (clientConfig.hasSasl()) {
            opts.updateKerberosCredentials(clientConfig);
          } else {
            opts.setPrincipal(getAdminPrincipal());
          }
          BatchWriterOpts bwOpts = new BatchWriterOpts();
          bwOpts.batchMemory = 1024L * 1024;
          bwOpts.batchThreads = 2;
          // Ingest with the batch writer settings configured above.
          TestIngest.ingest(c, opts, bwOpts);
        } catch (Exception ex) {
          ref.set(ex);
        }
      }
    };
    tpe.execute(r);
  }
  tpe.shutdown();
  tpe.awaitTermination(90, TimeUnit.SECONDS);
  // Re-throw the first ingest failure, if any thread recorded one.
  if (ref.get() != null) {
    throw ref.get();
  }
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  vopts.rows = 10000 * THREADS;
  vopts.setTableName(tableName);
  if (clientConfig.hasSasl()) {
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    vopts.setPrincipal(getAdminPrincipal());
  }
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
}
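The two fields tuned in the runnable above map one-to-one onto the client API's BatchWriterConfig, which is what getBatchWriterConfig() produces under the hood. A minimal sketch of the same tuning expressed directly; the class name is a placeholder, the setters are standard BatchWriterConfig API:

import org.apache.accumulo.core.client.BatchWriterConfig;

class TunedBatchWriterConfig {
  // Same settings as bwOpts.batchMemory = 1024L * 1024 and bwOpts.batchThreads = 2.
  static BatchWriterConfig tuned() {
    return new BatchWriterConfig()
        .setMaxMemory(1024L * 1024) // buffer up to 1 MB of mutations client-side
        .setMaxWriteThreads(2); // send buffered batches with two threads
  }
}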
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class RenameIT, method renameTest().
@Test
public void renameTest() throws Exception {
  String[] tableNames = getUniqueNames(2);
  String name1 = tableNames[0];
  String name2 = tableNames[1];
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  ScannerOpts scanOpts = new ScannerOpts();
  TestIngest.Opts opts = new TestIngest.Opts();
  opts.createTable = true;
  opts.setTableName(name1);
  final ClientConfiguration clientConfig = cluster.getClientConfig();
  if (clientConfig.hasSasl()) {
    opts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(getAdminPrincipal());
  }
  Connector c = getConnector();
  TestIngest.ingest(c, opts, bwOpts);
  c.tableOperations().rename(name1, name2);
  TestIngest.ingest(c, opts, bwOpts);
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  if (clientConfig.hasSasl()) {
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    vopts.setPrincipal(getAdminPrincipal());
  }
  vopts.setTableName(name2);
  VerifyIngest.verifyIngest(c, vopts, scanOpts);
  c.tableOperations().delete(name1);
  c.tableOperations().rename(name2, name1);
  vopts.setTableName(name1);
  VerifyIngest.verifyIngest(c, vopts, scanOpts);
  FunctionalTestUtils.assertNoDanglingFateLocks(getConnector().getInstance(), getCluster());
}
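The test leans on two rename semantics: data follows the table to its new name, and the old name becomes free again (the second ingest re-creates name1 because opts.createTable is true). A minimal sketch of the underlying TableOperations calls, with a hypothetical class name and the checked exceptions surfaced in the throws clause:

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;

class RenameRoundTrip {
  static void roundTrip(Connector c, String name1, String name2) throws AccumuloException,
      AccumuloSecurityException, TableExistsException, TableNotFoundException {
    c.tableOperations().rename(name1, name2); // existing data is now readable under name2
    c.tableOperations().delete(name1); // assumes name1 was re-created in the meantime
    c.tableOperations().rename(name2, name1); // move the original data back under name1
  }
}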
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class ContinuousIngest, method main().
public static void main(String[] args) throws Exception {
  ContinuousOpts opts = new ContinuousOpts();
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  ClientOnDefaultTable clientOpts = new ClientOnDefaultTable("ci");
  clientOpts.parseArgs(ContinuousIngest.class.getName(), args, bwOpts, opts);
  initVisibilities(opts);
  if (opts.min < 0 || opts.max < 0 || opts.max <= opts.min) {
    throw new IllegalArgumentException("bad min and max");
  }
  Connector conn = clientOpts.getConnector();
  if (!conn.tableOperations().exists(clientOpts.getTableName())) {
    throw new TableNotFoundException(null, clientOpts.getTableName(), "Consult the README and create the table before starting ingest.");
  }
  BatchWriter bw = conn.createBatchWriter(clientOpts.getTableName(), bwOpts.getBatchWriterConfig());
  bw = Trace.wrapAll(bw, new CountSampler(1024));
  Random r = new Random();
  byte[] ingestInstanceId = UUID.randomUUID().toString().getBytes(UTF_8);
  System.out.printf("UUID %d %s%n", System.currentTimeMillis(), new String(ingestInstanceId, UTF_8));
  long count = 0;
  final int flushInterval = 1000000;
  final int maxDepth = 25;
  // We always want to point back to flushed data, so that the previous item
  // always exists in Accumulo when verifying. To do this, make insert N point
  // back to the row from insert (N - flushInterval). The arrays below keep
  // track of this.
  long[] prevRows = new long[flushInterval];
  long[] firstRows = new long[flushInterval];
  int[] firstColFams = new int[flushInterval];
  int[] firstColQuals = new int[flushInterval];
  long lastFlushTime = System.currentTimeMillis();
  out: while (true) {
    // generate the first set of nodes
    ColumnVisibility cv = getVisibility(r);
    for (int index = 0; index < flushInterval; index++) {
      long rowLong = genLong(opts.min, opts.max, r);
      prevRows[index] = rowLong;
      firstRows[index] = rowLong;
      int cf = r.nextInt(opts.maxColF);
      int cq = r.nextInt(opts.maxColQ);
      firstColFams[index] = cf;
      firstColQuals[index] = cq;
      Mutation m = genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, r, opts.checksum);
      count++;
      bw.addMutation(m);
    }
    lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
    if (count >= opts.num)
      break out;
    // generate subsequent sets of nodes that link to the previous set of nodes
    for (int depth = 1; depth < maxDepth; depth++) {
      for (int index = 0; index < flushInterval; index++) {
        long rowLong = genLong(opts.min, opts.max, r);
        byte[] prevRow = genRow(prevRows[index]);
        prevRows[index] = rowLong;
        Mutation m = genMutation(rowLong, r.nextInt(opts.maxColF), r.nextInt(opts.maxColQ), cv, ingestInstanceId, count, prevRow, r, opts.checksum);
        count++;
        bw.addMutation(m);
      }
      lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
      if (count >= opts.num)
        break out;
    }
    // point the first set of nodes at rows from the last set, so every node points to something
    for (int index = 0; index < flushInterval - 1; index++) {
      Mutation m = genMutation(firstRows[index], firstColFams[index], firstColQuals[index], cv, ingestInstanceId, count, genRow(prevRows[index + 1]), r, opts.checksum);
      count++;
      bw.addMutation(m);
    }
    lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
    if (count >= opts.num)
      break out;
  }
  bw.close();
  clientOpts.stopTracing();
}
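The flush(bw, count, flushInterval, lastFlushTime) helper is not part of this excerpt. A plausible reconstruction, assuming it only flushes the writer (so the next linking pass points at persisted rows) and reports progress; it would live inside the ingest class, and the actual helper in ContinuousIngest may differ:

// Hypothetical sketch of the helper used above.
private static long flush(BatchWriter bw, long count, int flushInterval, long lastFlushTime)
    throws MutationsRejectedException {
  long t1 = System.currentTimeMillis();
  bw.flush(); // make this interval's mutations durable before linking against them
  long t2 = System.currentTimeMillis();
  System.out.printf("FLUSH %d %d %d %d %d%n", t2, t2 - lastFlushTime, t2 - t1, count, flushInterval);
  return t2; // becomes the new lastFlushTime
}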
Use of org.apache.accumulo.core.cli.BatchWriterOpts in project accumulo by apache.
The class RandomWriter, method main().
public static void main(String[] args) throws Exception {
  Opts opts = new Opts(table_name);
  BatchWriterOpts bwOpts = new BatchWriterOpts();
  opts.parseArgs(RandomWriter.class.getName(), args, bwOpts);
  long start = System.currentTimeMillis();
  log.info("starting at {} for user {}", start, opts.getPrincipal());
  try {
    Connector connector = opts.getConnector();
    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
    log.info("Writing {} mutations...", opts.count);
    // RandomMutationGenerator is an Iterable<Mutation>; addMutations streams it into the writer.
    bw.addMutations(new RandomMutationGenerator(opts.count));
    bw.close();
  } catch (Exception e) {
    log.error("{}", e.getMessage(), e);
    throw e;
  }
  long stop = System.currentTimeMillis();
  log.info("stopping at {}", stop);
  log.info("elapsed: {}", (((double) stop - (double) start) / 1000.0));
}
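When no batch-writer flags are supplied on the command line, the fields of BatchWriterOpts keep their defaults and getBatchWriterConfig() returns the stock client configuration. A small sketch that prints the effective settings; it assumes an unparsed BatchWriterOpts is usable because its fields are initialized from a default BatchWriterConfig:

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.cli.BatchWriterOpts;
import org.apache.accumulo.core.client.BatchWriterConfig;

public class BatchWriterDefaults {
  public static void main(String[] args) {
    BatchWriterConfig cfg = new BatchWriterOpts().getBatchWriterConfig();
    // Show what a writer created from untuned opts would actually use.
    System.out.println("maxMemory       = " + cfg.getMaxMemory());
    System.out.println("maxLatency (ms) = " + cfg.getMaxLatency(TimeUnit.MILLISECONDS));
    System.out.println("maxWriteThreads = " + cfg.getMaxWriteThreads());
    System.out.println("timeout (ms)    = " + cfg.getTimeout(TimeUnit.MILLISECONDS));
  }
}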