Use of org.apache.accumulo.examples.cli.BatchWriterOpts in project accumulo-examples by Apache.
The class Ingest, method main.
public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(Ingest.class.getName(), args, bwOpts);
    Connector conn = opts.getConnector();
    if (!conn.tableOperations().exists(opts.nameTable))
        conn.tableOperations().create(opts.nameTable);
    if (!conn.tableOperations().exists(opts.indexTable))
        conn.tableOperations().create(opts.indexTable);
    if (!conn.tableOperations().exists(opts.dataTable)) {
        conn.tableOperations().create(opts.dataTable);
        conn.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
    }
    BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
    BatchWriter indexBW = conn.createBatchWriter(opts.indexTable, bwOpts.getBatchWriterConfig());
    BatchWriter dataBW = conn.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
    FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
    for (String dir : opts.directories) {
        recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
        // fill in parent directory info
        int slashIndex = -1;
        while ((slashIndex = dir.lastIndexOf("/")) > 0) {
            dir = dir.substring(0, slashIndex);
            ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
        }
    }
    ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
    dirBW.close();
    indexBW.close();
    dataBW.close();
}
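BatchWriterOpts exists so the example does not have to assemble a BatchWriterConfig by hand. As a minimal sketch of roughly equivalent manual setup (the memory, latency, and thread values below are illustrative assumptions, not the options' actual defaults):

import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;

// Roughly what bwOpts.getBatchWriterConfig() produces; the values are illustrative only.
BatchWriterConfig cfg = new BatchWriterConfig();
cfg.setMaxMemory(50 * 1024 * 1024);      // buffer up to ~50 MB of mutations client-side
cfg.setMaxLatency(2, TimeUnit.MINUTES);  // flush buffered mutations at least this often
cfg.setMaxWriteThreads(4);               // threads used to send mutations to tablet servers
BatchWriter dirBW = conn.createBatchWriter(opts.nameTable, cfg);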
Use of org.apache.accumulo.examples.cli.BatchWriterOpts in project accumulo-examples by Apache.
The class RowOperations, method main.
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException {
    ClientOpts opts = new ClientOpts();
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(RowOperations.class.getName(), args, scanOpts, bwOpts);
    // First the setup work
    connector = opts.getConnector();
    // lets create an example table
    connector.tableOperations().create(tableName);
    // lets create 3 rows of information
    Text row1 = new Text("row1");
    Text row2 = new Text("row2");
    Text row3 = new Text("row3");
    // Which means 3 different mutations
    Mutation mut1 = new Mutation(row1);
    Mutation mut2 = new Mutation(row2);
    Mutation mut3 = new Mutation(row3);
    // And we'll put 4 columns in each row
    Text col1 = new Text("1");
    Text col2 = new Text("2");
    Text col3 = new Text("3");
    Text col4 = new Text("4");
    // Now we'll add them to the mutations
    mut1.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut1.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut1.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut1.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut2.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut2.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut2.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut2.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut3.put(new Text("column"), col1, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut3.put(new Text("column"), col2, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut3.put(new Text("column"), col3, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    mut3.put(new Text("column"), col4, System.currentTimeMillis(), new Value("This is the value for this key".getBytes(UTF_8)));
    // Now we'll make a Batch Writer
    bw = connector.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
    // And add the mutations
    bw.addMutation(mut1);
    bw.addMutation(mut2);
    bw.addMutation(mut3);
    // Force a send
    bw.flush();
    // Now lets look at the rows
    Scanner rowThree = getRow(scanOpts, new Text("row3"));
    Scanner rowTwo = getRow(scanOpts, new Text("row2"));
    Scanner rowOne = getRow(scanOpts, new Text("row1"));
    // And print them
    log.info("This is everything");
    printRow(rowOne);
    printRow(rowTwo);
    printRow(rowThree);
    System.out.flush();
    // Now lets delete rowTwo with the iterator
    rowTwo = getRow(scanOpts, new Text("row2"));
    deleteRow(rowTwo);
    // Now lets look at the rows again
    rowThree = getRow(scanOpts, new Text("row3"));
    rowTwo = getRow(scanOpts, new Text("row2"));
    rowOne = getRow(scanOpts, new Text("row1"));
    // And print them
    log.info("This is row1 and row3");
    printRow(rowOne);
    printRow(rowTwo);
    printRow(rowThree);
    System.out.flush();
    // Should only see the two rows
    // Now lets delete rowOne without passing in the iterator
    deleteRow(scanOpts, row1);
    // Now lets look at the rows one last time
    rowThree = getRow(scanOpts, new Text("row3"));
    rowTwo = getRow(scanOpts, new Text("row2"));
    rowOne = getRow(scanOpts, new Text("row1"));
    // And print them
    log.info("This is just row3");
    printRow(rowOne);
    printRow(rowTwo);
    printRow(rowThree);
    System.out.flush();
    // Should only see rowThree
    // Always close your batchwriter
    bw.close();
    // and lets clean up our mess
    connector.tableOperations().delete(tableName);
    // fin~
}
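The getRow, printRow and deleteRow helpers are defined elsewhere in RowOperations. As a hedged sketch of one plausible getRow implementation (the project's actual helper may differ, and the scanOpts.scanBatchSize field is assumed to be the batch-size option exposed by ScannerOpts):

import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

private static Scanner getRow(ScannerOpts scanOpts, Text row) throws TableNotFoundException {
    // Restrict the scanner to a single-row range so only the requested row is returned
    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
    scanner.setBatchSize(scanOpts.scanBatchSize);
    scanner.setRange(new Range(row));
    return scanner;
}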
Use of org.apache.accumulo.examples.cli.BatchWriterOpts in project accumulo-examples by Apache.
The class SequentialBatchWriter, method main.
/**
* Writes a specified number of entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting at a specified number.
* The column families will be "foo" and column qualifiers will be "1". The values will be random byte arrays of a specified size.
*/
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(SequentialBatchWriter.class.getName(), args, bwOpts);
    Connector connector = opts.getConnector();
    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
    long end = opts.start + opts.num;
    for (long i = opts.start; i < end; i++) {
        Mutation m = RandomBatchWriter.createMutation(i, opts.valueSize, opts.vis);
        bw.addMutation(m);
    }
    bw.close();
}
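The mutations come from RandomBatchWriter.createMutation. Based on the Javadoc above (column family "foo", qualifier "1", random value of the requested size), a sketch of that method could look like the following; the exact row formatting used here is an assumption:

import java.util.Random;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.hadoop.io.Text;

public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
    // Zero-padded row ids keep rows lexicographically sorted in numeric order (assumed format)
    Mutation m = new Mutation(new Text(String.format("row_%010d", rowid)));
    byte[] value = new byte[dataSize];
    new Random(rowid).nextBytes(value);  // seed with the row id so the value is reproducible per row
    m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
    return m;
}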
Use of org.apache.accumulo.examples.cli.BatchWriterOpts in project accumulo-examples by Apache.
The class CountIT, method test.
@Test
public void test() throws Exception {
    Scanner scanner = conn.createScanner(tableName, new Authorizations());
    scanner.fetchColumn(new Text("dir"), new Text("counts"));
    assertFalse(scanner.iterator().hasNext());
    ScannerOpts scanOpts = new ScannerOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    FileCount fc = new FileCount(conn, tableName, Authorizations.EMPTY, new ColumnVisibility(), scanOpts, bwOpts);
    fc.run();
    ArrayList<Pair<String, String>> expected = new ArrayList<>();
    expected.add(new Pair<>(QueryUtil.getRow("").toString(), "1,0,3,3"));
    expected.add(new Pair<>(QueryUtil.getRow("/local").toString(), "2,1,2,3"));
    expected.add(new Pair<>(QueryUtil.getRow("/local/user1").toString(), "0,2,0,2"));
    expected.add(new Pair<>(QueryUtil.getRow("/local/user2").toString(), "0,0,0,0"));
    int i = 0;
    for (Entry<Key, Value> e : scanner) {
        assertEquals(e.getKey().getRow().toString(), expected.get(i).getFirst());
        assertEquals(e.getValue().toString(), expected.get(i).getSecond());
        i++;
    }
    assertEquals(i, expected.size());
}
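After fc.run(), the counts can also be read back for a single directory. The sketch below uses only calls already shown in the test and assumes QueryUtil.getRow returns the Text row key for a path, as the expected-value setup above implies:

import org.apache.accumulo.core.data.Range;

Scanner s = conn.createScanner(tableName, new Authorizations());
s.setRange(new Range(QueryUtil.getRow("/local")));   // restrict the scan to one directory's row
s.fetchColumn(new Text("dir"), new Text("counts"));
for (Entry<Key, Value> entry : s) {
    System.out.println(entry.getKey().getRow() + " -> " + entry.getValue());
}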
Use of org.apache.accumulo.examples.cli.BatchWriterOpts in project accumulo-examples by Apache.
The class RandomBatchWriter, method main.
/**
* Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
*/
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Opts opts = new Opts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
    if ((opts.max - opts.min) < 1L * opts.num) {
        // right-side multiplied by 1L to convert to long in a way that doesn't trigger FindBugs
        System.err.println(String.format(
            "You must specify a min and a max that allow for at least num possible values. "
                + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.",
            opts.num, opts.min, opts.max, (opts.max - opts.min)));
        System.exit(1);
    }
    Random r;
    if (opts.seed == null) {
        r = new Random();
    } else {
        r = new Random(opts.seed);
    }
    Connector connector = opts.getConnector();
    BatchWriter bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
    // reuse the ColumnVisibility object to improve performance
    ColumnVisibility cv = opts.visiblity;
    // Generate num unique row ids in the given range
    HashSet<Long> rowids = new HashSet<>(opts.num);
    while (rowids.size() < opts.num) {
        rowids.add((abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
    }
    for (long rowid : rowids) {
        Mutation m = createMutation(rowid, opts.size, cv);
        bw.addMutation(m);
    }
    try {
        bw.close();
    } catch (MutationsRejectedException e) {
        if (e.getSecurityErrorCodes().size() > 0) {
            HashMap<String, Set<SecurityErrorCode>> tables = new HashMap<>();
            for (Entry<TabletId, Set<SecurityErrorCode>> ke : e.getSecurityErrorCodes().entrySet()) {
                String tableId = ke.getKey().getTableId().toString();
                Set<SecurityErrorCode> secCodes = tables.get(tableId);
                if (secCodes == null) {
                    secCodes = new HashSet<>();
                    tables.put(tableId, secCodes);
                }
                secCodes.addAll(ke.getValue());
            }
            System.err.println("ERROR : Not authorized to write to tables : " + tables);
        }
        if (e.getConstraintViolationSummaries().size() > 0) {
            System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
        }
        System.exit(1);
    }
}
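The per-table aggregation of security error codes in the catch block can be written more compactly on Java 8+ with Map.computeIfAbsent; this is an equivalent sketch, not the project's code:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Same grouping as the get/put-if-absent loop above, expressed with computeIfAbsent
Map<String, Set<SecurityErrorCode>> tables = new HashMap<>();
for (Entry<TabletId, Set<SecurityErrorCode>> ke : e.getSecurityErrorCodes().entrySet()) {
    tables.computeIfAbsent(ke.getKey().getTableId().toString(), k -> new HashSet<>())
        .addAll(ke.getValue());
}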