Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class CombinerIT, method aggregationTest:
@Test
public void aggregationTest() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Attach a SummingCombiner (priority 10) that sums all values in column family "cf"
    IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
    SummingCombiner.setEncodingType(setting, Type.STRING);
    SummingCombiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column("cf")));
    c.tableOperations().attachIterator(tableName, setting);
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    // Write the values 0..9 to the same key; the combiner collapses them into their sum
    for (int i = 0; i < 10; i++) {
        Mutation m = new Mutation("row1");
        m.put("cf".getBytes(), "col1".getBytes(), ("" + i).getBytes());
        bw.addMutation(m);
    }
    bw.close();
    checkSum(tableName, c);
}
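The checkSum helper is not shown in this snippet. A minimal sketch of what it could look like, assuming it scans the table with empty authorizations and asserts that the combiner collapsed the ten writes into the single value "45" (0 + 1 + ... + 9); the usual JUnit and Accumulo imports are assumed:

    // Hypothetical sketch of the checkSum helper referenced above (not part of the original snippet)
    private void checkSum(String tableName, Connector c) throws Exception {
        try (Scanner s = c.createScanner(tableName, Authorizations.EMPTY)) {
            Iterator<Entry<Key, Value>> iter = s.iterator();
            assertTrue(iter.hasNext());
            // The SummingCombiner should have combined 0..9 into one cell holding "45"
            assertEquals("45", iter.next().getValue().toString());
            assertFalse(iter.hasNext());
        }
    }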
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class ConcurrencyIT, method runTest:
static void runTest(Connector c, String tableName) throws Exception {
    c.tableOperations().create(tableName);
    // Attach a SlowIterator to minor and major compactions so they take noticeable time
    IteratorSetting is = new IteratorSetting(10, SlowIterator.class);
    SlowIterator.setSleepTime(is, 50);
    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc, IteratorScope.majc));
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 0; i < 50; i++) {
        Mutation m = new Mutation(new Text(String.format("%06d", i)));
        m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
        bw.addMutation(m);
    }
    bw.flush();
    // Start scans that overlap the flush and compaction activity
    ScanTask st0 = new ScanTask(c, tableName, 300);
    st0.start();
    ScanTask st1 = new ScanTask(c, tableName, 100);
    st1.start();
    sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    c.tableOperations().flush(tableName, null, null, true);
    // Overwrite the same 50 rows so the next flush produces a second file
    for (int i = 0; i < 50; i++) {
        Mutation m = new Mutation(new Text(String.format("%06d", i)));
        m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
        bw.addMutation(m);
    }
    bw.flush();
    ScanTask st2 = new ScanTask(c, tableName, 100);
    st2.start();
    st1.join();
    st2.join();
    if (st1.count != 50)
        throw new Exception("Thread 1 did not see 50, saw " + st1.count);
    if (st2.count != 50)
        throw new Exception("Thread 2 did not see 50, saw " + st2.count);
    // One more scan while the final flush runs in the background
    ScanTask st3 = new ScanTask(c, tableName, 150);
    st3.start();
    sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
    c.tableOperations().flush(tableName, null, null, false);
    st3.join();
    if (st3.count != 50)
        throw new Exception("Thread 3 did not see 50, saw " + st3.count);
    st0.join();
    if (st0.count != 50)
        throw new Exception("Thread 0 did not see 50, saw " + st0.count);
    bw.close();
}
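ScanTask is defined elsewhere in ConcurrencyIT and is not reproduced here. A minimal sketch of a thread that fits the usage above, assuming it counts entries through an IsolatedScanner with a per-scan SlowIterator whose sleep time is the constructor argument; the field and constructor shapes are illustrative:

    // Illustrative sketch of a ScanTask compatible with the calls above
    static class ScanTask extends Thread {
        int count = 0;
        private final Scanner scanner;

        ScanTask(Connector conn, String tableName, long sleepTime) throws Exception {
            scanner = new IsolatedScanner(conn.createScanner(tableName, Authorizations.EMPTY));
            // Slow down the scan itself so it overlaps the flushes and compactions triggered by the test
            IteratorSetting slow = new IteratorSetting(30, "slow", SlowIterator.class);
            SlowIterator.setSleepTime(slow, sleepTime);
            scanner.addScanIterator(slow);
        }

        @Override
        public void run() {
            for (Entry<Key, Value> entry : scanner) {
                count++;
            }
        }
    }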
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class ConfigurableCompactionIT, method writeFlush:
private void writeFlush(Connector conn, String tablename, String row) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
    // Write a single empty cell for the given row, then flush the table to force a new file
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    conn.tableOperations().flush(tablename, null, null, true);
}
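Because each call writes one cell and then waits for a flush, calling the helper repeatedly is a cheap way to accumulate small files and exercise a compaction strategy. A hedged usage sketch; the row values and the count of three are illustrative only:

    // Illustrative usage: each call creates roughly one new flushed file for the table
    for (int i = 0; i < 3; i++) {
        writeFlush(conn, tableName, "row" + i);
    }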
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class VolumeChooserIT, method writeDataToTable:
public static void writeDataToTable(Connector connector, String tableName) throws Exception {
    // Write some data to the table
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    for (String s : rows) {
        Mutation m = new Mutation(new Text(s));
        m.put(EMPTY, EMPTY, EMPTY_VALUE);
        bw.addMutation(m);
    }
    bw.close();
}
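The rows, EMPTY, and EMPTY_VALUE identifiers are class-level fields of VolumeChooserIT that do not appear in this snippet. Plausible declarations consistent with how they are used above; the concrete row values are illustrative:

    // Illustrative field declarations matching the usage above; the real values live in VolumeChooserIT
    private static final String[] rows = "a,b,c,d,e,f".split(",");
    private static final Text EMPTY = new Text();
    private static final Value EMPTY_VALUE = new Value(new byte[0]);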
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class VolumeIT, method testRelativePaths:
@Test
public void testRelativePaths() throws Exception {
    List<String> expected = new ArrayList<>();
    Connector connector = getConnector();
    String tableName = getUniqueNames(1)[0];
    connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
    Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    SortedSet<Text> partitions = new TreeSet<>();
    // with some splits
    for (String s : "c,g,k,p,s,v".split(","))
        partitions.add(new Text(s));
    connector.tableOperations().addSplits(tableName, partitions);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    // create two files in each tablet
    String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
    for (String s : rows) {
        Mutation m = new Mutation(s);
        m.put("cf1", "cq1", "1");
        bw.addMutation(m);
        expected.add(s + ":cf1:cq1:1");
    }
    bw.flush();
    connector.tableOperations().flush(tableName, null, null, true);
    for (String s : rows) {
        Mutation m = new Mutation(s);
        m.put("cf1", "cq1", "2");
        bw.addMutation(m);
        expected.add(s + ":cf1:cq1:2");
    }
    bw.close();
    connector.tableOperations().flush(tableName, null, null, true);
    verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
    // Take the table offline, then rewrite its file entries in the metadata table as relative paths
    connector.tableOperations().offline(tableName, true);
    connector.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    try (Scanner metaScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        BatchWriter mbw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        for (Entry<Key, Value> entry : metaScanner) {
            String cq = entry.getKey().getColumnQualifier().toString();
            if (cq.startsWith(v1.toString())) {
                // Replace the absolute file path (rooted at volume v1) with a relative one
                Path path = new Path(cq);
                String relPath = "/" + path.getParent().getName() + "/" + path.getName();
                Mutation fileMut = new Mutation(entry.getKey().getRow());
                fileMut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
                fileMut.put(entry.getKey().getColumnFamily().toString(), relPath, entry.getValue().toString());
                mbw.addMutation(fileMut);
            }
        }
        mbw.close();
        // Bring the table back online and confirm the data is still readable via the relative paths
        connector.tableOperations().online(tableName, true);
        verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
        // Compaction rewrites the files, which should remove the relative path entries
        connector.tableOperations().compact(tableName, null, null, true, true);
        verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
        for (Entry<Key, Value> entry : metaScanner) {
            String cq = entry.getKey().getColumnQualifier().toString();
            Path path = new Path(cq);
            Assert.assertTrue("relative path not deleted " + path.toString(), path.depth() > 2);
        }
    }
}
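verifyData is another VolumeIT helper that is not reproduced here. A minimal sketch of the check it needs to perform, assuming it formats each scanned entry as row:family:qualifier:value and compares the sorted lists against the expected values built above:

    // Illustrative sketch of the verifyData helper used above
    private void verifyData(List<String> expected, Scanner scanner) {
        List<String> actual = new ArrayList<>();
        for (Entry<Key, Value> entry : scanner) {
            Key k = entry.getKey();
            actual.add(k.getRow() + ":" + k.getColumnFamily() + ":" + k.getColumnQualifier() + ":" + entry.getValue());
        }
        scanner.close();
        Collections.sort(expected);
        Collections.sort(actual);
        Assert.assertEquals(expected, actual);
    }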