Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project: the `test` method of class `BadIteratorMincIT`.
@Test
public void test() throws Exception {
    // Verifies that an iterator which fails during minor compaction (minc)
    // neither loses data nor prevents the table from being deleted.
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);

    // Attach BadIterator at minc scope so every minor compaction fails.
    IteratorSetting is = new IteratorSetting(30, BadIterator.class);
    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));

    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("r1"));
    m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.close();

    // Request a flush but don't wait; give the (failing) minc a moment to run.
    c.tableOperations().flush(tableName, null, null, false);
    sleepUninterruptibly(1, TimeUnit.SECONDS);

    // minc should fail, so there should be no files
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);

    // The in-memory data must still be readable even though minc failed.
    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
        int count = Iterators.size(scanner.iterator());
        assertEquals("Did not see expected # entries " + count, 1, count);

        // remove the bad iterator
        c.tableOperations().removeIterator(tableName, BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
        sleepUninterruptibly(5, TimeUnit.SECONDS);

        // minc should complete
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);

        // Use assertEquals here too, for consistency with the check above
        // (the original hand-rolled a `throw new Exception(...)`).
        count = Iterators.size(scanner.iterator());
        assertEquals("Did not see expected # entries " + count, 1, count);

        // now try putting bad iterator back and deleting the table
        c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
        bw = c.createBatchWriter(tableName, new BatchWriterConfig());
        m = new Mutation(new Text("r2"));
        m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
        bw.addMutation(m);
        bw.close();

        // make sure property is given time to propagate
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
        c.tableOperations().flush(tableName, null, null, false);

        // make sure the flush has time to start
        sleepUninterruptibly(1, TimeUnit.SECONDS);

        // this should not hang
        c.tableOperations().delete(tableName);
    }
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project: the `runFlushTest` method of class `BatchWriterFlushIT`.
/**
 * Writes four batches of {@code NUM_TO_FLUSH} mutations, flushing after each batch
 * and verifying the flushed rows are immediately visible to a scanner (first by
 * random spot-checks, then by a full range scan). Finally confirms that adding a
 * mutation to a closed BatchWriter throws IllegalStateException.
 *
 * Note: the original throws clause enumerated several Accumulo exception types,
 * all of which are subsumed by Exception, so the list is collapsed.
 */
private void runFlushTest(String tableName) throws Exception {
    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    try (Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY)) {
        Random r = new Random();
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < NUM_TO_FLUSH; j++) {
                int row = i * NUM_TO_FLUSH + j;
                Mutation m = new Mutation(new Text(String.format("r_%10d", row)));
                // Specify the charset explicitly; the no-arg getBytes() uses the
                // platform default, unlike the rest of these tests which use UTF_8.
                m.put(new Text("cf"), new Text("cq"), new Value(("" + row).getBytes(UTF_8)));
                bw.addMutation(m);
            }
            bw.flush();

            // Spot-check 10 random rows from the batch just flushed.
            for (int k = 0; k < 10; k++) {
                int rowToLookup = r.nextInt(NUM_TO_FLUSH) + i * NUM_TO_FLUSH;
                scanner.setRange(new Range(new Text(String.format("r_%10d", rowToLookup))));
                Iterator<Entry<Key,Value>> iter = scanner.iterator();
                if (!iter.hasNext())
                    throw new Exception(" row " + rowToLookup + " not found after flush");
                Entry<Key,Value> entry = iter.next();
                if (iter.hasNext())
                    throw new Exception("Scanner returned too much");
                verifyEntry(rowToLookup, entry);
            }

            // scan all data just flushed
            scanner.setRange(new Range(new Text(String.format("r_%10d", i * NUM_TO_FLUSH)), true, new Text(String.format("r_%10d", (i + 1) * NUM_TO_FLUSH)), false));
            Iterator<Entry<Key,Value>> iter = scanner.iterator();
            for (int j = 0; j < NUM_TO_FLUSH; j++) {
                int row = i * NUM_TO_FLUSH + j;
                if (!iter.hasNext())
                    throw new Exception("Scan stopped prematurely at " + row);
                Entry<Key,Value> entry = iter.next();
                verifyEntry(row, entry);
            }
            if (iter.hasNext())
                throw new Exception("Scanner returned too much");
        }
        bw.close();

        // test adding a mutation to a closed batch writer
        boolean caught = false;
        try {
            bw.addMutation(new Mutation(new Text("foobar")));
        } catch (IllegalStateException ise) {
            caught = true;
        }
        if (!caught) {
            throw new Exception("Adding to closed batch writer did not fail");
        }
    }
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project: the `test` method of class `DeletedTablesDontFlushIT`.
@Test
public void test() throws Exception {
    // Deleting a table whose minor compactions are artificially slowed must
    // still return quickly: a delete should not block waiting for a flush.
    Connector conn = getConnector();
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);

    // Make every minor compaction take at least a second per key.
    IteratorSetting slow = new IteratorSetting(100, SlowIterator.class);
    SlowIterator.setSleepTime(slow, 1000);
    conn.tableOperations().attachIterator(table, slow, EnumSet.of(IteratorScope.minc));

    // let the configuration change propagate through zookeeper
    UtilWaitThread.sleep(1000);

    // Load 100 columns under a single row.
    BatchWriter writer = conn.createBatchWriter(table, new BatchWriterConfig());
    Mutation mutation = new Mutation("xyzzy");
    for (int col = 0; col < 100; col++) {
        mutation.put("cf", Integer.toString(col), new Value(new byte[0]));
    }
    writer.addMutation(mutation);
    writer.close();

    // should go fast
    conn.tableOperations().delete(table);
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project: the `verifyDataIsPresent` method of class `CreateAndUseIT`.
@Test
public void verifyDataIsPresent() throws Exception {
    // Creates a pre-split table, writes one entry per split point, then scans
    // the whole table verifying rows come back in order with the right values.
    Text cf = new Text("cf1");
    Text cq = new Text("cq1");

    // Fetch the connector once, consistent with the sibling tests, instead of
    // calling getConnector() for every operation.
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().addSplits(tableName, splits);

    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 1; i < 257; i++) {
        // Row key is the zero-padded hex of (i << 8) - 16, matching the splits.
        Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
        m.put(cf, cq, new Value(Integer.toString(i).getBytes(UTF_8)));
        bw.addMutation(m);
    }
    bw.close();

    try (Scanner scanner1 = c.createScanner(tableName, Authorizations.EMPTY)) {
        int ei = 1;
        for (Entry<Key, Value> entry : scanner1) {
            Assert.assertEquals(String.format("%08x", (ei << 8) - 16), entry.getKey().getRow().toString());
            Assert.assertEquals(Integer.toString(ei), entry.getValue().toString());
            ei++;
        }
        // 256 entries were written, so the counter ends at 257.
        Assert.assertEquals("Did not see expected number of rows", 257, ei);
    }
}
Example usage of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project: the `run` method of class `DeleteEverythingIT`.
@Test
public void run() throws Exception {
    // Writes one entry, deletes it, and verifies that after a major compaction
    // (triggered by TABLE_MAJC_RATIO) the table ends up with zero rfiles and
    // zero visible entries.
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);

    // Use the local connector consistently instead of repeated getConnector() calls.
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("foo"));
    m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.flush();

    // Wait for the flush so exactly one rfile exists.
    c.tableOperations().flush(tableName, null, null, true);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);

    // Delete the entry; the delete marker shadows the flushed value.
    m = new Mutation(new Text("foo"));
    m.putDelete(new Text("bar"), new Text("1910"));
    bw.addMutation(m);
    bw.flush();

    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
        scanner.setRange(new Range());
        int count = Iterators.size(scanner.iterator());
        assertEquals("count == " + count, 0, count);

        // Flush the delete, then lower the compaction ratio so a full major
        // compaction runs and garbage-collects everything.
        c.tableOperations().flush(tableName, null, null, true);
        c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
        sleepUninterruptibly(4, TimeUnit.SECONDS);
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);

        bw.close();
        count = Iterables.size(scanner);
        if (count != 0) {
            throw new Exception("count == " + count);
        }
    }
}
Aggregations