Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class VolumeIT, method writeData:
private void writeData(String tableName, Connector conn) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException {
  // Pre-split the table into 100 tablets (split points 000100, 000200, ..., 009900).
  TreeSet<Text> splits = new TreeSet<>();
  for (int i = 1; i < 100; i++) {
    splits.add(new Text(String.format("%06d", i * 100)));
  }
  conn.tableOperations().create(tableName);
  conn.tableOperations().addSplits(tableName, splits);
  // Write one entry per tablet: rows 000003, 000103, ... each land in a different tablet.
  BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
  for (int i = 0; i < 100; i++) {
    String row = String.format("%06d", i * 100 + 3);
    Mutation m = new Mutation(row);
    m.put("cf1", "cq1", "1");
    bw.addMutation(m);
  }
  bw.close();
}
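The snippet above passes a default BatchWriterConfig. A minimal sketch of tuning one instead; the specific values here are illustrative placeholders, not recommendations:

// Illustrative BatchWriterConfig tuning; each setter returns the config, so calls chain.
BatchWriterConfig config = new BatchWriterConfig()
    .setMaxMemory(10 * 1024 * 1024)        // buffer up to 10 MB of mutations client-side
    .setMaxLatency(30, TimeUnit.SECONDS)   // flush buffered mutations at least every 30 seconds
    .setMaxWriteThreads(4);                // send to tablet servers on up to 4 threads
BatchWriter writer = conn.createBatchWriter(tableName, config);

Larger buffers and more write threads generally improve ingest throughput at the cost of client memory and a longer window of unflushed data.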
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class TracerRecoversAfterOfflineTableIT, method test:
@Test
public void test() throws Exception {
  Process tracer = null;
  Connector conn = getConnector();
  if (!conn.tableOperations().exists("trace")) {
    MiniAccumuloClusterImpl mac = cluster;
    tracer = mac.exec(TraceServer.class);
    while (!conn.tableOperations().exists("trace")) {
      sleepUninterruptibly(1, TimeUnit.SECONDS);
    }
    sleepUninterruptibly(5, TimeUnit.SECONDS);
  }
  log.info("Taking table offline");
  conn.tableOperations().offline("trace", true);
  String tableName = getUniqueNames(1)[0];
  conn.tableOperations().create(tableName);
  log.info("Start a distributed trace span");
  DistributedTrace.enable("localhost", "testTrace", getClientConfig());
  Span root = Trace.on("traceTest");
  BatchWriter bw = conn.createBatchWriter(tableName, null);
  Mutation m = new Mutation("m");
  m.put("a", "b", "c");
  bw.addMutation(m);
  bw.close();
  root.stop();
  log.info("Bringing trace table back online");
  conn.tableOperations().online("trace", true);
  log.info("Trace table is online, should be able to find trace");
  try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
    scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
    while (true) {
      final StringBuilder finalBuffer = new StringBuilder();
      int traceCount = TraceDump.printTrace(scanner, new Printer() {
        @Override
        public void print(final String line) {
          try {
            finalBuffer.append(line).append("\n");
          } catch (Exception ex) {
            throw new RuntimeException(ex);
          }
        }
      });
      String traceOutput = finalBuffer.toString();
      log.info("Trace output:{}", traceOutput);
      if (traceCount > 0) {
        int lastPos = 0;
        for (String part : "traceTest,close,binMutations".split(",")) {
          log.info("Looking in trace output for '{}'", part);
          int pos = traceOutput.indexOf(part);
          assertTrue("Did not find '" + part + "' in output", pos > 0);
          assertTrue("'" + part + "' unexpectedly occurred earlier than the previous element", pos > lastPos);
          lastPos = pos;
        }
        break;
      } else {
        log.info("Ignoring trace output as traceCount not greater than zero: {}", traceCount);
        Thread.sleep(1000);
      }
    }
    if (tracer != null) {
      tracer.destroy();
    }
  }
}
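The tracing pattern used above, reduced to a standalone sketch. The host, service name, span name, and clientConfig variable are placeholders; the enable/on/stop calls are the same Accumulo 1.x API the test exercises:

// Minimal span lifecycle. Client operations performed while the span is
// active (for example, BatchWriter writes) are recorded against its trace id.
DistributedTrace.enable("localhost", "myService", clientConfig);
Span span = Trace.on("myOperation");
try {
  // traced work goes here
} finally {
  span.stop(); // close the span so it can be written to the trace table
}

Using try/finally guarantees the span is stopped even if the traced work throws, which keeps the trace table consistent.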
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class UserCompactionStrategyIT, method writeFlush:
private void writeFlush(Connector conn, String tablename, String row) throws Exception {
  // Write a single empty entry in the given row, then flush so the in-memory
  // data is minor-compacted into a new file.
  BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
  Mutation m = new Mutation(row);
  m.put("", "", "");
  bw.addMutation(m);
  bw.close();
  conn.tableOperations().flush(tablename, null, null, true);
}
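Each writeFlush call produces one new file, which is how a compaction-strategy test accumulates several small files to compact. A hypothetical caller; the row values, conn, and tableName are placeholders:

// Hypothetical usage: create three single-entry files, then force a
// full-table compaction that merges them back into one file.
for (String row : new String[] {"a", "b", "c"}) {
  writeFlush(conn, tableName, row);
}
conn.tableOperations().compact(tableName, null, null, true, true); // flush=true, wait=true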
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class VerifySerialRecoveryIT, method testSerializedRecovery:
@Test(timeout = 4 * 60 * 1000)
public void testSerializedRecovery() throws Exception {
  // make a table with many splits
  String tableName = getUniqueNames(1)[0];
  Connector c = getConnector();
  c.tableOperations().create(tableName);
  SortedSet<Text> splits = new TreeSet<>();
  for (int i = 0; i < 200; i++) {
    splits.add(new Text(randomHex(8)));
  }
  c.tableOperations().addSplits(tableName, splits);
  // load data to give the recovery something to do
  BatchWriter bw = c.createBatchWriter(tableName, null);
  for (int i = 0; i < 50000; i++) {
    Mutation m = new Mutation(randomHex(8));
    m.put("", "", "");
    bw.addMutation(m);
  }
  bw.close();
  // kill the tserver
  for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
    getCluster().killProcess(ServerType.TABLET_SERVER, ref);
  }
  final Process ts = cluster.exec(TabletServer.class);
  // wait for recovery: a full scan cannot complete until the write-ahead logs are replayed
  Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
  assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
  ts.waitFor();
  String result = FunctionalTestUtils.readAll(cluster, TabletServer.class, ts);
  for (String line : result.split("\n")) {
    System.out.println(line);
  }
  // walk through the output, verifying that only a single normal recovery was running at one time
  boolean started = false;
  int recoveries = 0;
  for (String line : result.split("\n")) {
    // ignore metadata tables
    if (line.contains("!0") || line.contains("+r"))
      continue;
    if (line.contains("Starting Write-Ahead Log")) {
      assertFalse(started);
      started = true;
      recoveries++;
    }
    if (line.contains("Write-Ahead Log recovery complete")) {
      assertTrue(started);
      started = false;
    }
  }
  assertFalse(started);
  assertTrue(recoveries > 0);
}
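randomHex is a helper defined on the test class and not shown on this page. A plausible sketch of such a helper, assumed for illustration and not the project's actual code:

// Assumed sketch of a randomHex-style helper: produce `digits` random
// hexadecimal characters for use as row keys and split points.
private static final java.util.Random rand = new java.util.Random();

static String randomHex(int digits) {
  StringBuilder sb = new StringBuilder(digits);
  for (int i = 0; i < digits; i++) {
    sb.append(Integer.toHexString(rand.nextInt(16)));
  }
  return sb.toString();
}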
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
The class BadIteratorMincIT, method test:
@Test
public void test() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  IteratorSetting is = new IteratorSetting(30, BadIterator.class);
  c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation(new Text("r1"));
  m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
  bw.addMutation(m);
  bw.close();
  c.tableOperations().flush(tableName, null, null, false);
  sleepUninterruptibly(1, TimeUnit.SECONDS);
  // minc should fail, so there should be no files
  FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
  // try to scan table
  try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
    int count = Iterators.size(scanner.iterator());
    assertEquals("Did not see expected # entries " + count, 1, count);
    // remove the bad iterator
    c.tableOperations().removeIterator(tableName, BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
    sleepUninterruptibly(5, TimeUnit.SECONDS);
    // minc should complete
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
    count = Iterators.size(scanner.iterator());
    if (count != 1)
      throw new Exception("Did not see expected # entries " + count);
    // now try putting bad iterator back and deleting the table
    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
    bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    m = new Mutation(new Text("r2"));
    m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.close();
    // make sure property is given time to propagate
    sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    c.tableOperations().flush(tableName, null, null, false);
    // make sure the flush has time to start
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    // this should not hang
    c.tableOperations().delete(tableName);
  }
}
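BadIterator is a test fixture whose source is not shown on this page; any minor-compaction iterator that throws unconditionally produces the failure this test relies on. A hedged stand-in sketch, with the class name and exception chosen for illustration:

// Illustrative stand-in for a BadIterator-style fixture, not the project's
// actual class: an iterator that always throws, so any minor compaction
// configured with it fails and is retried.
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.iterators.WrappingIterator;

public class AlwaysFailingIterator extends WrappingIterator {
  @Override
  public Key getTopKey() {
    throw new NullPointerException("intentional failure for testing");
  }
}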