Example usage of org.apache.accumulo.core.client.BatchWriter in the Apache Accumulo project.
From class ZookeeperRestartIT, method test().
/**
 * Verifies that tablet servers survive a ZooKeeper restart: ingest a row, kill
 * every ZooKeeper process, restart the cluster, then confirm the row is still
 * scannable and the table can be deleted (exercising the master).
 */
@Test
public void test() throws Exception {
    Connector c = getConnector();
    c.tableOperations().create("test_ingest");
    // Use an explicit default config instead of null, consistent with the other
    // BatchWriter call sites in this project.
    BatchWriter bw = c.createBatchWriter("test_ingest", new BatchWriterConfig());
    try {
        Mutation m = new Mutation("row");
        m.put("cf", "cq", "value");
        bw.addMutation(m);
    } finally {
        // Close in finally so the writer is released even if addMutation throws.
        bw.close();
    }
    // kill zookeeper
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.ZOOKEEPER)) {
        cluster.killProcess(ServerType.ZOOKEEPER, proc);
    }
    // give the servers time to react
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    // start zookeeper back up
    cluster.start();
    // use the tservers
    try (Scanner s = c.createScanner("test_ingest", Authorizations.EMPTY)) {
        Iterator<Entry<Key, Value>> i = s.iterator();
        assertTrue(i.hasNext());
        assertEquals("row", i.next().getKey().getRow().toString());
        assertFalse(i.hasNext());
        // use the master
        c.tableOperations().delete("test_ingest");
    }
}
Example usage of org.apache.accumulo.core.client.BatchWriter in the Apache Accumulo project.
From class CloseWriteAheadLogReferencesIT, method partiallyReplicatedReferencedWalsAreNotClosed().
/**
 * A WAL whose replication status is still open ("ingested until", not fully
 * replicated) must not be marked closed by updateReplicationEntries.
 */
@Test
public void partiallyReplicatedReferencedWalsAreNotClosed() throws Exception {
    final String walFile = "file:/accumulo/wal/tserver+port/12345";
    final Set<String> referencedWals = Collections.singleton(walFile);

    // Seed the replication table with an open (partially replicated) status entry.
    BatchWriter writer = ReplicationTable.getBatchWriter(conn);
    Mutation statusMutation = new Mutation(walFile);
    StatusSection.add(statusMutation, Table.ID.of("1"), ProtobufUtil.toValue(StatusUtil.ingestedUntil(1000)));
    writer.addMutation(statusMutation);
    writer.close();

    refs.updateReplicationEntries(conn, referencedWals);

    // The lone status entry should still report closed == false.
    try (Scanner scanner = ReplicationTable.getScanner(conn)) {
        Entry<Key, Value> onlyEntry = Iterables.getOnlyElement(scanner);
        Status parsed = Status.parseFrom(onlyEntry.getValue().get());
        Assert.assertFalse(parsed.getClosed());
    }
}
Example usage of org.apache.accumulo.core.client.BatchWriter in the Apache Accumulo project.
From class CloseWriteAheadLogReferencesIT, method closedWalsUpdateStatus().
/**
 * A WAL referenced only by a metadata-table replication entry should have its
 * status flipped to closed by updateReplicationEntries.
 */
@Test
public void closedWalsUpdateStatus() throws Exception {
    final String walFile = "file:/accumulo/wal/tserver+port/12345";
    final Set<String> referencedWals = Collections.singleton(walFile);

    // Write a file-created replication marker for the WAL into the metadata table.
    BatchWriter writer = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    Mutation marker = new Mutation(ReplicationSection.getRowPrefix() + walFile);
    marker.put(ReplicationSection.COLF, new Text("1"), StatusUtil.fileCreatedValue(System.currentTimeMillis()));
    writer.addMutation(marker);
    writer.close();

    refs.updateReplicationEntries(conn, referencedWals);

    // After the update, the single replication entry must report closed == true.
    try (Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        scanner.fetchColumnFamily(ReplicationSection.COLF);
        Entry<Key, Value> onlyEntry = Iterables.getOnlyElement(scanner);
        Status parsed = Status.parseFrom(onlyEntry.getValue().get());
        Assert.assertTrue(parsed.getClosed());
    }
}
Example usage of org.apache.accumulo.core.client.BatchWriter in the Apache Accumulo project.
From class RowDeleteIT, method run().
/**
 * Exercises the RowDeletingIterator: writes a row across two locality groups,
 * inserts a row-delete marker, and verifies entry counts and RFile counts
 * before and after a full compaction removes the row.
 */
@Test
public void run() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Two locality groups so the row's columns land in separate files.
    Map<String, Set<Text>> groups = new HashMap<>();
    groups.put("lg1", Collections.singleton(new Text("foo")));
    groups.put("dg", Collections.emptySet());
    c.tableOperations().setLocalityGroups(tableName, groups);
    // Row deletion is applied only at major compaction time.
    IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
    c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.majc));
    // High ratio prevents automatic major compactions from interfering.
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
    // try-with-resources guarantees the writer is closed even if an assertion
    // or compaction below throws (the original leaked it on failure paths).
    try (BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig())) {
        bw.addMutation(nm("r1", "foo", "cf1", "v1"));
        bw.addMutation(nm("r1", "bar", "cf1", "v2"));
        bw.flush();
        c.tableOperations().flush(tableName, null, null, true);
        checkRFiles(c, tableName, 1, 1, 1, 1);
        int count;
        try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
            count = Iterators.size(scanner.iterator());
            assertEquals("count == " + count, 2, count);
            // Insert the row-delete marker for r1.
            bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));
            bw.flush();
            c.tableOperations().flush(tableName, null, null, true);
            checkRFiles(c, tableName, 1, 1, 2, 2);
        }
        try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
            // Marker not yet applied at scan time: 2 entries + the marker = 3.
            count = Iterators.size(scanner.iterator());
            assertEquals("count == " + count, 3, count);
            // Major compaction runs the RowDeletingIterator and drops the row.
            c.tableOperations().compact(tableName, null, null, false, true);
            checkRFiles(c, tableName, 1, 1, 0, 0);
        }
        try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
            count = Iterators.size(scanner.iterator());
            assertEquals("count == " + count, 0, count);
        }
    }
}
Example usage of org.apache.accumulo.core.client.BatchWriter in the Apache Accumulo project.
From class ScanIteratorIT, method writeTestMutation().
/**
 * Writes a single sentinel entry (row "1", family "2", qualifier "3", empty
 * value) to {@code tableName} using the supplied user connection.
 *
 * @param userC connection belonging to the test user
 * @throws TableNotFoundException if {@code tableName} does not exist
 * @throws MutationsRejectedException if the server rejects the write
 */
private void writeTestMutation(Connector userC) throws TableNotFoundException, MutationsRejectedException {
    BatchWriter writer = userC.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation mutation = new Mutation("1");
    mutation.put(new Text("2"), new Text("3"), new Value("".getBytes()));
    writer.addMutation(mutation);
    writer.flush();
    writer.close();
}
Aggregations