use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
the class TableChangeStateIT method createData.
/**
* Create the provided table and populate with some data using a batch writer. The table is scanned to ensure it was populated as expected.
*
* @param tableName
* the name of the table
*/
private void createData(final String tableName) {
  try {
    // create table.
    connector.tableOperations().create(tableName);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    // populate
    for (int i = 0; i < NUM_ROWS; i++) {
      Mutation m = new Mutation(new Text(String.format("%05d", i)));
      m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(UTF_8)));
      bw.addMutation(m);
    }
    bw.close();
    long startTimestamp = System.nanoTime();
    try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
      int count = 0;
      for (Map.Entry<Key,Value> elt : scanner) {
        String expected = String.format("%05d", count);
        assert (elt.getKey().getRow().toString().equals(expected));
        count++;
      }
      log.trace("Scan time for {} rows {} ms", NUM_ROWS, TimeUnit.MILLISECONDS.convert((System.nanoTime() - startTimestamp), TimeUnit.NANOSECONDS));
      if (count != NUM_ROWS) {
        throw new IllegalStateException(String.format("Number of rows %1$d does not match expected %2$d", count, NUM_ROWS));
      }
    }
  } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException | TableExistsException ex) {
    throw new IllegalStateException("Create data failed with exception", ex);
  }
}
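The method above accepts the BatchWriterConfig defaults. As a minimal sketch (not from the test), the config can also be tuned explicitly and the writer managed with try-with-resources so close() always runs; the buffer size, latency, and thread count below are illustrative assumptions, not values the test uses.

// Illustrative sketch only: tuned BatchWriterConfig plus try-with-resources,
// so close() (which flushes and reports rejected mutations) always runs.
BatchWriterConfig cfg = new BatchWriterConfig()
    .setMaxMemory(10 * 1024 * 1024)      // buffer up to ~10 MB of mutations client-side
    .setMaxLatency(30, TimeUnit.SECONDS) // flush buffered mutations at least every 30s
    .setMaxWriteThreads(4);              // send to tablet servers on 4 threads
try (BatchWriter writer = connector.createBatchWriter(tableName, cfg)) {
  Mutation m = new Mutation("00000");
  m.put("col1", "qual", "junk");
  writer.addMutation(m);
} // a MutationsRejectedException from close() signals failed writes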
use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
the class TabletStateChangeIteratorIT method addDuplicateLocation.
private void addDuplicateLocation(String table, String tableNameToModify) throws TableNotFoundException, MutationsRejectedException {
  Table.ID tableIdToModify = Table.ID.of(getConnector().tableOperations().tableIdMap().get(tableNameToModify));
  // write a second (fake) current-location entry for the table's default tablet
  Mutation m = new Mutation(new KeyExtent(tableIdToModify, null, null).getMetadataEntry());
  m.put(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME, new Text("1234567"), new Value("fake:9005".getBytes(UTF_8)));
  BatchWriter bw = getConnector().createBatchWriter(table, null);
  bw.addMutation(m);
  bw.close();
}
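In the current-location column family, the qualifier plays the role of a tserver session id and the value a host:port, both faked here. A hedged sketch of reading those entries back for inspection, using only calls already shown above:

// Sketch: list current-location entries; variable names mirror the method above.
try (Scanner s = getConnector().createScanner(table, Authorizations.EMPTY)) {
  s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
  for (Map.Entry<Key,Value> e : s) {
    // qualifier = tserver session id, value = tserver host:port
    System.out.printf("tablet=%s session=%s location=%s%n",
        e.getKey().getRow(), e.getKey().getColumnQualifier(), e.getValue());
  }
}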
use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
the class TabletStateChangeIteratorIT method reassignLocation.
private void reassignLocation(String table, String tableNameToModify) throws TableNotFoundException, MutationsRejectedException {
  Table.ID tableIdToModify = Table.ID.of(getConnector().tableOperations().tableIdMap().get(tableNameToModify));
  try (Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY)) {
    // find the tablet's current location entry in the metadata table
    scanner.setRange(new KeyExtent(tableIdToModify, null, null).toMetadataRange());
    scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
    Entry<Key,Value> entry = scanner.iterator().next();
    // in one mutation: delete the old location and write a fake replacement with a newer timestamp
    Mutation m = new Mutation(entry.getKey().getRow());
    m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), entry.getKey().getTimestamp());
    m.put(entry.getKey().getColumnFamily(), new Text("1234567"), entry.getKey().getTimestamp() + 1, new Value("fake:9005".getBytes(UTF_8)));
    BatchWriter bw = getConnector().createBatchWriter(table, null);
    bw.addMutation(m);
    bw.close();
  }
}
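Because the putDelete and the put share one Mutation, the location is swapped atomically, and the put uses timestamp + 1 so the new cell sorts as the newest version. A hedged generalization of that pattern (the helper name is ours, not from the test):

// Sketch: atomically move a cell to a new column qualifier within one Mutation.
static Mutation moveCell(Key current, Text newQualifier, Value newValue) {
  Mutation m = new Mutation(current.getRow());
  // remove the old cell at its original timestamp...
  m.putDelete(current.getColumnFamily(), current.getColumnQualifier(), current.getTimestamp());
  // ...and write the replacement one tick later so it is the newest version
  m.put(current.getColumnFamily(), newQualifier, current.getTimestamp() + 1, newValue);
  return m;
}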
use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
the class TimeoutIT method testBatchWriterTimeout.
public void testBatchWriterTimeout(Connector conn, String tableName) throws Exception {
  conn.tableOperations().create(tableName);
  conn.tableOperations().addConstraint(tableName, SlowConstraint.class.getName());
  // give constraint time to propagate through zookeeper
  sleepUninterruptibly(1, TimeUnit.SECONDS);
  BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
  Mutation mut = new Mutation("r1");
  mut.put("cf1", "cq1", "v1");
  bw.addMutation(mut);
  try {
    bw.close();
    fail("batch writer did not timeout");
  } catch (MutationsRejectedException mre) {
    if (mre.getCause() instanceof TimedOutException)
      return;
    throw mre;
  }
}
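SlowConstraint itself is not shown in this excerpt; to trip the three second timeout it only needs to stall check() on the server side. A hypothetical stand-in, assuming the org.apache.accumulo.core.constraints.Constraint interface from the same release:

import java.util.List;
import org.apache.accumulo.core.constraints.Constraint;
import org.apache.accumulo.core.data.Mutation;

// Hypothetical stand-in for SlowConstraint: blocks each mutation check far
// longer than the client's 3 second BatchWriter timeout.
public class SleepyConstraint implements Constraint {
  @Override
  public String getViolationDescription(short violationCode) {
    return "mutation deliberately stalled";
  }

  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    try {
      Thread.sleep(60_000); // hold the write; the client gives up first
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    return null; // null means the mutation has no violations
  }
}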
use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
the class TooManyDeletesIT method tooManyDeletesCompactionStrategyIT.
@Test
public void tooManyDeletesCompactionStrategyIT() throws Exception {
  Connector c = getConnector();
  String table = getUniqueNames(1)[0];
  SummarizerConfiguration sc = SummarizerConfiguration.builder(DeletesSummarizer.class).build();
  // TODO open issue about programmatic config of compaction strategies
  NewTableConfiguration ntc = new NewTableConfiguration().enableSummarization(sc);
  HashMap<String,String> props = new HashMap<>();
  props.put(Property.TABLE_COMPACTION_STRATEGY.getKey(), TooManyDeletesCompactionStrategy.class.getName());
  props.put(Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey() + TooManyDeletesCompactionStrategy.THRESHOLD_OPT, ".25");
  // ensure compaction does not happen because of the number of files
  props.put(Property.TABLE_MAJC_RATIO.getKey(), "10");
  ntc.setProperties(props);
  c.tableOperations().create(table, ntc);
  try (BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig())) {
    for (int i = 0; i < 1000; i++) {
      Mutation m = new Mutation("row" + i);
      m.put("f", "q", "v" + i);
      bw.addMutation(m);
    }
  }
  List<Summary> summaries = c.tableOperations().summaries(table).flush(true).withConfiguration(sc).retrieve();
  Assert.assertEquals(1, summaries.size());
  Summary summary = summaries.get(0);
  Assert.assertEquals(1000L, (long) summary.getStatistics().get(DeletesSummarizer.TOTAL_STAT));
  Assert.assertEquals(0L, (long) summary.getStatistics().get(DeletesSummarizer.DELETES_STAT));
  try (BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig())) {
    for (int i = 0; i < 100; i++) {
      Mutation m = new Mutation("row" + i);
      m.putDelete("f", "q");
      bw.addMutation(m);
    }
  }
  summaries = c.tableOperations().summaries(table).flush(true).withConfiguration(sc).retrieve();
  Assert.assertEquals(1, summaries.size());
  summary = summaries.get(0);
  Assert.assertEquals(1100L, (long) summary.getStatistics().get(DeletesSummarizer.TOTAL_STAT));
  Assert.assertEquals(100L, (long) summary.getStatistics().get(DeletesSummarizer.DELETES_STAT));
  try (BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig())) {
    for (int i = 100; i < 300; i++) {
      Mutation m = new Mutation("row" + i);
      m.putDelete("f", "q");
      bw.addMutation(m);
    }
  }
  // After a flush occurs, Accumulo checks whether a major compaction is needed. This check should
  // call the compaction strategy, which should decide to compact all files based on the number of deletes.
  c.tableOperations().flush(table, null, null, true);
  // wait for the compaction to happen
  while (true) {
    // the flush should cause a compaction that purges the delete entries
    summaries = c.tableOperations().summaries(table).flush(false).withConfiguration(sc).retrieve();
    Assert.assertEquals(1, summaries.size());
    summary = summaries.get(0);
    long total = summary.getStatistics().get(DeletesSummarizer.TOTAL_STAT);
    long deletes = summary.getStatistics().get(DeletesSummarizer.DELETES_STAT);
    if (total == 700 && deletes == 0) {
      // a compaction was triggered based on the number of deletes
      break;
    }
    UtilWaitThread.sleep(50);
  }
}
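The polling loop at the end is the reusable part: it re-reads summaries without forcing a flush until the compaction's effect becomes visible. A hedged refactoring of that loop (the helper name is ours, not from the test):

// Sketch: wait until delete entries are gone and the expected entry count remains.
static void awaitDeleteCompaction(Connector c, String table, SummarizerConfiguration sc,
    long expectedTotal) throws Exception {
  while (true) {
    Summary s = c.tableOperations().summaries(table).flush(false)
        .withConfiguration(sc).retrieve().get(0);
    long total = s.getStatistics().get(DeletesSummarizer.TOTAL_STAT);
    long deletes = s.getStatistics().get(DeletesSummarizer.DELETES_STAT);
    if (total == expectedTotal && deletes == 0)
      return; // files were rewritten; delete entries purged
    UtilWaitThread.sleep(50);
  }
}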