Use of org.apache.accumulo.core.client.BatchWriterConfig in project YCSB by brianfrankcooper.
Class AccumuloClient, method getTable.
/**
 * Called when the user specifies a table that isn't the same as the existing
 * table. Connect to it and if necessary, close our current connection.
 *
 * @param t
 *          The table to open.
 */
public void getTable(String t) throws TableNotFoundException {
  if (bw != null) {
    // Close the existing writer if necessary.
    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      // Couldn't spit out the mutations we wanted.
      // Ignore this for now.
      System.err.println("MutationsRejectedException: " + e.getMessage());
    }
  }
  BatchWriterConfig bwc = new BatchWriterConfig();
  bwc.setMaxLatency(Long.parseLong(getProperties().getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS);
  bwc.setMaxMemory(Long.parseLong(getProperties().getProperty("accumulo.batchWriterSize", "100000")));
  bwc.setMaxWriteThreads(Integer.parseInt(getProperties().getProperty("accumulo.batchWriterThreads", "1")));
  bw = connector.createBatchWriter(t, bwc);
  // Create our scanners.
  singleScanner = connector.createScanner(t, Authorizations.EMPTY);
  scanScanner = connector.createScanner(t, Authorizations.EMPTY);
  // Store the name of the table we have open.
  table = t;
}
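For reference, the BatchWriterConfig setters return the config object itself, so the same three knobs can also be set in a single chain. A minimal sketch with placeholder values and table name (not taken from the snippet above), reusing the connector field:

BatchWriterConfig config = new BatchWriterConfig()
    .setMaxLatency(30000, TimeUnit.MILLISECONDS) // flush buffered mutations at least this often
    .setMaxMemory(100000)                        // client-side buffer size in bytes before an automatic flush
    .setMaxWriteThreads(1);                      // threads used to send mutations to tablet servers
BatchWriter writer = connector.createBatchWriter("usertable", config); // "usertable" is a placeholder name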
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
Class SplitRecoveryIT, method test.
@Test
public void test() throws Exception {
  String tableName = getUniqueNames(1)[0];
  for (int tn = 0; tn < 2; tn++) {
    Connector connector = getConnector();
    // create a table and put some data in it
    connector.tableOperations().create(tableName);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    bw.addMutation(m("a"));
    bw.addMutation(m("b"));
    bw.addMutation(m("c"));
    bw.close();
    // take the table offline
    connector.tableOperations().offline(tableName);
    while (!isOffline(tableName, connector))
      sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
    // poke a partial split into the metadata table
    connector.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
    Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    KeyExtent extent = new KeyExtent(tableId, null, new Text("b"));
    Mutation m = extent.getPrevRowUpdateMutation();
    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
    bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    bw.addMutation(m);
    if (tn == 1) {
      bw.flush();
      try (Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetadataRange());
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        KeyExtent extent2 = new KeyExtent(tableId, new Text("b"), null);
        m = extent2.getPrevRowUpdateMutation();
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
        TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
        for (Entry<Key, Value> entry : scanner) {
          m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
        }
        bw.addMutation(m);
      }
    }
    bw.close();
    // bring the table online
    connector.tableOperations().online(tableName);
    // verify the tablets went online
    try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
      int i = 0;
      String[] expected = { "a", "b", "c" };
      for (Entry<Key, Value> entry : scanner) {
        assertEquals(expected[i], entry.getKey().getRow().toString());
        i++;
      }
      assertEquals(3, i);
      connector.tableOperations().delete(tableName);
    }
  }
}
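The test calls a helper m(String) that is not shown in this snippet. A plausible minimal version, assuming each row only needs a single placeholder cell (the column names and value here are illustrative, not from the original test):

private static Mutation m(String row) {
  Mutation result = new Mutation(row);
  result.put("cf", "cq", new Value("value".getBytes())); // one dummy cell so the row exists
  return result;
}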
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
Class TableOperationsIT, method getDiskUsage.
@Test
public void getDiskUsage() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
  final String[] names = getUniqueNames(2);
  String tableName = names[0];
  connector.tableOperations().create(tableName);
  // verify 0 disk usage
  List<DiskUsage> diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
  assertEquals(1, diskUsages.size());
  assertEquals(1, diskUsages.get(0).getTables().size());
  assertEquals(Long.valueOf(0), diskUsages.get(0).getUsage());
  assertEquals(tableName, diskUsages.get(0).getTables().first());
  // add some data
  BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("a");
  m.put("b", "c", new Value("abcde".getBytes()));
  bw.addMutation(m);
  bw.flush();
  bw.close();
  connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
  // verify we have usage
  diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
  assertEquals(1, diskUsages.size());
  assertEquals(1, diskUsages.get(0).getTables().size());
  assertTrue(diskUsages.get(0).getUsage() > 0);
  assertEquals(tableName, diskUsages.get(0).getTables().first());
  String newTable = names[1];
  // clone table
  connector.tableOperations().clone(tableName, newTable, false, null, null);
  // verify tables are exactly the same
  Set<String> tables = new HashSet<>();
  tables.add(tableName);
  tables.add(newTable);
  diskUsages = connector.tableOperations().getDiskUsage(tables);
  assertEquals(1, diskUsages.size());
  assertEquals(2, diskUsages.get(0).getTables().size());
  assertTrue(diskUsages.get(0).getUsage() > 0);
  connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
  connector.tableOperations().compact(newTable, new Text("A"), new Text("z"), true, true);
  // verify tables have differences
  diskUsages = connector.tableOperations().getDiskUsage(tables);
  assertEquals(2, diskUsages.size());
  assertEquals(1, diskUsages.get(0).getTables().size());
  assertEquals(1, diskUsages.get(1).getTables().size());
  assertTrue(diskUsages.get(0).getUsage() > 0);
  assertTrue(diskUsages.get(1).getUsage() > 0);
  connector.tableOperations().delete(tableName);
}
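As the assertions above suggest, getDiskUsage groups tables that share underlying files into a single DiskUsage entry, which is why the freshly cloned table first appears in the same entry as the original and only gets its own entry after both tables are compacted. A minimal sketch for inspecting the result, reusing the tables set from the test:

for (DiskUsage usage : connector.tableOperations().getDiskUsage(tables)) {
  // getTables() lists the tables sharing these files; getUsage() is their combined size in bytes
  System.out.println(usage.getTables() + " -> " + usage.getUsage() + " bytes");
}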
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
Class TableOperationsIT, method createMergeClonedTable.
@Test
public void createMergeClonedTable() throws Exception {
  String[] names = getUniqueNames(2);
  String originalTable = names[0];
  TableOperations tops = connector.tableOperations();
  TreeSet<Text> splits = Sets.newTreeSet(Arrays.asList(new Text("a"), new Text("b"), new Text("c"), new Text("d")));
  tops.create(originalTable);
  tops.addSplits(originalTable, splits);
  BatchWriter bw = connector.createBatchWriter(originalTable, new BatchWriterConfig());
  for (Text row : splits) {
    Mutation m = new Mutation(row);
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        m.put(Integer.toString(i), Integer.toString(j), Integer.toString(i + j));
      }
    }
    bw.addMutation(m);
  }
  bw.close();
  String clonedTable = names[1];
  tops.clone(originalTable, clonedTable, true, null, null);
  tops.merge(clonedTable, null, new Text("b"));
  Map<String, Integer> rowCounts = new HashMap<>();
  try (Scanner s = connector.createScanner(clonedTable, new Authorizations())) {
    for (Entry<Key, Value> entry : s) {
      final Key key = entry.getKey();
      String row = key.getRow().toString();
      String cf = key.getColumnFamily().toString(), cq = key.getColumnQualifier().toString();
      String value = entry.getValue().toString();
      if (rowCounts.containsKey(row)) {
        rowCounts.put(row, rowCounts.get(row) + 1);
      } else {
        rowCounts.put(row, 1);
      }
      Assert.assertEquals(Integer.parseInt(cf) + Integer.parseInt(cq), Integer.parseInt(value));
    }
  }
  Collection<Text> clonedSplits = tops.listSplits(clonedTable);
  Set<Text> expectedSplits = Sets.newHashSet(new Text("b"), new Text("c"), new Text("d"));
  for (Text clonedSplit : clonedSplits) {
    Assert.assertTrue("Encountered unexpected split on the cloned table: " + clonedSplit, expectedSplits.remove(clonedSplit));
  }
  Assert.assertTrue("Did not find all expected splits on the cloned table: " + expectedSplits, expectedSplits.isEmpty());
}
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
Class CleanTmpIT, method test.
@Test
public void test() throws Exception {
  Connector c = getConnector();
  // make a table
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // write to it
  BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.flush();
  // Compact memory to make a file
  c.tableOperations().compact(tableName, null, null, true, true);
  // Make sure that we'll have a WAL
  m = new Mutation("row2");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.close();
  // create a fake _tmp file in its directory
  String id = c.tableOperations().tableIdMap().get(tableName);
  Path file;
  try (Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(Range.prefix(id));
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    Entry<Key, Value> entry = Iterables.getOnlyElement(s);
    file = new Path(entry.getKey().getColumnQualifier().toString());
  }
  FileSystem fs = getCluster().getFileSystem();
  assertTrue("Could not find file: " + file, fs.exists(file));
  Path tabletDir = file.getParent();
  assertNotNull("Tablet dir should not be null", tabletDir);
  Path tmp = new Path(tabletDir, "junk.rf_tmp");
  // Make the file
  fs.create(tmp).close();
  log.info("Created tmp file {}", tmp.toString());
  getCluster().stop();
  getCluster().start();
  try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
    assertEquals(2, Iterators.size(scanner.iterator()));
    // If we performed log recovery, we should have cleaned up any stray files
    assertFalse("File still exists: " + tmp, fs.exists(tmp));
  }
}
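All of the snippets above close the BatchWriter explicitly. In Accumulo versions where BatchWriter implements AutoCloseable, the same write pattern can use try-with-resources so the final flush-and-close happens even when an exception is thrown. A minimal sketch, assuming an existing connector and a placeholder table name:

try (BatchWriter writer = connector.createBatchWriter("mytable", new BatchWriterConfig())) {
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value");
  writer.addMutation(m);
} // close() runs automatically, flushing any buffered mutations
// close() can still throw MutationsRejectedException, so the enclosing method must declare or handle it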