Use of org.apache.accumulo.core.client.BatchWriterConfig in project hive by apache.
From the class TestAccumuloDefaultIndexScanner, method buildMockHandler:
public static AccumuloDefaultIndexScanner buildMockHandler(int maxMatches) {
  try {
    String table = "table";
    Text emptyText = new Text("");
    // index scanner configuration: index table name, row cap, indexed columns, and column mappings
    Configuration conf = new Configuration();
    conf.set(AccumuloIndexParameters.INDEXTABLE_NAME, table);
    conf.setInt(AccumuloIndexParameters.MAX_INDEX_ROWS, maxMatches);
    conf.set(AccumuloIndexParameters.INDEXED_COLUMNS, "*");
    conf.set(serdeConstants.LIST_COLUMNS, "rid,name,age,cars,mgr");
    conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowId,name:name,age:age,cars:cars,mgr:mgr");
    AccumuloDefaultIndexScanner handler = new AccumuloDefaultIndexScanner();
    handler.init(conf);
    // in-memory Accumulo instance backing the mock handler
    MockInstance inst = new MockInstance("test_instance");
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    if (!conn.tableOperations().exists(table)) {
      // populate the index table once with entries keyed by indexed value
      conn.tableOperations().create(table);
      BatchWriterConfig batchConfig = new BatchWriterConfig();
      BatchWriter writer = conn.createBatchWriter(table, batchConfig);
      addRow(writer, "fred", "name_name", "row1");
      addRow(writer, "25", "age_age", "row1");
      addRow(writer, 5, "cars_cars", "row1");
      addRow(writer, true, "mgr_mgr", "row1");
      addRow(writer, "bill", "name_name", "row2");
      addRow(writer, "20", "age_age", "row2");
      addRow(writer, 2, "cars_cars", "row2");
      addRow(writer, false, "mgr_mgr", "row2");
      addRow(writer, "sally", "name_name", "row3");
      addRow(writer, "23", "age_age", "row3");
      addRow(writer, 6, "cars_cars", "row3");
      addRow(writer, true, "mgr_mgr", "row3");
      addRow(writer, "rob", "name_name", "row4");
      addRow(writer, "60", "age_age", "row4");
      addRow(writer, 1, "cars_cars", "row4");
      addRow(writer, false, "mgr_mgr", "row4");
      writer.close();
    }
    // hand the handler a mocked connection-parameters object that returns the mock connector
    AccumuloConnectionParameters connectionParams = Mockito.mock(AccumuloConnectionParameters.class);
    AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class);
    Mockito.when(connectionParams.getConnector()).thenReturn(conn);
    handler.setConnectParams(connectionParams);
    return handler;
  } catch (AccumuloSecurityException | AccumuloException | TableExistsException | TableNotFoundException e) {
    LOG.error(e.getLocalizedMessage(), e);
  }
  return null;
}
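The addRow helper used above is not included in this snippet. A minimal sketch of what the string-valued overload could look like, assuming the index layout puts the indexed value in the row, the family_qualifier pair in the column family, the original row id in the column qualifier, and an empty cell value; the actual Hive test may encode values differently:

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

public class IndexRowHelper {
  private static final Value EMPTY_VALUE = new Value(new byte[0]);

  // Hypothetical stand-in for the addRow helper: each index entry maps an
  // indexed value back to the row id that contains it. The int and boolean
  // overloads used in the test would first encode the value into bytes.
  static void addRow(BatchWriter writer, String value, String indexColumn, String rowId)
      throws MutationsRejectedException {
    Mutation m = new Mutation(new Text(value));
    m.put(new Text(indexColumn), new Text(rowId), EMPTY_VALUE);
    writer.addMutation(m);
  }
}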
Use of org.apache.accumulo.core.client.BatchWriterConfig in project apex-malhar by apache.
From the class AccumuloTestHelper, method populateAccumulo:
public static void populateAccumulo() throws IOException {
  BatchWriterConfig config = new BatchWriterConfig();
  BatchWriter batchwriter = null;
  try {
    batchwriter = con.createBatchWriter("tab1", config);
  } catch (TableNotFoundException e) {
    logger.error("error in test helper");
    DTThrowable.rethrow(e);
  }
  try {
    // write 500 rows x 500 columns of synthetic data
    for (int i = 0; i < 500; ++i) {
      String rowstr = "row" + i;
      Mutation mutation = new Mutation(rowstr.getBytes());
      for (int j = 0; j < 500; ++j) {
        String colstr = "col" + "-" + j;
        String valstr = "val" + "-" + i + "-" + j;
        mutation.put(colfam0_bytes, colstr.getBytes(), System.currentTimeMillis(), valstr.getBytes());
      }
      batchwriter.addMutation(mutation);
    }
    batchwriter.close();
  } catch (MutationsRejectedException e) {
    logger.error("error in test helper");
    DTThrowable.rethrow(e);
  }
}
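The helper above accepts the default BatchWriterConfig limits. For larger loads the config can be tuned before the writer is created; a minimal sketch using the standard BatchWriterConfig setters (the table name and the specific limits are illustrative, not taken from the test):

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.TableNotFoundException;

public class TunedWriterExample {
  // Creates a BatchWriter with explicit buffering limits instead of the defaults.
  static BatchWriter createTunedWriter(Connector conn, String table) throws TableNotFoundException {
    BatchWriterConfig config = new BatchWriterConfig()
        .setMaxMemory(64 * 1024 * 1024)        // buffer up to 64 MB of mutations client-side
        .setMaxLatency(30, TimeUnit.SECONDS)   // flush buffered mutations at least every 30 seconds
        .setMaxWriteThreads(4);                // send mutations to tablet servers on 4 threads
    return conn.createBatchWriter(table, config);
  }
}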
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
From the class SplitRecoveryIT, method test:
@Test
public void test() throws Exception {
  String tableName = getUniqueNames(1)[0];
  for (int tn = 0; tn < 2; tn++) {
    Connector connector = getConnector();
    // create a table and put some data in it
    connector.tableOperations().create(tableName);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    bw.addMutation(m("a"));
    bw.addMutation(m("b"));
    bw.addMutation(m("c"));
    bw.close();
    // take the table offline
    connector.tableOperations().offline(tableName);
    while (!isOffline(tableName, connector))
      sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
    // poke a partial split into the metadata table
    connector.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
    Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    KeyExtent extent = new KeyExtent(tableId, null, new Text("b"));
    Mutation m = extent.getPrevRowUpdateMutation();
    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
    bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    bw.addMutation(m);
    if (tn == 1) {
      bw.flush();
      try (Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetadataRange());
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        KeyExtent extent2 = new KeyExtent(tableId, new Text("b"), null);
        m = extent2.getPrevRowUpdateMutation();
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
        TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
        for (Entry<Key, Value> entry : scanner) {
          m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
        }
        bw.addMutation(m);
      }
    }
    bw.close();
    // bring the table online
    connector.tableOperations().online(tableName);
    // verify the tablets went online
    try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
      int i = 0;
      String[] expected = { "a", "b", "c" };
      for (Entry<Key, Value> entry : scanner) {
        assertEquals(expected[i], entry.getKey().getRow().toString());
        i++;
      }
      assertEquals(3, i);
      connector.tableOperations().delete(tableName);
    }
  }
}
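The m(...) and isOffline(...) helpers are defined elsewhere in the test class and are not shown here. A plausible minimal version of m(...), assuming each row only needs a single placeholder cell for the scan assertions (the column names and value are illustrative):

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;

class SplitRecoveryHelpers {
  // Hypothetical stand-in for the m(...) helper: a mutation for the given row
  // carrying one arbitrary cell, enough for the row-level assertions above.
  static Mutation m(String row) {
    Mutation m = new Mutation(row);
    m.put("cf", "cq", new Value("value".getBytes()));
    return m;
  }
}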
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
From the class TableOperationsIT, method getDiskUsage:
@Test
public void getDiskUsage() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
  final String[] names = getUniqueNames(2);
  String tableName = names[0];
  connector.tableOperations().create(tableName);
  // verify 0 disk usage
  List<DiskUsage> diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
  assertEquals(1, diskUsages.size());
  assertEquals(1, diskUsages.get(0).getTables().size());
  assertEquals(Long.valueOf(0), diskUsages.get(0).getUsage());
  assertEquals(tableName, diskUsages.get(0).getTables().first());
  // add some data
  BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("a");
  m.put("b", "c", new Value("abcde".getBytes()));
  bw.addMutation(m);
  bw.flush();
  bw.close();
  connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
  // verify we have usage
  diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
  assertEquals(1, diskUsages.size());
  assertEquals(1, diskUsages.get(0).getTables().size());
  assertTrue(diskUsages.get(0).getUsage() > 0);
  assertEquals(tableName, diskUsages.get(0).getTables().first());
  String newTable = names[1];
  // clone table
  connector.tableOperations().clone(tableName, newTable, false, null, null);
  // verify tables are exactly the same
  Set<String> tables = new HashSet<>();
  tables.add(tableName);
  tables.add(newTable);
  diskUsages = connector.tableOperations().getDiskUsage(tables);
  assertEquals(1, diskUsages.size());
  assertEquals(2, diskUsages.get(0).getTables().size());
  assertTrue(diskUsages.get(0).getUsage() > 0);
  connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
  connector.tableOperations().compact(newTable, new Text("A"), new Text("z"), true, true);
  // verify tables have differences
  diskUsages = connector.tableOperations().getDiskUsage(tables);
  assertEquals(2, diskUsages.size());
  assertEquals(1, diskUsages.get(0).getTables().size());
  assertEquals(1, diskUsages.get(1).getTables().size());
  assertTrue(diskUsages.get(0).getUsage() > 0);
  assertTrue(diskUsages.get(1).getUsage() > 0);
  connector.tableOperations().delete(tableName);
}
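The assertions above rely on how getDiskUsage groups its results: each DiskUsage entry lists the set of tables that share the same files together with their combined size, which is why the fresh clone first shows up in a single entry with its source table and only splits into a separate entry after both tables are compacted. A small sketch of inspecting that structure (the report method is illustrative; the DiskUsage accessors are from the Accumulo 1.x admin API):

import java.util.Collections;
import java.util.List;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.admin.DiskUsage;

public class DiskUsageReport {
  // Prints each group of tables that share files along with the bytes they use.
  static void report(Connector connector, String table) throws Exception {
    List<DiskUsage> usages = connector.tableOperations().getDiskUsage(Collections.singleton(table));
    for (DiskUsage usage : usages) {
      System.out.println(usage.getTables() + " -> " + usage.getUsage() + " bytes");
    }
  }
}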
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
From the class TableOperationsIT, method createMergeClonedTable:
@Test
public void createMergeClonedTable() throws Exception {
  String[] names = getUniqueNames(2);
  String originalTable = names[0];
  TableOperations tops = connector.tableOperations();
  TreeSet<Text> splits = Sets.newTreeSet(Arrays.asList(new Text("a"), new Text("b"), new Text("c"), new Text("d")));
  tops.create(originalTable);
  tops.addSplits(originalTable, splits);
  // write a 10x10 grid of cells for each split row; each value is the sum of its column indices
  BatchWriter bw = connector.createBatchWriter(originalTable, new BatchWriterConfig());
  for (Text row : splits) {
    Mutation m = new Mutation(row);
    for (int i = 0; i < 10; i++) {
      for (int j = 0; j < 10; j++) {
        m.put(Integer.toString(i), Integer.toString(j), Integer.toString(i + j));
      }
    }
    bw.addMutation(m);
  }
  bw.close();
  // clone the table, then merge everything up to row "b" in the clone
  String clonedTable = names[1];
  tops.clone(originalTable, clonedTable, true, null, null);
  tops.merge(clonedTable, null, new Text("b"));
  Map<String, Integer> rowCounts = new HashMap<>();
  try (Scanner s = connector.createScanner(clonedTable, new Authorizations())) {
    for (Entry<Key, Value> entry : s) {
      final Key key = entry.getKey();
      String row = key.getRow().toString();
      String cf = key.getColumnFamily().toString(), cq = key.getColumnQualifier().toString();
      String value = entry.getValue().toString();
      if (rowCounts.containsKey(row)) {
        rowCounts.put(row, rowCounts.get(row) + 1);
      } else {
        rowCounts.put(row, 1);
      }
      Assert.assertEquals(Integer.parseInt(cf) + Integer.parseInt(cq), Integer.parseInt(value));
    }
  }
  // after the merge, only the splits at and beyond "b" should remain on the clone
  Collection<Text> clonedSplits = tops.listSplits(clonedTable);
  Set<Text> expectedSplits = Sets.newHashSet(new Text("b"), new Text("c"), new Text("d"));
  for (Text clonedSplit : clonedSplits) {
    Assert.assertTrue("Encountered unexpected split on the cloned table: " + clonedSplit, expectedSplits.remove(clonedSplit));
  }
  Assert.assertTrue("Did not find all expected splits on the cloned table: " + expectedSplits, expectedSplits.isEmpty());
}
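The clone call above passes null for both property arguments, so the clone keeps every property of the source table. A minimal sketch of the same TableOperations.clone call with explicit property handling (the specific property names and values are illustrative):

import java.util.Collections;
import java.util.Map;
import java.util.Set;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.admin.TableOperations;

public class CloneWithProperties {
  // Clones a table while overriding one property on the clone and excluding
  // another property from being copied at all.
  static void cloneTuned(Connector connector, String source, String clone) throws Exception {
    TableOperations tops = connector.tableOperations();
    Map<String, String> propertiesToSet = Collections.singletonMap("table.compaction.major.ratio", "2");
    Set<String> propertiesToExclude = Collections.singleton("table.split.threshold");
    // flush = true forces in-memory data in the source table to be written out before cloning
    tops.clone(source, clone, true, propertiesToSet, propertiesToExclude);
  }
}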