Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
From class MergeIT, method merge:
@Test
public void merge() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
  BatchWriter bw = c.createBatchWriter(tableName, null);
  for (String row : "a b c d e f g h i j k".split(" ")) {
    Mutation m = new Mutation(row);
    m.put("cf", "cq", "value");
    bw.addMutation(m);
  }
  bw.close();
  c.tableOperations().flush(tableName, null, null, true);
  c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
  assertEquals(8, c.tableOperations().listSplits(tableName).size());
}
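Merging the range (c1, f1] removes the d, e, and f split points, so 8 of the original 11 splits remain, which the final assertion checks. Note that the test passes null for the BatchWriterConfig, which falls back to the defaults. A minimal sketch of the same write loop with an explicit, tuned config; the memory, latency, and thread values here are illustrative, not taken from the test:

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;

public class ExplicitConfigSketch {

  // illustrative values; tune for the actual workload
  static void writeRows(Connector c, String tableName) throws Exception {
    BatchWriterConfig config = new BatchWriterConfig()
        .setMaxMemory(1024 * 1024)           // buffer up to 1 MB of mutations client-side
        .setMaxLatency(30, TimeUnit.SECONDS) // flush buffered mutations at least every 30 s
        .setMaxWriteThreads(4);              // send to tablet servers with up to 4 threads
    BatchWriter bw = c.createBatchWriter(tableName, config);
    try {
      for (String row : "a b c d e f g h i j k".split(" ")) {
        Mutation m = new Mutation(row);
        m.put("cf", "cq", "value");
        bw.addMutation(m);
      }
    } finally {
      bw.close(); // close() flushes any remaining buffered mutations
    }
  }
}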
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
From class MergeIT, method runMergeTest:
private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end) throws Exception {
  System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
  conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
  TreeSet<Text> splitSet = new TreeSet<>();
  for (String split : splits) {
    splitSet.add(new Text(split));
  }
  conn.tableOperations().addSplits(table, splitSet);
  BatchWriter bw = conn.createBatchWriter(table, null);
  HashSet<String> expected = new HashSet<>();
  for (String row : inserts) {
    Mutation m = new Mutation(row);
    m.put("cf", "cq", row);
    bw.addMutation(m);
    expected.add(row);
  }
  bw.close();
  conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
  try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
    HashSet<String> observed = new HashSet<>();
    for (Entry<Key,Value> entry : scanner) {
      String row = entry.getKey().getRowData().toString();
      if (!observed.add(row)) {
        throw new Exception("Saw data twice " + table + " " + row);
      }
    }
    if (!observed.equals(expected)) {
      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
    }
    HashSet<Text> currentSplits = new HashSet<>(conn.tableOperations().listSplits(table));
    HashSet<Text> ess = new HashSet<>();
    for (String es : expectedSplits) {
      ess.add(new Text(es));
    }
    if (!currentSplits.equals(ess)) {
      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
    }
  }
}
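A hypothetical invocation of this helper, assuming it is called from within the same test class where getConnector() and getUniqueNames() are available; the split, row, and range values below are illustrative:

// hypothetical call site; data is illustrative
String table = getUniqueNames(1)[0];
runMergeTest(getConnector(), table,
    new String[] { "b", "d", "f" },      // initial splits
    new String[] { "b", "f" },           // splits expected to survive merging (b, f]
    new String[] { "a", "c", "e", "g" }, // rows to insert; all must still be readable after the merge
    "b", "f");                           // merge range start and end

Only the "d" split point falls strictly inside the merge range, so it is the only one removed; merging never deletes data, which is why the helper expects every inserted row to be observed exactly once afterward.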
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
From class MetadataBatchScanTest, method main:
public static void main(String[] args) throws Exception {
  ClientOpts opts = new ClientOpts();
  opts.parseArgs(MetadataBatchScanTest.class.getName(), args);
  Instance inst = new ZooKeeperInstance(ClientConfiguration.create().withInstance("acu14").withZkHosts("localhost"));
  final Connector connector = inst.getConnector(opts.getPrincipal(), opts.getToken());
  // generate 99,999 distinct random split points in [0, 10^12)
  TreeSet<Long> splits = new TreeSet<>();
  Random r = new Random(42);
  while (splits.size() < 99999) {
    splits.add((r.nextLong() & 0x7fffffffffffffffL) % 1000000000000L);
  }
  Table.ID tid = Table.ID.of("8");
  Text per = null;
  ArrayList<KeyExtent> extents = new ArrayList<>();
  for (Long split : splits) {
    Text er = new Text(String.format("%012d", split));
    KeyExtent ke = new KeyExtent(tid, er, per);
    per = er;
    extents.add(ke);
  }
  extents.add(new KeyExtent(tid, null, per));
  if (args[0].equals("write")) {
    // write a prev-row and location entry to the metadata table for each extent
    BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    for (KeyExtent extent : extents) {
      Mutation mut = extent.getPrevRowUpdateMutation();
      new TServerInstance(HostAndPort.fromParts("192.168.1.100", 4567), "DEADBEEF").putLocation(mut);
      bw.addMutation(mut);
    }
    bw.close();
  } else if (args[0].equals("writeFiles")) {
    // write a directory entry and five data file entries for each extent
    BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    for (KeyExtent extent : extents) {
      Mutation mut = new Mutation(extent.getMetadataEntry());
      String dir = "/t-" + UUID.randomUUID();
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes(UTF_8)));
      for (int i = 0; i < 5; i++) {
        mut.put(DataFileColumnFamily.NAME, new Text(dir + "/00000_0000" + i + ".map"), new DataFileValue(10000, 1000000).encodeAsValue());
      }
      bw.addMutation(mut);
    }
    bw.close();
  } else if (args[0].equals("scan")) {
    int numThreads = Integer.parseInt(args[1]);
    final int numLoop = Integer.parseInt(args[2]);
    int numLookups = Integer.parseInt(args[3]);
    // pick numLookups distinct extents to look up
    HashSet<Integer> indexes = new HashSet<>();
    while (indexes.size() < numLookups) {
      indexes.add(r.nextInt(extents.size()));
    }
    final List<Range> ranges = new ArrayList<>();
    for (Integer i : indexes) {
      ranges.add(extents.get(i).toMetadataRange());
    }
    // run the scan test concurrently and report total wall-clock time
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < threads.length; i++) {
      threads[i] = new Thread(new Runnable() {

        @Override
        public void run() {
          try {
            System.out.println(runScanTest(connector, numLoop, ranges));
          } catch (Exception e) {
            log.error("Exception while running scan test.", e);
          }
        }
      });
    }
    long t1 = System.currentTimeMillis();
    for (Thread thread : threads) {
      thread.start();
    }
    for (Thread thread : threads) {
      thread.join();
    }
    long t2 = System.currentTimeMillis();
    System.out.printf("tt : %6.2f%n", (t2 - t1) / 1000.0);
  } else {
    throw new IllegalArgumentException();
  }
}
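The runScanTest helper is referenced above but not shown. A minimal sketch of what such a helper could look like, assuming it batch-scans the given metadata ranges numLoop times and reports an entry count and elapsed time; this is an illustration under those assumptions, not the project's actual implementation:

import java.util.List;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.security.Authorizations;

public class ScanTestSketch {

  // illustrative stand-in for the runScanTest helper referenced above
  static String runScanTest(Connector connector, int numLoop, List<Range> ranges) throws Exception {
    long count = 0;
    long t1 = System.currentTimeMillis();
    for (int i = 0; i < numLoop; i++) {
      // 8 query threads is an arbitrary illustrative choice
      BatchScanner bs = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
      try {
        bs.setRanges(ranges);
        for (Entry<Key,Value> entry : bs) {
          count++;
        }
      } finally {
        bs.close();
      }
    }
    double secs = (System.currentTimeMillis() - t1) / 1000.0;
    return String.format("loops: %d entries: %d time: %.2fs", numLoop, count, secs);
  }
}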
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
From class PermissionsIT, method createTestTable:
private void createTestTable(Connector c, String testUser, String tableName) throws Exception {
  if (!c.tableOperations().exists(tableName)) {
    // create the test table
    c.tableOperations().create(tableName);
    // put in some initial data
    BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("row"));
    m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
    writer.addMutation(m);
    writer.close();
    // verify proper permissions for creator and test user
    verifyHasOnlyTheseTablePermissions(c, c.whoami(), tableName, TablePermission.values());
    verifyHasNoTablePermissions(c, testUser, tableName, TablePermission.values());
  }
}
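The two verify helpers belong to the test class and are not shown here. A minimal sketch of how such a check could be written against SecurityOperations.hasTablePermission; this is an illustration, not the project's actual helper:

import java.util.Arrays;
import java.util.EnumSet;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.security.TablePermission;

public class PermissionCheckSketch {

  // illustrative stand-in for the verification helpers used above:
  // the principal must hold exactly the listed permissions, no more, no fewer
  static void verifyTablePermissions(Connector c, String principal, String table, TablePermission... expected) throws Exception {
    EnumSet<TablePermission> wanted = expected.length == 0
        ? EnumSet.noneOf(TablePermission.class)
        : EnumSet.copyOf(Arrays.asList(expected));
    for (TablePermission perm : TablePermission.values()) {
      boolean has = c.securityOperations().hasTablePermission(principal, table, perm);
      if (has != wanted.contains(perm)) {
        throw new Exception("unexpected permission state: " + principal + " " + perm + " = " + has);
      }
    }
  }
}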
Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.
From class PermissionsIT, method testGrantedTablePermission:
private void testGrantedTablePermission(Connector test_user_conn, ClusterUser normalUser, TablePermission perm, String tableName) throws AccumuloException, TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
  BatchWriter writer;
  Mutation m;
  log.debug("Confirming that the presence of the {} permission properly permits the user", perm);
  // test permission after granting it
  switch (perm) {
    case READ:
      try (Scanner scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY)) {
        Iterator<Entry<Key,Value>> iter = scanner.iterator();
        while (iter.hasNext())
          iter.next();
      }
      break;
    case WRITE:
      writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
      m = new Mutation(new Text("row"));
      m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
      writer.addMutation(m);
      writer.close();
      break;
    case BULK_IMPORT:
      // test for bulk import permission would go here
      break;
    case ALTER_TABLE:
      // setting locality groups exercises the ALTER_TABLE permission
      Map<String,Set<Text>> groups = new HashMap<>();
      groups.put("tgroup", new HashSet<>(Arrays.asList(new Text("t1"), new Text("t2"))));
      test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
      break;
    case DROP_TABLE:
      test_user_conn.tableOperations().delete(tableName);
      break;
    case GRANT:
      test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
      break;
    case GET_SUMMARIES:
      List<Summary> summaries = test_user_conn.tableOperations().summaries(tableName).retrieve();
      // just make sure it's not blocked by permissions; the actual summaries are tested in SummaryIT
      Assert.assertTrue(summaries.isEmpty());
      break;
    default:
      throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
  }
}
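Before any case in this switch can succeed, the permission under test must already have been granted to the user behind test_user_conn. A minimal sketch of that grant step, performed over an admin connection; the connection variable and principal name are illustrative:

// hypothetical grant step run by an admin connection before invoking
// testGrantedTablePermission; "test_user" is an illustrative principal name
adminConn.securityOperations().grantTablePermission("test_user", tableName, TablePermission.WRITE);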