use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class MetadataBatchScanTest method main.
public static void main(String[] args) throws Exception {
  ClientOpts opts = new ClientOpts();
  opts.parseArgs(MetadataBatchScanTest.class.getName(), args);
  Instance inst = new ZooKeeperInstance(ClientConfiguration.create().withInstance("acu14").withZkHosts("localhost"));
  final Connector connector = inst.getConnector(opts.getPrincipal(), opts.getToken());
  // Generate 99,999 random split points, which define 100,000 tablet extents.
  TreeSet<Long> splits = new TreeSet<>();
  Random r = new Random(42);
  while (splits.size() < 99999) {
    splits.add((r.nextLong() & 0x7fffffffffffffffL) % 1000000000000L);
  }
  Table.ID tid = Table.ID.of("8");
  Text per = null;
  ArrayList<KeyExtent> extents = new ArrayList<>();
  for (Long split : splits) {
    Text er = new Text(String.format("%012d", split));
    KeyExtent ke = new KeyExtent(tid, er, per);
    per = er;
    extents.add(ke);
  }
  // The final extent covers everything after the last split point.
  extents.add(new KeyExtent(tid, null, per));
  if (args[0].equals("write")) {
    // Write a tserver location entry for every extent to the metadata table.
    BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    for (KeyExtent extent : extents) {
      Mutation mut = extent.getPrevRowUpdateMutation();
      new TServerInstance(HostAndPort.fromParts("192.168.1.100", 4567), "DEADBEEF").putLocation(mut);
      bw.addMutation(mut);
    }
    bw.close();
  } else if (args[0].equals("writeFiles")) {
    // Write a directory entry and five fake data file entries per extent.
    BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    for (KeyExtent extent : extents) {
      Mutation mut = new Mutation(extent.getMetadataEntry());
      String dir = "/t-" + UUID.randomUUID();
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes(UTF_8)));
      for (int i = 0; i < 5; i++) {
        mut.put(DataFileColumnFamily.NAME, new Text(dir + "/00000_0000" + i + ".map"), new DataFileValue(10000, 1000000).encodeAsValue());
      }
      bw.addMutation(mut);
    }
    bw.close();
  } else if (args[0].equals("scan")) {
    // Time numThreads concurrent scanners, each doing numLoop passes over
    // numLookups randomly chosen tablet metadata ranges.
    int numThreads = Integer.parseInt(args[1]);
    final int numLoop = Integer.parseInt(args[2]);
    int numLookups = Integer.parseInt(args[3]);
    HashSet<Integer> indexes = new HashSet<>();
    while (indexes.size() < numLookups) {
      indexes.add(r.nextInt(extents.size()));
    }
    final List<Range> ranges = new ArrayList<>();
    for (Integer i : indexes) {
      ranges.add(extents.get(i).toMetadataRange());
    }
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < threads.length; i++) {
      threads[i] = new Thread(new Runnable() {
        @Override
        public void run() {
          try {
            // runScanTest is not included in this snippet; a sketch follows the method.
            System.out.println(runScanTest(connector, numLoop, ranges));
          } catch (Exception e) {
            log.error("Exception while running scan test.", e);
          }
        }
      });
    }
    long t1 = System.currentTimeMillis();
    for (Thread thread : threads) {
      thread.start();
    }
    for (Thread thread : threads) {
      thread.join();
    }
    long t2 = System.currentTimeMillis();
    System.out.printf("tt : %6.2f%n", (t2 - t1) / 1000.0);
  } else {
    throw new IllegalArgumentException();
  }
}
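The runScanTest helper invoked by each thread is part of MetadataBatchScanTest but is not included in this snippet; only its call site fixes the parameter list. A minimal sketch of what such a helper could look like, assuming it returns a printable timing summary (the signature is inferred from the call site and the body is illustrative, not the project's actual implementation):

private static String runScanTest(Connector connector, int numLoop, List<Range> ranges) throws Exception {
  long totalMillis = 0;
  long entries = 0;
  for (int i = 0; i < numLoop; i++) {
    long start = System.currentTimeMillis();
    // Batch scan the metadata table over the randomly chosen tablet ranges.
    try (BatchScanner scanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 16)) {
      scanner.setRanges(ranges);
      for (Entry<Key,Value> entry : scanner) {
        entries++;
      }
    }
    totalMillis += System.currentTimeMillis() - start;
  }
  return String.format("%d ranges, %d entries, avg %6.2f s/loop", ranges.size(), entries, (totalMillis / 1000.0) / numLoop);
}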
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class MergeIT method mergeTest.
@Test
public void mergeTest() throws Exception {
  int tc = 0;
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  runMergeTest(c, tableName + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
  runMergeTest(c, tableName + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
  runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
  runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
  runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
}
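The runMergeTest helper and the ns(...) varargs builder are defined elsewhere in MergeIT and are not shown here; each call above exercises TableOperations.merge against a different combination of initial splits and merge range. As a self-contained sketch of the underlying client API those helpers drive (the table name and split points below are illustrative, not taken from the test):

static void mergeSketch(Connector c) throws Exception {
  // Illustrative table name and split points; not part of MergeIT.
  String table = "mergeDemo";
  SortedSet<Text> splits = new TreeSet<>();
  for (String s : new String[] {"b", "m", "r"})
    splits.add(new Text(s));
  c.tableOperations().create(table);
  c.tableOperations().addSplits(table, splits);
  // Merge the tablets in the range ("b", "r"]; a null bound means unbounded.
  // Splits strictly inside the merged range (here "m") are removed.
  c.tableOperations().merge(table, new Text("b"), new Text("r"));
  System.out.println(c.tableOperations().listSplits(table));
}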
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class MetadataIT method batchScanTest.
@Test
public void batchScanTest() throws Exception {
  Connector c = getConnector();
  String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  // batch scan regular metadata table
  int count = 0;
  try (BatchScanner s = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
    s.setRanges(Collections.singleton(new Range()));
    for (Entry<Key, Value> e : s) {
      if (e != null)
        count++;
    }
  }
  assertTrue(count > 0);
  // batch scan root metadata table
  try (BatchScanner s = c.createBatchScanner(RootTable.NAME, Authorizations.EMPTY, 1)) {
    s.setRanges(Collections.singleton(new Range()));
    count = 0;
    for (Entry<Key, Value> e : s) {
      if (e != null)
        count++;
    }
    assertTrue(count > 0);
  }
}
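The singleton new Range() above covers the entire metadata table. The same BatchScanner pattern can be scoped to one table's tablets by building the range from a whole-table KeyExtent, mirroring the toMetadataRange usage in the first snippet. A small sketch, where the table id "2" is a placeholder:

try (BatchScanner s = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
  // A KeyExtent with a null end row and null prev end row spans the whole table.
  s.setRanges(Collections.singleton(new KeyExtent(Table.ID.of("2"), null, null).toMetadataRange()));
  int entries = 0;
  for (Entry<Key, Value> e : s)
    entries++;
  System.out.println(entries + " metadata entries for table 2");
}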
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class MetadataIT method testFlushAndCompact.
@Test
public void testFlushAndCompact() throws Exception {
  Connector c = getConnector();
  String[] tableNames = getUniqueNames(2);
  // create a table to write some data to metadata table
  c.tableOperations().create(tableNames[0]);
  try (Scanner rootScanner = c.createScanner(RootTable.NAME, Authorizations.EMPTY)) {
    rootScanner.setRange(MetadataSchema.TabletsSection.getRange());
    rootScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    Set<String> files1 = new HashSet<>();
    for (Entry<Key, Value> entry : rootScanner)
      files1.add(entry.getKey().getColumnQualifier().toString());
    c.tableOperations().create(tableNames[1]);
    c.tableOperations().flush(MetadataTable.NAME, null, null, true);
    Set<String> files2 = new HashSet<>();
    for (Entry<Key, Value> entry : rootScanner)
      files2.add(entry.getKey().getColumnQualifier().toString());
    // flush of metadata table should change file set in root table
    Assert.assertTrue(files2.size() > 0);
    Assert.assertNotEquals(files1, files2);
    c.tableOperations().compact(MetadataTable.NAME, null, null, false, true);
    Set<String> files3 = new HashSet<>();
    for (Entry<Key, Value> entry : rootScanner)
      files3.add(entry.getKey().getColumnQualifier().toString());
    // compaction of metadata table should change file set in root table
    Assert.assertNotEquals(files2, files3);
  }
}
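The same collect-the-file-names loop appears three times above; because a Scanner starts a fresh scan each time it is iterated, the loop extracts cleanly into a helper. One possible refactoring (not part of MetadataIT):

private static Set<String> fileSet(Scanner rootScanner) {
  // Each iteration of a Scanner runs a fresh scan, so this helper can be
  // called repeatedly on the same configured scanner.
  Set<String> files = new HashSet<>();
  for (Entry<Key, Value> entry : rootScanner)
    files.add(entry.getKey().getColumnQualifier().toString());
  return files;
}

Each pass would then read, for example, Set<String> files1 = fileSet(rootScanner);.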
use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
the class MetadataSplitIT method test.
@Test
public void test() throws Exception {
  Connector c = getConnector();
  assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());
  c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
  for (int i = 0; i < 10; i++) {
    c.tableOperations().create("table" + i);
    c.tableOperations().flush(MetadataTable.NAME, null, null, true);
  }
  sleepUninterruptibly(10, TimeUnit.SECONDS);
  assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
}
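The fixed sleepUninterruptibly(10, TimeUnit.SECONDS) gives the master time to split the now-small metadata tablets, but it makes the test timing-sensitive. A bounded poll is one less fragile alternative; a sketch, with an arbitrary 60-second deadline not taken from the test:

// Poll for the expected split count instead of sleeping a fixed interval.
long deadline = System.currentTimeMillis() + 60_000;
while (c.tableOperations().listSplits(MetadataTable.NAME).size() <= 2
    && System.currentTimeMillis() < deadline) {
  Thread.sleep(500);
}
assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);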