Search in sources:

Example 96 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

The class MetadataBatchScanTest, method main.

/**
 * Command-line driver for benchmarking batch scans of the Accumulo metadata table.
 *
 * <p>The mode is selected by the first positional argument:
 * <ul>
 *   <li>{@code write} - writes prev-row/location mutations for ~100k synthetic tablets</li>
 *   <li>{@code writeFiles} - writes directory and data-file entries for the same tablets</li>
 *   <li>{@code scan} - runs concurrent batch scans; expects three more arguments:
 *       numThreads, numLoop, numLookups</li>
 * </ul>
 *
 * @param args client options followed by the mode and any mode-specific arguments
 * @throws Exception on any client, threading, or I/O failure
 */
public static void main(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(MetadataBatchScanTest.class.getName(), args);
    Instance inst = new ZooKeeperInstance(ClientConfiguration.create().withInstance("acu14").withZkHosts("localhost"));
    final Connector connector = inst.getConnector(opts.getPrincipal(), opts.getToken());
    TreeSet<Long> splits = new TreeSet<>();
    // Fixed seed so the write and scan phases generate the identical split points.
    Random r = new Random(42);
    while (splits.size() < 99999) {
        // Mask the sign bit before bounding; uppercase L suffix avoids the l/1 ambiguity.
        splits.add((r.nextLong() & 0x7fffffffffffffffL) % 1000000000000L);
    }
    Table.ID tid = Table.ID.of("8");
    Text per = null;
    ArrayList<KeyExtent> extents = new ArrayList<>();
    for (Long split : splits) {
        Text er = new Text(String.format("%012d", split));
        KeyExtent ke = new KeyExtent(tid, er, per);
        per = er;
        extents.add(ke);
    }
    // Final extent covers everything after the last split (null end row).
    extents.add(new KeyExtent(tid, null, per));
    if (args.length < 1) {
        throw new IllegalArgumentException("Expected a mode argument: write, writeFiles, or scan");
    }
    switch (args[0]) {
        case "write": {
            BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
            try {
                for (KeyExtent extent : extents) {
                    Mutation mut = extent.getPrevRowUpdateMutation();
                    new TServerInstance(HostAndPort.fromParts("192.168.1.100", 4567), "DEADBEEF").putLocation(mut);
                    bw.addMutation(mut);
                }
            } finally {
                // Ensure buffered mutations are flushed (or the writer released) even on failure.
                bw.close();
            }
            break;
        }
        case "writeFiles": {
            BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
            try {
                for (KeyExtent extent : extents) {
                    Mutation mut = new Mutation(extent.getMetadataEntry());
                    String dir = "/t-" + UUID.randomUUID();
                    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes(UTF_8)));
                    // Five synthetic map files per tablet, each claiming 10000 bytes / 1000000 entries.
                    for (int i = 0; i < 5; i++) {
                        mut.put(DataFileColumnFamily.NAME, new Text(dir + "/00000_0000" + i + ".map"), new DataFileValue(10000, 1000000).encodeAsValue());
                    }
                    bw.addMutation(mut);
                }
            } finally {
                bw.close();
            }
            break;
        }
        case "scan": {
            int numThreads = Integer.parseInt(args[1]);
            final int numLoop = Integer.parseInt(args[2]);
            int numLookups = Integer.parseInt(args[3]);
            // Pick numLookups distinct random tablets to look up.
            HashSet<Integer> indexes = new HashSet<>();
            while (indexes.size() < numLookups) {
                indexes.add(r.nextInt(extents.size()));
            }
            final List<Range> ranges = new ArrayList<>();
            for (Integer i : indexes) {
                ranges.add(extents.get(i).toMetadataRange());
            }
            Thread[] threads = new Thread[numThreads];
            for (int i = 0; i < threads.length; i++) {
                threads[i] = new Thread(new Runnable() {

                    @Override
                    public void run() {
                        try {
                            System.out.println(runScanTest(connector, numLoop, ranges));
                        } catch (Exception e) {
                            log.error("Exception while running scan test.", e);
                        }
                    }
                });
            }
            // Time the full fan-out: start all threads, then join them all.
            long t1 = System.currentTimeMillis();
            for (Thread thread : threads) {
                thread.start();
            }
            for (Thread thread : threads) {
                thread.join();
            }
            long t2 = System.currentTimeMillis();
            System.out.printf("tt : %6.2f%n", (t2 - t1) / 1000.0);
            break;
        }
        default:
            throw new IllegalArgumentException("Unknown mode: " + args[0]);
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Instance(org.apache.accumulo.core.client.Instance) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) ArrayList(java.util.ArrayList) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) Random(java.util.Random) TreeSet(java.util.TreeSet) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) HashSet(java.util.HashSet) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) ClientOpts(org.apache.accumulo.core.cli.ClientOpts) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)

Example 97 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

The class MergeIT, method mergeTest.

/**
 * Exercises {@code runMergeTest} over a matrix of merge scenarios: the initial
 * splits, the splits expected to survive the merge, the rows written, and the
 * inclusive start/exclusive end bounds of each merge range.
 */
@Test
public void mergeTest() throws Exception {
    Connector conn = getConnector();
    String baseName = getUniqueNames(1)[0];
    int caseNum = 0;
    // No splits, merge over various ranges.
    runMergeTest(conn, baseName + caseNum++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
    runMergeTest(conn, baseName + caseNum++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
    runMergeTest(conn, baseName + caseNum++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
    runMergeTest(conn, baseName + caseNum++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
    // Three splits, merging away all, some, or none of them.
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
    runMergeTest(conn, baseName + caseNum++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Test(org.junit.Test)

Example 98 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

The class MetadataIT, method batchScanTest.

/**
 * Verifies that both the regular metadata table and the root table can be
 * read with a {@link BatchScanner} and contain at least one entry.
 */
@Test
public void batchScanTest() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    // Creating a table guarantees the metadata table has entries to scan.
    c.tableOperations().create(tableName);
    // batch scan regular metadata table
    try (BatchScanner s = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
        s.setRanges(Collections.singleton(new Range()));
        assertTrue(countEntries(s) > 0);
    }
    // batch scan root metadata table
    try (BatchScanner s = c.createBatchScanner(RootTable.NAME, Authorizations.EMPTY, 1)) {
        s.setRanges(Collections.singleton(new Range()));
        assertTrue(countEntries(s) > 0);
    }
}

/** Counts the entries returned by an already-configured batch scanner. */
private static long countEntries(BatchScanner s) {
    long count = 0;
    for (Entry<Key, Value> e : s) {
        if (e != null)
            count++;
    }
    return count;
}
Also used : Connector(org.apache.accumulo.core.client.Connector) BatchScanner(org.apache.accumulo.core.client.BatchScanner) Value(org.apache.accumulo.core.data.Value) Range(org.apache.accumulo.core.data.Range) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 99 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

The class MetadataIT, method testFlushAndCompact.

/**
 * Verifies that flushing and then compacting the metadata table each change
 * the set of data files recorded for it in the root table.
 */
@Test
public void testFlushAndCompact() throws Exception {
    Connector c = getConnector();
    String[] tableNames = getUniqueNames(2);
    // create a table to write some data to metadata table
    c.tableOperations().create(tableNames[0]);
    try (Scanner rootScanner = c.createScanner(RootTable.NAME, Authorizations.EMPTY)) {
        rootScanner.setRange(MetadataSchema.TabletsSection.getRange());
        rootScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        Set<String> files1 = fileNames(rootScanner);
        c.tableOperations().create(tableNames[1]);
        c.tableOperations().flush(MetadataTable.NAME, null, null, true);
        Set<String> files2 = fileNames(rootScanner);
        // flush of metadata table should change file set in root table
        Assert.assertTrue(files2.size() > 0);
        Assert.assertNotEquals(files1, files2);
        c.tableOperations().compact(MetadataTable.NAME, null, null, false, true);
        Set<String> files3 = fileNames(rootScanner);
        // compaction of metadata table should change file set in root table
        Assert.assertNotEquals(files2, files3);
    }
}

/** Collects the column qualifiers (data-file names) currently visible to the scanner. */
private static Set<String> fileNames(Scanner scanner) {
    Set<String> files = new HashSet<>();
    for (Entry<Key, Value> entry : scanner) {
        files.add(entry.getKey().getColumnQualifier().toString());
    }
    return files;
}
Also used : Connector(org.apache.accumulo.core.client.Connector) BatchScanner(org.apache.accumulo.core.client.BatchScanner) Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 100 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

The class MetadataSplitIT, method test.

/**
 * Forces the metadata table to split by lowering its split threshold and
 * creating several tables, then verifies that additional splits appeared.
 */
@Test
public void test() throws Exception {
    Connector conn = getConnector();
    // The metadata table starts out with exactly one split.
    assertEquals(1, conn.tableOperations().listSplits(MetadataTable.NAME).size());
    // A tiny threshold makes the metadata table split as soon as it grows.
    conn.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
    for (int tableNum = 0; tableNum < 10; tableNum++) {
        conn.tableOperations().create("table" + tableNum);
        conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
    }
    // Give the master time to perform the splits before checking.
    sleepUninterruptibly(10, TimeUnit.SECONDS);
    assertTrue(conn.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Test(org.junit.Test)

Aggregations

Connector (org.apache.accumulo.core.client.Connector)622 Test (org.junit.Test)415 BatchWriter (org.apache.accumulo.core.client.BatchWriter)171 Value (org.apache.accumulo.core.data.Value)162 Text (org.apache.hadoop.io.Text)160 Scanner (org.apache.accumulo.core.client.Scanner)158 Mutation (org.apache.accumulo.core.data.Mutation)152 BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)143 Key (org.apache.accumulo.core.data.Key)139 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)101 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)87 AccumuloException (org.apache.accumulo.core.client.AccumuloException)83 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)75 Range (org.apache.accumulo.core.data.Range)74 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)65 Authorizations (org.apache.accumulo.core.security.Authorizations)60 HashSet (java.util.HashSet)57 Instance (org.apache.accumulo.core.client.Instance)55 ArrayList (java.util.ArrayList)53 Entry (java.util.Map.Entry)53