
Example 66 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From the class MergeIT, method merge:

@Test
public void merge() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
    BatchWriter bw = c.createBatchWriter(tableName, null);
    for (String row : "a b c d e f g h i j k".split(" ")) {
        Mutation m = new Mutation(row);
        m.put("cf", "cq", "value");
        bw.addMutation(m);
    }
    bw.close();
    c.tableOperations().flush(tableName, null, null, true);
    c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
    assertEquals(8, c.tableOperations().listSplits(tableName).size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
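
Example 66 passes null for the BatchWriterConfig, which falls back to the defaults. Below is a minimal sketch of the same write loop with an explicit config and a guarded close(); the method name, tuning values, and row data are illustrative assumptions, not part of MergeIT (java.util.concurrent.TimeUnit is assumed imported).

static void writeWithExplicitConfig(Connector c, String tableName) throws Exception {
    // Explicit configuration instead of null; these values are illustrative, not prescribed.
    BatchWriterConfig config = new BatchWriterConfig()
        .setMaxMemory(10 * 1024 * 1024)       // buffer up to ~10 MB of mutations client-side
        .setMaxLatency(2, TimeUnit.SECONDS)   // flush buffered mutations at least every 2 seconds
        .setMaxWriteThreads(4);               // threads used to send mutations to tablet servers
    BatchWriter bw = c.createBatchWriter(tableName, config);
    try {
        Mutation m = new Mutation("row1");
        m.put("cf", "cq", "value");
        bw.addMutation(m);
    } finally {
        // close() flushes any buffered mutations and may throw MutationsRejectedException
        bw.close();
    }
}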

Example 67 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From the class MergeIT, method runMergeTest:

private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end) throws Exception {
    System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
    TreeSet<Text> splitSet = new TreeSet<>();
    for (String split : splits) {
        splitSet.add(new Text(split));
    }
    conn.tableOperations().addSplits(table, splitSet);
    BatchWriter bw = conn.createBatchWriter(table, null);
    HashSet<String> expected = new HashSet<>();
    for (String row : inserts) {
        Mutation m = new Mutation(row);
        m.put("cf", "cq", row);
        bw.addMutation(m);
        expected.add(row);
    }
    bw.close();
    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
        HashSet<String> observed = new HashSet<>();
        for (Entry<Key, Value> entry : scanner) {
            String row = entry.getKey().getRowData().toString();
            if (!observed.add(row)) {
                throw new Exception("Saw data twice " + table + " " + row);
            }
        }
        if (!observed.equals(expected)) {
            throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
        }
        HashSet<Text> currentSplits = new HashSet<>(conn.tableOperations().listSplits(table));
        HashSet<Text> ess = new HashSet<>();
        for (String es : expectedSplits) {
            ess.add(new Text(es));
        }
        if (!currentSplits.equals(ess)) {
            throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
        }
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) TabletDeletedException(org.apache.accumulo.server.util.TabletIterator.TabletDeletedException) ExpectedException(org.junit.rules.ExpectedException) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet)
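
runMergeTest calls bw.close() without a handler, which is fine in a test; close() is where buffered writes surface failures. A minimal sketch of a guarded close, assuming the caller wants the constraint-violation details before rethrowing:

static void closeAndReport(BatchWriter bw) throws MutationsRejectedException {
    try {
        bw.close();
    } catch (MutationsRejectedException e) {
        // A rejected batch carries per-constraint summaries describing why mutations failed.
        System.err.println("writes rejected: " + e.getConstraintViolationSummaries());
        throw e;
    }
}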

Example 68 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From the class MetadataBatchScanTest, method main:

public static void main(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(MetadataBatchScanTest.class.getName(), args);
    Instance inst = new ZooKeeperInstance(ClientConfiguration.create().withInstance("acu14").withZkHosts("localhost"));
    final Connector connector = inst.getConnector(opts.getPrincipal(), opts.getToken());
    TreeSet<Long> splits = new TreeSet<>();
    Random r = new Random(42);
    while (splits.size() < 99999) {
        splits.add((r.nextLong() & 0x7fffffffffffffffL) % 1000000000000L);
    }
    Table.ID tid = Table.ID.of("8");
    Text per = null;
    ArrayList<KeyExtent> extents = new ArrayList<>();
    for (Long split : splits) {
        Text er = new Text(String.format("%012d", split));
        KeyExtent ke = new KeyExtent(tid, er, per);
        per = er;
        extents.add(ke);
    }
    extents.add(new KeyExtent(tid, null, per));
    if (args[0].equals("write")) {
        BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        for (KeyExtent extent : extents) {
            Mutation mut = extent.getPrevRowUpdateMutation();
            new TServerInstance(HostAndPort.fromParts("192.168.1.100", 4567), "DEADBEEF").putLocation(mut);
            bw.addMutation(mut);
        }
        bw.close();
    } else if (args[0].equals("writeFiles")) {
        BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        for (KeyExtent extent : extents) {
            Mutation mut = new Mutation(extent.getMetadataEntry());
            String dir = "/t-" + UUID.randomUUID();
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes(UTF_8)));
            for (int i = 0; i < 5; i++) {
                mut.put(DataFileColumnFamily.NAME, new Text(dir + "/00000_0000" + i + ".map"), new DataFileValue(10000, 1000000).encodeAsValue());
            }
            bw.addMutation(mut);
        }
        bw.close();
    } else if (args[0].equals("scan")) {
        int numThreads = Integer.parseInt(args[1]);
        final int numLoop = Integer.parseInt(args[2]);
        int numLookups = Integer.parseInt(args[3]);
        HashSet<Integer> indexes = new HashSet<>();
        while (indexes.size() < numLookups) {
            indexes.add(r.nextInt(extents.size()));
        }
        final List<Range> ranges = new ArrayList<>();
        for (Integer i : indexes) {
            ranges.add(extents.get(i).toMetadataRange());
        }
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        System.out.println(runScanTest(connector, numLoop, ranges));
                    } catch (Exception e) {
                        log.error("Exception while running scan test.", e);
                    }
                }
            });
        }
        long t1 = System.currentTimeMillis();
        for (Thread thread : threads) {
            thread.start();
        }
        for (Thread thread : threads) {
            thread.join();
        }
        long t2 = System.currentTimeMillis();
        System.out.printf("tt : %6.2f%n", (t2 - t1) / 1000.0);
    } else {
        throw new IllegalArgumentException();
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Instance(org.apache.accumulo.core.client.Instance) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) ArrayList(java.util.ArrayList) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Random(java.util.Random) TreeSet(java.util.TreeSet) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) HashSet(java.util.HashSet) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) ClientOpts(org.apache.accumulo.core.cli.ClientOpts) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
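
runScanTest is not part of this excerpt. A plausible minimal sketch of what such ranged metadata lookups could look like with a BatchScanner; the method name, query-thread count, and return value are assumptions, not the benchmark's actual code:

static long scanRanges(Connector connector, List<Range> ranges) throws Exception {
    long entries = 0;
    // A BatchScanner fetches the given ranges in parallel; 8 query threads is an illustrative choice.
    try (BatchScanner bs = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
        bs.setRanges(ranges);
        for (Entry<Key, Value> entry : bs) {
            entries++;
        }
    }
    return entries;
}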

Example 69 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From the class PermissionsIT, method createTestTable:

private void createTestTable(Connector c, String testUser, String tableName) throws Exception, MutationsRejectedException {
    if (!c.tableOperations().exists(tableName)) {
        // create the test table
        c.tableOperations().create(tableName);
        // put in some initial data
        BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
        Mutation m = new Mutation(new Text("row"));
        m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
        writer.addMutation(m);
        writer.close();
        // verify proper permissions for creator and test user
        verifyHasOnlyTheseTablePermissions(c, c.whoami(), tableName, TablePermission.values());
        verifyHasNoTablePermissions(c, testUser, tableName, TablePermission.values());
    }
}
Also used : Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
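
verifyHasOnlyTheseTablePermissions and verifyHasNoTablePermissions are private helpers of PermissionsIT and are not shown here. A minimal sketch of the kind of check they presumably build on, using the public SecurityOperations API (the method name is illustrative):

static void assertHasAllTablePermissions(Connector c, String user, String tableName) throws Exception {
    for (TablePermission perm : TablePermission.values()) {
        // hasTablePermission checks server-side whether the user holds this permission on the table
        if (!c.securityOperations().hasTablePermission(user, tableName, perm)) {
            throw new Exception("user " + user + " is missing " + perm + " on " + tableName);
        }
    }
}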

Example 70 with BatchWriter

Use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

From the class PermissionsIT, method testGrantedTablePermission:

private void testGrantedTablePermission(Connector test_user_conn, ClusterUser normalUser, TablePermission perm, String tableName) throws AccumuloException, TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
    BatchWriter writer;
    Mutation m;
    log.debug("Confirming that the presence of the {} permission properly permits the user", perm);
    // test permission after granting it
    switch(perm) {
        case READ:
            try (Scanner scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY)) {
                Iterator<Entry<Key, Value>> iter = scanner.iterator();
                while (iter.hasNext()) iter.next();
            }
            break;
        case WRITE:
            writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
            m = new Mutation(new Text("row"));
            m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
            writer.addMutation(m);
            writer.close();
            break;
        case BULK_IMPORT:
            // test for bulk import permission would go here
            break;
        case ALTER_TABLE:
            Map<String, Set<Text>> groups = new HashMap<>();
            groups.put("tgroup", new HashSet<>(Arrays.asList(new Text("t1"), new Text("t2"))));
            // Exercise ALTER_TABLE by actually applying the change; without this call the case is a no-op.
            test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
            break;
        case DROP_TABLE:
            test_user_conn.tableOperations().delete(tableName);
            break;
        case GRANT:
            test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
            break;
        case GET_SUMMARIES:
            List<Summary> summaries = test_user_conn.tableOperations().summaries(tableName).retrieve();
            // just make sure it's not blocked by permissions, the actual summaries are tested in SummaryIT
            Assert.assertTrue(summaries.isEmpty());
            break;
        default:
            throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) Entry(java.util.Map.Entry) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Summary(org.apache.accumulo.core.client.summary.Summary) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
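
The grant that precedes testGrantedTablePermission happens elsewhere in the test and is not part of this excerpt. A minimal sketch of that surrounding flow, assuming an admin connector grants and later revokes the permission; the method name, parameter names, and the choice of WRITE are illustrative:

static void grantWriteRunRevoke(Connector adminConn, Connector testUserConn, String user, String tableName) throws Exception {
    // Grant WRITE to the test user, exercise it with a write, then revoke it again.
    adminConn.securityOperations().grantTablePermission(user, tableName, TablePermission.WRITE);
    BatchWriter writer = testUserConn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "val");
    writer.addMutation(m);
    writer.close();
    adminConn.securityOperations().revokeTablePermission(user, tableName, TablePermission.WRITE);
}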

Aggregations

BatchWriter (org.apache.accumulo.core.client.BatchWriter): 402 uses
Mutation (org.apache.accumulo.core.data.Mutation): 360 uses
Test (org.junit.Test): 264 uses
Value (org.apache.accumulo.core.data.Value): 250 uses
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 246 uses
Text (org.apache.hadoop.io.Text): 194 uses
Key (org.apache.accumulo.core.data.Key): 179 uses
Scanner (org.apache.accumulo.core.client.Scanner): 174 uses
Connector (org.apache.accumulo.core.client.Connector): 169 uses
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 81 uses
Authorizations (org.apache.accumulo.core.security.Authorizations): 68 uses
Range (org.apache.accumulo.core.data.Range): 61 uses
Entry (java.util.Map.Entry): 51 uses
Map (java.util.Map): 50 uses
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 46 uses
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 44 uses
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 40 uses
HashMap (java.util.HashMap): 38 uses
ArrayList (java.util.ArrayList): 36 uses
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 32 uses