Example 86 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class MasterAssignmentIT, method test:

@Test
public void test() throws Exception {
    Connector c = getConnector();
    String tableName = super.getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    String tableId = c.tableOperations().tableIdMap().get(tableName);
    // wait for the table to be online
    TabletLocationState newTablet;
    do {
        UtilWaitThread.sleep(250);
        newTablet = getTabletLocationState(c, tableId);
    } while (newTablet.current == null);
    assertNull(newTablet.last);
    assertNull(newTablet.future);
    // put something in it
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("a");
    m.put("b", "c", "d");
    bw.addMutation(m);
    bw.close();
    // give it a last location
    c.tableOperations().flush(tableName, null, null, true);
    TabletLocationState flushed = getTabletLocationState(c, tableId);
    assertEquals(newTablet.current, flushed.current);
    assertEquals(flushed.current, flushed.last);
    assertNull(newTablet.future);
    // take the tablet offline
    c.tableOperations().offline(tableName, true);
    TabletLocationState offline = getTabletLocationState(c, tableId);
    assertNull(offline.future);
    assertNull(offline.current);
    assertEquals(flushed.current, offline.last);
    // put it back online
    c.tableOperations().online(tableName, true);
    TabletLocationState online = getTabletLocationState(c, tableId);
    assertNull(online.future);
    assertNotNull(online.current);
    assertEquals(online.current, online.last);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) TabletLocationState(org.apache.accumulo.server.master.state.TabletLocationState) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
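
Across these examples, Mutation is always paired with a BatchWriter, but only a couple of its put overloads appear. As a point of reference, here is a minimal sketch of a hypothetical helper (not code from the Accumulo tests above) showing the common put variants; it additionally assumes the ColumnVisibility, Value, Text, and StandardCharsets.UTF_8 imports.

// Hypothetical helper, not part of MasterAssignmentIT.
private static void writeSampleMutations(Connector c, String tableName) throws Exception {
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row1");
    // family, qualifier, and value given as CharSequences
    m.put("cf", "cq1", "value");
    // same cell shape, with a column visibility expression
    m.put("cf", "cq2", new ColumnVisibility("public"), "value");
    // with an explicit timestamp instead of the server-assigned one
    m.put("cf", "cq3", 42L, "value");
    // Text/Value form, as used in several examples above
    m.put(new Text("cf"), new Text("cq4"), new Value("value".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.close();
}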

Example 87 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class MergeIT, method merge:

@Test
public void merge() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
    BatchWriter bw = c.createBatchWriter(tableName, null);
    for (String row : "a b c d e f g h i j k".split(" ")) {
        Mutation m = new Mutation(row);
        m.put("cf", "cq", "value");
        bw.addMutation(m);
    }
    bw.close();
    c.tableOperations().flush(tableName, null, null, true);
    c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
    assertEquals(8, c.tableOperations().listSplits(tableName).size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
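
The splits(...) helper called above is defined elsewhere in MergeIT and is not shown here. A plausible sketch of such a helper (an assumption, not the project's exact code) simply wraps the row strings in the SortedSet<Text> that addSplits expects; it also needs java.util.SortedSet and java.util.TreeSet.

// Assumed shape of MergeIT's splits(...) helper.
private SortedSet<Text> splits(String[] points) {
    SortedSet<Text> result = new TreeSet<>();
    for (String point : points) {
        result.add(new Text(point));
    }
    return result;
}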

Example 88 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class MergeIT, method runMergeTest:

private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end) throws Exception {
    System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
    conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
    TreeSet<Text> splitSet = new TreeSet<>();
    for (String split : splits) {
        splitSet.add(new Text(split));
    }
    conn.tableOperations().addSplits(table, splitSet);
    BatchWriter bw = conn.createBatchWriter(table, null);
    HashSet<String> expected = new HashSet<>();
    for (String row : inserts) {
        Mutation m = new Mutation(row);
        m.put("cf", "cq", row);
        bw.addMutation(m);
        expected.add(row);
    }
    bw.close();
    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
        HashSet<String> observed = new HashSet<>();
        for (Entry<Key, Value> entry : scanner) {
            String row = entry.getKey().getRowData().toString();
            if (!observed.add(row)) {
                throw new Exception("Saw data twice " + table + " " + row);
            }
        }
        if (!observed.equals(expected)) {
            throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
        }
        HashSet<Text> currentSplits = new HashSet<>(conn.tableOperations().listSplits(table));
        HashSet<Text> ess = new HashSet<>();
        for (String es : expectedSplits) {
            ess.add(new Text(es));
        }
        if (!currentSplits.equals(ess)) {
            throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
        }
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) TabletDeletedException(org.apache.accumulo.server.util.TabletIterator.TabletDeletedException) ExpectedException(org.junit.rules.ExpectedException) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet)
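
For illustration, a hypothetical invocation of runMergeTest (with conn obtained via getConnector(); the parameter values are invented for this sketch, not taken from MergeIT): the split points that fall strictly inside the merge range should disappear while every inserted row survives.

// split points c and d lie inside ("b1", "d1") and are expected to be merged away
runMergeTest(conn, "mergeExample",
        new String[] { "a", "b", "c", "d", "e" },   // initial splits
        new String[] { "a", "b", "e" },             // splits expected afterwards
        new String[] { "a", "b", "c", "d", "e" },   // rows to insert
        "b1", "d1");                                // merge start and end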

Example 89 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class MetadataBatchScanTest, method main:

public static void main(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(MetadataBatchScanTest.class.getName(), args);
    Instance inst = new ZooKeeperInstance(ClientConfiguration.create().withInstance("acu14").withZkHosts("localhost"));
    final Connector connector = inst.getConnector(opts.getPrincipal(), opts.getToken());
    TreeSet<Long> splits = new TreeSet<>();
    Random r = new Random(42);
    while (splits.size() < 99999) {
        splits.add((r.nextLong() & 0x7fffffffffffffffl) % 1000000000000l);
    }
    Table.ID tid = Table.ID.of("8");
    Text per = null;
    ArrayList<KeyExtent> extents = new ArrayList<>();
    for (Long split : splits) {
        Text er = new Text(String.format("%012d", split));
        KeyExtent ke = new KeyExtent(tid, er, per);
        per = er;
        extents.add(ke);
    }
    extents.add(new KeyExtent(tid, null, per));
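    // three modes: "write" assigns a tablet server location to each tablet's metadata entry,
    // "writeFiles" adds a directory and five data-file entries per tablet, and "scan" runs
    // concurrent lookups of random tablet ranges against the metadata table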
    if (args[0].equals("write")) {
        BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        for (KeyExtent extent : extents) {
            Mutation mut = extent.getPrevRowUpdateMutation();
            new TServerInstance(HostAndPort.fromParts("192.168.1.100", 4567), "DEADBEEF").putLocation(mut);
            bw.addMutation(mut);
        }
        bw.close();
    } else if (args[0].equals("writeFiles")) {
        BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        for (KeyExtent extent : extents) {
            Mutation mut = new Mutation(extent.getMetadataEntry());
            String dir = "/t-" + UUID.randomUUID();
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes(UTF_8)));
            for (int i = 0; i < 5; i++) {
                mut.put(DataFileColumnFamily.NAME, new Text(dir + "/00000_0000" + i + ".map"), new DataFileValue(10000, 1000000).encodeAsValue());
            }
            bw.addMutation(mut);
        }
        bw.close();
    } else if (args[0].equals("scan")) {
        int numThreads = Integer.parseInt(args[1]);
        final int numLoop = Integer.parseInt(args[2]);
        int numLookups = Integer.parseInt(args[3]);
        HashSet<Integer> indexes = new HashSet<>();
        while (indexes.size() < numLookups) {
            indexes.add(r.nextInt(extents.size()));
        }
        final List<Range> ranges = new ArrayList<>();
        for (Integer i : indexes) {
            ranges.add(extents.get(i).toMetadataRange());
        }
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        System.out.println(runScanTest(connector, numLoop, ranges));
                    } catch (Exception e) {
                        log.error("Exception while running scan test.", e);
                    }
                }
            });
        }
        long t1 = System.currentTimeMillis();
        for (Thread thread : threads) {
            thread.start();
        }
        for (Thread thread : threads) {
            thread.join();
        }
        long t2 = System.currentTimeMillis();
        System.out.printf("tt : %6.2f%n", (t2 - t1) / 1000.0);
    } else {
        throw new IllegalArgumentException();
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Instance(org.apache.accumulo.core.client.Instance) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) ArrayList(java.util.ArrayList) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) Random(java.util.Random) TreeSet(java.util.TreeSet) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) HashSet(java.util.HashSet) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Table(org.apache.accumulo.core.client.impl.Table) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) ClientOpts(org.apache.accumulo.core.cli.ClientOpts) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
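
The runScanTest(...) helper invoked by the scan branch is defined elsewhere in MetadataBatchScanTest. A rough sketch of what such a helper could look like (an assumption, not the project's code) batch-scans the requested metadata ranges numLoop times and reports a timing summary; it additionally assumes the BatchScanner, Authorizations, Key, List, and Map.Entry imports.

// Assumed shape of runScanTest, not the project's implementation.
private static String runScanTest(Connector connector, int numLoop, List<Range> ranges) throws Exception {
    long valueBytes = 0;
    long t1 = System.currentTimeMillis();
    for (int i = 0; i < numLoop; i++) {
        // 8 query threads is an arbitrary choice for this sketch
        BatchScanner bs = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
        bs.setRanges(ranges);
        for (Entry<Key,Value> entry : bs) {
            valueBytes += entry.getValue().getSize();
        }
        bs.close();
    }
    long t2 = System.currentTimeMillis();
    return String.format("scanned %d value bytes over %d loops in %6.2f secs", valueBytes, numLoop, (t2 - t1) / 1000.0);
}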

Example 90 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class PermissionsIT, method createTestTable:

private void createTestTable(Connector c, String testUser, String tableName) throws Exception, MutationsRejectedException {
    if (!c.tableOperations().exists(tableName)) {
        // create the test table
        c.tableOperations().create(tableName);
        // put in some initial data
        BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
        Mutation m = new Mutation(new Text("row"));
        m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
        writer.addMutation(m);
        writer.close();
        // verify proper permissions for creator and test user
        verifyHasOnlyTheseTablePermissions(c, c.whoami(), tableName, TablePermission.values());
        verifyHasNoTablePermissions(c, testUser, tableName, TablePermission.values());
    }
}
Also used : Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
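
The verifyHasOnlyTheseTablePermissions and verifyHasNoTablePermissions helpers are defined elsewhere in PermissionsIT; at their core they presumably loop over SecurityOperations.hasTablePermission, roughly as in this sketch (an assumption, not the project's exact code; also assumes the TablePermission import).

// Assumed shape of a negative permission check.
private static void verifyHasNoTablePermissions(Connector c, String user, String table,
        TablePermission... perms) throws Exception {
    for (TablePermission perm : perms) {
        // check each permission individually; any granted permission is a failure
        if (c.securityOperations().hasTablePermission(user, table, perm)) {
            throw new AssertionError(user + " should not have " + perm + " on " + table);
        }
    }
}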

Aggregations

Mutation (org.apache.accumulo.core.data.Mutation): 601 usages
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 358 usages
Value (org.apache.accumulo.core.data.Value): 341 usages
Test (org.junit.Test): 311 usages
Text (org.apache.hadoop.io.Text): 303 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 223 usages
Key (org.apache.accumulo.core.data.Key): 197 usages
Scanner (org.apache.accumulo.core.client.Scanner): 161 usages
Connector (org.apache.accumulo.core.client.Connector): 150 usages
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 77 usages
Authorizations (org.apache.accumulo.core.security.Authorizations): 70 usages
Range (org.apache.accumulo.core.data.Range): 61 usages
ArrayList (java.util.ArrayList): 60 usages
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility): 59 usages
Entry (java.util.Map.Entry): 57 usages
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 55 usages
Map (java.util.Map): 53 usages
HashMap (java.util.HashMap): 44 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 43 usages
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 41 usages