Search in sources :

Example 56 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

Source: class IMMLGBenchmark, method write().

/**
 * Writes 2^15 rows of random data to the given table and reports the elapsed time.
 * Each row gets one random 25-byte value per column family in {@code cfset}.
 *
 * @param conn  connector used to create the batch writer
 * @param cfset column families to populate in every row
 * @param table destination table name
 * @return wall-clock milliseconds spent writing (including close/flush)
 * @throws TableNotFoundException if the table does not exist
 * @throws MutationsRejectedException if the server rejects any mutation
 */
private static long write(Connector conn, ArrayList<byte[]> cfset, String table) throws TableNotFoundException, MutationsRejectedException {
    Random rand = new Random();
    byte[] val = new byte[50];
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    long t1 = System.currentTimeMillis();
    for (int i = 0; i < 1 << 15; i++) {
        // Mask to a non-negative long instead of calling abs(): Math.abs(Long.MIN_VALUE)
        // is still negative, which would make the zero-padded formatter fail on that
        // (rare but possible) draw. The mask clears only the sign bit.
        byte[] row = FastFormat.toZeroPaddedString(rand.nextLong() & Long.MAX_VALUE, 16, 16, new byte[0]);
        Mutation m = new Mutation(row);
        for (byte[] cf : cfset) {
            // 4 hex digits -> column qualifiers collide across rows, exercising merges.
            byte[] cq = FastFormat.toZeroPaddedString(rand.nextInt(1 << 16), 4, 16, new byte[0]);
            rand.nextBytes(val);
            m.put(cf, cq, val);
        }
        bw.addMutation(m);
    }
    // close() flushes, so the timed interval includes server-side acceptance.
    bw.close();
    long t2 = System.currentTimeMillis();
    return t2 - t1;
}
Also used : Random(java.util.Random) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)

Example 57 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

Source: class MasterRepairsDualAssignmentIT, method test().

@Test
public void test() throws Exception {
    // Verifies the master repairs a tablet that appears assigned to two servers at
    // once: we forge stale location entries in the metadata (and then root) table
    // and wait for the state store to become clean again.
    // make some tablets, spread 'em around
    Connector c = getConnector();
    ClientContext context = new ClientContext(c.getInstance(), new Credentials("root", new PasswordToken(ROOT_PASSWORD)), getClientConfig());
    String table = this.getUniqueNames(1)[0];
    // Need write access to the system tables to forge location mutations below.
    c.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    c.securityOperations().grantTablePermission("root", RootTable.NAME, TablePermission.WRITE);
    c.tableOperations().create(table);
    // 26 splits so tablets spread across both tablet servers.
    SortedSet<Text> partitions = new TreeSet<>();
    for (String part : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
        partitions.add(new Text(part));
    }
    c.tableOperations().addSplits(table, partitions);
    // scan the metadata table and get the two table location states
    Set<TServerInstance> states = new HashSet<>();
    Set<TabletLocationState> oldLocations = new HashSet<>();
    MetaDataStateStore store = new MetaDataStateStore(context, null);
    // Poll until tablets are hosted on two distinct tablet servers; remember the
    // locations so we can replay one of them after a server dies.
    while (states.size() < 2) {
        UtilWaitThread.sleep(250);
        oldLocations.clear();
        for (TabletLocationState tls : store) {
            if (tls.current != null) {
                states.add(tls.current);
                oldLocations.add(tls);
            }
        }
    }
    assertEquals(2, states.size());
    // Kill a tablet server... we don't care which one... wait for everything to be reassigned
    cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
    Set<TServerInstance> replStates = new HashSet<>();
    // Find out which tablet server remains
    while (true) {
        UtilWaitThread.sleep(1000);
        states.clear();
        replStates.clear();
        boolean allAssigned = true;
        for (TabletLocationState tls : store) {
            if (tls != null && tls.current != null) {
                states.add(tls.current);
            } else if (tls != null && tls.extent.equals(new KeyExtent(ReplicationTable.ID, null, null))) {
                // Replication table tablet is tracked separately; may be unhosted here.
                replStates.add(tls.current);
            } else {
                allAssigned = false;
            }
        }
        System.out.println(states + " size " + states.size() + " allAssigned " + allAssigned);
        // NOTE(review): condition reads "!= 2" where "== 1" appears intended (the
        // assertion below expects exactly one surviving server) -- confirm upstream.
        if (states.size() != 2 && allAssigned)
            break;
    }
    assertEquals(1, replStates.size());
    assertEquals(1, states.size());
    // pick an assigned tablet and assign it to the old tablet
    TabletLocationState moved = null;
    for (TabletLocationState old : oldLocations) {
        // A location whose server is not in the surviving set belonged to the dead server.
        if (!states.contains(old.current)) {
            moved = old;
        }
    }
    assertNotEquals(null, moved);
    // throw a mutation in as if we were the dying tablet
    BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    Mutation assignment = new Mutation(moved.extent.getMetadataEntry());
    moved.current.putLocation(assignment);
    bw.addMutation(assignment);
    bw.close();
    // wait for the master to fix the problem
    waitForCleanStore(store);
    // now jam up the metadata table
    bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    assignment = new Mutation(new KeyExtent(MetadataTable.ID, null, null).getMetadataEntry());
    moved.current.putLocation(assignment);
    bw.addMutation(assignment);
    bw.close();
    // The metadata tablet's location lives in the root store, so wait on that one.
    waitForCleanStore(new RootTabletStateStore(context, null));
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ClientContext(org.apache.accumulo.core.client.impl.ClientContext) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) MetaDataStateStore(org.apache.accumulo.server.master.state.MetaDataStateStore) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) TreeSet(java.util.TreeSet) RootTabletStateStore(org.apache.accumulo.server.master.state.RootTabletStateStore) TabletLocationState(org.apache.accumulo.server.master.state.TabletLocationState) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Credentials(org.apache.accumulo.core.client.impl.Credentials) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 58 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo-examples by apache.

Source: class CountIT, method setupInstance().

@Before
public void setupInstance() throws Exception {
    // Builds the directory-tree fixture the count tests scan:
    //   /            has 1 dir
    //   /local       has 2 dirs and 1 file
    //   /local/user1 has 2 files
    tableName = getUniqueNames(1)[0];
    conn = getConnector();
    conn.tableOperations().create(tableName);
    BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
    ColumnVisibility vis = new ColumnVisibility();
    // Directory entries share identical flags/size/timestamp, so write them in a loop.
    for (String dir : new String[] { "/local", "/local/user1", "/local/user2" }) {
        writer.addMutation(Ingest.buildMutation(vis, dir, true, false, true, 272, 12345, null));
    }
    // File entries: /local/file is written twice with different timestamps.
    writer.addMutation(Ingest.buildMutation(vis, "/local/file", false, false, false, 1024, 12345, null));
    writer.addMutation(Ingest.buildMutation(vis, "/local/file", false, false, false, 1024, 23456, null));
    writer.addMutation(Ingest.buildMutation(vis, "/local/user1/file1", false, false, false, 2024, 12345, null));
    writer.addMutation(Ingest.buildMutation(vis, "/local/user1/file2", false, false, false, 1028, 23456, null));
    writer.close();
}
Also used : BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) Before(org.junit.Before)

Example 59 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo-examples by apache.

Source: class ChunkInputFormatIT, method testInfoWithoutChunks().

@Test
public void testInfoWithoutChunks() throws Exception {
    // Loads the malformed `baddata` fixture verbatim (visibility and timestamp
    // preserved from each Key) and verifies the reader records exactly one failure.
    conn.tableOperations().create(tableName);
    BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key, Value> entry : baddata) {
        Key key = entry.getKey();
        Mutation mutation = new Mutation(key.getRow());
        ColumnVisibility vis = new ColumnVisibility(key.getColumnVisibility());
        mutation.put(key.getColumnFamily(), key.getColumnQualifier(), vis, key.getTimestamp(), entry.getValue());
        writer.addMutation(mutation);
    }
    writer.close();
    // Exit code 0 means the job ran; the single assertion error is the expected failure.
    assertEquals(0, CIFTester.main(tableName, CIFTester.TestBadData.class.getName()));
    assertEquals(1, assertionErrors.get(tableName).size());
}
Also used : Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 60 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo-examples by apache.

Source: class ChunkInputStreamIT, method testWithAccumulo().

@Test
public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
    // End-to-end check: write the chunked `data` fixture into a real table, then
    // read it back with ChunkInputStream over a scanner and verify each file's
    // reassembled bytes and visibilities. The exact read order below is the test.
    conn.tableOperations().create(tableName);
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key, Value> e : data) {
        Key k = e.getKey();
        Mutation m = new Mutation(k.getRow());
        // Note: timestamp is NOT carried over here (unlike some sibling tests),
        // so entries get server-assigned timestamps.
        m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), e.getValue());
        bw.addMutation(m);
    }
    bw.close();
    Scanner scan = conn.createScanner(tableName, AUTHS);
    ChunkInputStream cis = new ChunkInputStream();
    byte[] b = new byte[20];
    int read;
    // The PeekingIterator is shared across setSource() calls; each call positions
    // the stream at the next file in the scan.
    PeekingIterator<Entry<Key, Value>> pi = new PeekingIterator<>(scan.iterator());
    cis.setSource(pi);
    // First file: 8 bytes, then EOF.
    assertEquals(read = cis.read(b), 8);
    assertEquals(new String(b, 0, read), "asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    cis.setSource(pi);
    // Second file: 10 bytes; visibilities accumulated while reading it.
    assertEquals(read = cis.read(b), 10);
    assertEquals(new String(b, 0, read), "qwertyuiop");
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
    cis.close();
    cis.setSource(pi);
    // Third file: two 8-byte chunks reassembled into 16 bytes.
    assertEquals(read = cis.read(b), 16);
    assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    assertEquals(cis.getVisibilities().toString(), "[A&B]");
    cis.close();
    cis.setSource(pi);
    // Fourth file: empty -- immediate EOF.
    assertEquals(read = cis.read(b), -1);
    cis.close();
    cis.setSource(pi);
    // Fifth file: 8 bytes, then EOF; the iterator must then be exhausted.
    assertEquals(read = cis.read(b), 8);
    assertEquals(new String(b, 0, read), "asdfjkl;");
    assertEquals(read = cis.read(b), -1);
    cis.close();
    assertFalse(pi.hasNext());
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) PeekingIterator(org.apache.accumulo.core.util.PeekingIterator) Entry(java.util.Map.Entry) KeyValue(org.apache.accumulo.core.data.KeyValue) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)182 BatchWriter (org.apache.accumulo.core.client.BatchWriter)135 Mutation (org.apache.accumulo.core.data.Mutation)131 Value (org.apache.accumulo.core.data.Value)88 Text (org.apache.hadoop.io.Text)60 Key (org.apache.accumulo.core.data.Key)59 Test (org.junit.Test)58 Scanner (org.apache.accumulo.core.client.Scanner)57 Connector (org.apache.accumulo.core.client.Connector)38 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)33 MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException)28 AccumuloException (org.apache.accumulo.core.client.AccumuloException)26 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)24 Authorizations (org.apache.accumulo.core.security.Authorizations)22 Range (org.apache.accumulo.core.data.Range)20 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)19 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)19 ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility)19 Entry (java.util.Map.Entry)18 IOException (java.io.IOException)14