
Example 16 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project.

From the class ConfigurableCompactionIT, method writeFlush:

private void writeFlush(Connector conn, String tablename, String row) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    conn.tableOperations().flush(tablename, null, null, true);
}
Also used: BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
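
The examples on this page all pass new BatchWriterConfig() and therefore rely on the client's defaults (roughly a 50 MB mutation buffer, a two-minute maximum latency, and three write threads). As a hedged sketch, not taken from the Accumulo sources above, a hypothetical writeFlushTuned variant of the same helper could set those knobs explicitly; java.util.concurrent.TimeUnit is assumed to be imported:

private void writeFlushTuned(Connector conn, String tablename, String row) throws Exception {
    BatchWriterConfig config = new BatchWriterConfig();
    // Buffer up to ~10 MB of mutations on the client before sending.
    config.setMaxMemory(10 * 1024 * 1024);
    // Send buffered mutations at least every 30 seconds, even if the buffer is not full.
    config.setMaxLatency(30, TimeUnit.SECONDS);
    // Number of background threads used to send mutations to tablet servers.
    config.setMaxWriteThreads(4);
    // How long to keep retrying a failing write before giving up.
    config.setTimeout(2, TimeUnit.MINUTES);
    BatchWriter bw = conn.createBatchWriter(tablename, config);
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
    // close() flushes any remaining mutations and can throw MutationsRejectedException.
    bw.close();
    conn.tableOperations().flush(tablename, null, null, true);
}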

Example 17 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project.

From the class VolumeChooserIT, method writeDataToTable:

public static void writeDataToTable(Connector connector, String tableName) throws Exception {
    // Write some data to the table
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    for (String s : rows) {
        Mutation m = new Mutation(new Text(s));
        m.put(EMPTY, EMPTY, EMPTY_VALUE);
        bw.addMutation(m);
    }
    bw.close();
}
Also used: BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
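
EMPTY, EMPTY_VALUE, and rows are fields of VolumeChooserIT that are not shown in this excerpt. A purely hypothetical set of stand-ins that would let the snippet compile on its own (the real test defines its own values, and EMPTY_VALUE is assumed to be an org.apache.accumulo.core.data.Value) might look like this:

// Hypothetical stand-ins for the VolumeChooserIT fields referenced above.
private static final Text EMPTY = new Text("");
private static final Value EMPTY_VALUE = new Value(new byte[0]);
private static final String[] rows = "a,b,c,d,e,f".split(",");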

Example 18 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project.

From the class VolumeIT, method testRelativePaths:

@Test
public void testRelativePaths() throws Exception {
    List<String> expected = new ArrayList<>();
    Connector connector = getConnector();
    String tableName = getUniqueNames(1)[0];
    connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
    Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    SortedSet<Text> partitions = new TreeSet<>();
    // with some splits
    for (String s : "c,g,k,p,s,v".split(",")) partitions.add(new Text(s));
    connector.tableOperations().addSplits(tableName, partitions);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    // create two files in each tablet
    String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
    for (String s : rows) {
        Mutation m = new Mutation(s);
        m.put("cf1", "cq1", "1");
        bw.addMutation(m);
        expected.add(s + ":cf1:cq1:1");
    }
    bw.flush();
    connector.tableOperations().flush(tableName, null, null, true);
    for (String s : rows) {
        Mutation m = new Mutation(s);
        m.put("cf1", "cq1", "2");
        bw.addMutation(m);
        expected.add(s + ":cf1:cq1:2");
    }
    bw.close();
    connector.tableOperations().flush(tableName, null, null, true);
    verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
    connector.tableOperations().offline(tableName, true);
    connector.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
    try (Scanner metaScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        metaScanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        BatchWriter mbw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        for (Entry<Key, Value> entry : metaScanner) {
            String cq = entry.getKey().getColumnQualifier().toString();
            if (cq.startsWith(v1.toString())) {
                Path path = new Path(cq);
                String relPath = "/" + path.getParent().getName() + "/" + path.getName();
                Mutation fileMut = new Mutation(entry.getKey().getRow());
                fileMut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
                fileMut.put(entry.getKey().getColumnFamily().toString(), relPath, entry.getValue().toString());
                mbw.addMutation(fileMut);
            }
        }
        mbw.close();
        connector.tableOperations().online(tableName, true);
        verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
        connector.tableOperations().compact(tableName, null, null, true, true);
        verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
        for (Entry<Key, Value> entry : metaScanner) {
            String cq = entry.getKey().getColumnQualifier().toString();
            Path path = new Path(cq);
            Assert.assertTrue("relative path not deleted " + path.toString(), path.depth() > 2);
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
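
verifyData is a VolumeIT helper that is not shown in this excerpt. Based on how expected is built above (row:cf1:cq1:value strings), a rough sketch of what such a check might do, assuming java.util.Collections is imported, is the following; the real helper may differ:

// Sketch only: compares the scanner's contents against the expected
// "row:columnFamily:columnQualifier:value" strings accumulated above.
private static void verifyData(List<String> expected, Scanner scanner) {
    List<String> actual = new ArrayList<>();
    for (Entry<Key, Value> entry : scanner) {
        Key k = entry.getKey();
        actual.add(k.getRow() + ":" + k.getColumnFamily() + ":" + k.getColumnQualifier() + ":" + entry.getValue());
    }
    scanner.close();
    List<String> expectedSorted = new ArrayList<>(expected);
    Collections.sort(expectedSorted);
    Collections.sort(actual);
    Assert.assertEquals(expectedSorted, actual);
}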

Example 19 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project.

From the class VolumeIT, method writeData:

private void writeData(String tableName, Connector conn) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException {
    TreeSet<Text> splits = new TreeSet<>();
    for (int i = 1; i < 100; i++) {
        splits.add(new Text(String.format("%06d", i * 100)));
    }
    conn.tableOperations().create(tableName);
    conn.tableOperations().addSplits(tableName, splits);
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 0; i < 100; i++) {
        String row = String.format("%06d", i * 100 + 3);
        Mutation m = new Mutation(row);
        m.put("cf1", "cq1", "1");
        bw.addMutation(m);
    }
    bw.close();
}
Also used : TreeSet(java.util.TreeSet) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
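
Rows are written with String.format("%06d", ...), so a lexicographic row Range lines up with the numeric split points. The following read-back sketch is not part of VolumeIT (the method name scanOneTablet is hypothetical, and Range, Key, Value, Entry, and Authorizations are assumed to be imported); it scans one tablet's worth of the data written above:

private void scanOneTablet(String tableName, Connector conn) throws Exception {
    // Sketch: count the rows that fall between 000100 and 000200 (inclusive);
    // with the data written above, only row 000103 lands in this range.
    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
        scanner.setRange(new Range(String.format("%06d", 100), String.format("%06d", 200)));
        int count = 0;
        for (Entry<Key, Value> entry : scanner) {
            count++;
        }
        System.out.println("rows in range: " + count);
    }
}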

Example 20 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in the Apache Accumulo project.

From the class UserCompactionStrategyIT, method writeFlush:

private void writeFlush(Connector conn, String tablename, String row) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
    Mutation m = new Mutation(row);
    m.put("", "", "");
    bw.addMutation(m);
    bw.close();
    conn.tableOperations().flush(tablename, null, null, true);
}
Also used: BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
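
BatchWriter.close() (like flush()) throws MutationsRejectedException when any buffered mutation could not be written. A hedged, hypothetical writeFlushChecked variant of the helper above that surfaces the rejection details, assuming org.apache.accumulo.core.client.MutationsRejectedException is imported:

private void writeFlushChecked(Connector conn, String tablename, String row) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
    try {
        Mutation m = new Mutation(row);
        m.put("", "", "");
        bw.addMutation(m);
    } finally {
        try {
            bw.close();
        } catch (MutationsRejectedException e) {
            // getConstraintViolationSummaries() describes any constraint failures
            // behind the rejected mutations.
            System.err.println("rejected mutations: " + e.getConstraintViolationSummaries());
            throw e;
        }
    }
    conn.tableOperations().flush(tablename, null, null, true);
}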

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 282
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 246
Mutation (org.apache.accumulo.core.data.Mutation): 224
Test (org.junit.Test): 171
Value (org.apache.accumulo.core.data.Value): 166
Connector (org.apache.accumulo.core.client.Connector): 142
Scanner (org.apache.accumulo.core.client.Scanner): 121
Key (org.apache.accumulo.core.data.Key): 121
Text (org.apache.hadoop.io.Text): 119
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 50
Entry (java.util.Map.Entry): 42
Range (org.apache.accumulo.core.data.Range): 42
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 41
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 36
Authorizations (org.apache.accumulo.core.security.Authorizations): 36
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 35
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 32
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 29
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 29
HashMap (java.util.HashMap): 24