Example 26 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class KeyValueEqualityIT, method testEquality.

@Test
public void testEquality() throws Exception {
    Connector conn = this.getConnector();
    final BatchWriterConfig config = new BatchWriterConfig();
    final String[] tables = getUniqueNames(2);
    final String table1 = tables[0], table2 = tables[1];
    final TableOperations tops = conn.tableOperations();
    tops.create(table1);
    tops.create(table2);
    final BatchWriter bw1 = conn.createBatchWriter(table1, config), bw2 = conn.createBatchWriter(table2, config);
    for (int row = 0; row < 100; row++) {
        Mutation m = new Mutation(Integer.toString(row));
        for (int col = 0; col < 10; col++) {
            m.put(Integer.toString(col), "", System.currentTimeMillis(), Integer.toString(col * 2));
        }
        bw1.addMutation(m);
        bw2.addMutation(m);
    }
    bw1.close();
    bw2.close();
    Iterator<Entry<Key, Value>> t1 = conn.createScanner(table1, Authorizations.EMPTY).iterator(), t2 = conn.createScanner(table2, Authorizations.EMPTY).iterator();
    while (t1.hasNext() && t2.hasNext()) {
        // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode properly
        Entry<Key, Value> e1 = t1.next(), e2 = t2.next();
        Assert.assertEquals(e1, e2);
        Assert.assertEquals(e1.hashCode(), e2.hashCode());
    }
    Assert.assertFalse("table1 had more data to read", t1.hasNext());
    Assert.assertFalse("table2 had more data to read", t2.hasNext());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Entry(java.util.Map.Entry) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
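
BatchWriterConfig's defaults are usable as-is, but the client-side write buffer, flush latency, thread count, and retry timeout are all tunable. A minimal sketch of the setters, assuming the standard Accumulo 1.x client API; the values are illustrative, not recommendations:

import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.BatchWriterConfig;

BatchWriterConfig tuned = new BatchWriterConfig()
    // buffer up to ~10 MB of mutations on the client before sending
    .setMaxMemory(10 * 1024 * 1024)
    // send buffered mutations at least every 2 minutes, even if the buffer is not full
    .setMaxLatency(2, TimeUnit.MINUTES)
    // number of threads used to send mutations to tablet servers
    .setMaxWriteThreads(4)
    // give up on an unresponsive tablet server after 60 seconds
    .setTimeout(60, TimeUnit.SECONDS);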

Example 27 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class LargeSplitRowIT, method userAddedSplit.

// User added split
@Test(timeout = 60 * 1000)
public void userAddedSplit() throws Exception {
    log.info("User added split");
    // Make a table and lower the TABLE_MAX_END_ROW_SIZE property
    final String tableName = getUniqueNames(1)[0];
    final Connector conn = getConnector();
    conn.tableOperations().create(tableName);
    conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
    // Create a BatchWriter and add a mutation to the table
    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("Row");
    m.put("cf", "cq", "value");
    batchWriter.addMutation(m);
    batchWriter.close();
    // Create a split point that is too large to be an end row and fill it with all 'm'
    SortedSet<Text> partitionKeys = new TreeSet<>();
    byte[] data = new byte[(int) (ConfigurationTypeHelper.getFixedMemoryAsBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
    for (int i = 0; i < data.length; i++) {
        data[i] = 'm';
    }
    partitionKeys.add(new Text(data));
    // Try to add the split point that is too large; if the split is created, the test fails.
    try {
        conn.tableOperations().addSplits(tableName, partitionKeys);
        Assert.fail();
    } catch (AccumuloServerException e) {
        // expected: the proposed split point exceeds TABLE_MAX_END_ROW_SIZE
    }
    // Make sure that the information that was written to the table before we tried to add the split point is still correct
    int counter = 0;
    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
        for (Entry<Key, Value> entry : scanner) {
            counter++;
            Key k = entry.getKey();
            Assert.assertEquals("Row", k.getRow().toString());
            Assert.assertEquals("cf", k.getColumnFamily().toString());
            Assert.assertEquals("cq", k.getColumnQualifier().toString());
            Assert.assertEquals("value", entry.getValue().toString());
        }
    }
    // Make sure there is only one entry in the table
    Assert.assertEquals(1, counter);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) AccumuloServerException(org.apache.accumulo.core.client.impl.AccumuloServerException) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
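
The flip side of the rejected split above: a split point within the 1000-byte TABLE_MAX_END_ROW_SIZE cap is accepted. A hedged sketch, reusing conn and tableName as set up in the test:

// A 1-byte split point, well under the 1000-byte cap configured above, so addSplits succeeds
SortedSet<Text> smallSplit = new TreeSet<>();
smallSplit.add(new Text("m"));
conn.tableOperations().addSplits(tableName, smallSplit);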

Example 28 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class LargeSplitRowIT, method automaticSplitLater.

@Test(timeout = 60 * 1000)
public void automaticSplitLater() throws Exception {
    log.info("Split later");
    automaticSplit(15, 1);
    final Connector conn = getConnector();
    String tableName = "";
    // Find the table created by automaticSplit(); it is the only non-system table
    for (String curr : conn.tableOperations().list()) {
        if (!curr.startsWith(Namespace.ACCUMULO + ".")) {
            tableName = curr;
        }
    }
    // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
    byte[] data = new byte[10];
    // Fill the key with the byte value j, except the last byte, which cycles from 0 through 24 for each j
    for (int j = 15; j < 150; j += 1) {
        for (int i = 0; i < data.length - 1; i++) {
            data[i] = (byte) j;
        }
        for (int i = 0; i < 25; i++) {
            data[data.length - 1] = (byte) i;
            Mutation m = new Mutation(data);
            m.put("cf", "cq", "value");
            batchWriter.addMutation(m);
        }
    }
    // Close the BatchWriter and flush the table, then wait below for the split to occur
    batchWriter.close();
    conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
    // Make sure a split occurs
    while (conn.tableOperations().listSplits(tableName).size() == 0) {
        Thread.sleep(250);
    }
    Assert.assertTrue(0 < conn.tableOperations().listSplits(tableName).size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
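
The test above polls listSplits indefinitely and relies on the JUnit timeout to abort a hung run. A sketch of the same wait with an explicit deadline, assuming conn and tableName from the test; the 30-second limit is an arbitrary choice:

// Poll for a split with a bounded deadline instead of an unbounded loop
long deadline = System.currentTimeMillis() + 30 * 1000;
while (conn.tableOperations().listSplits(tableName).isEmpty()) {
    if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("table never split: " + tableName);
    }
    Thread.sleep(250);
}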

Example 29 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class LargeSplitRowIT, method automaticSplit.

private void automaticSplit(int max, int spacing) throws Exception {
    // Make a table and lower the split threshold and related properties
    final String tableName = getUniqueNames(1)[0];
    final Connector conn = getConnector();
    conn.tableOperations().create(tableName);
    conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
    conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
    conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
    conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
    // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
    byte[] data = new byte[(int) (ConfigurationTypeHelper.getFixedMemoryAsBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
    // Fill the key with the byte value j, except the last byte, which cycles from 0 through 9 for each j
    for (int j = 0; j < max; j += spacing) {
        for (int i = 0; i < data.length - 1; i++) {
            data[i] = (byte) j;
        }
        for (int i = 0; i < 10; i++) {
            data[data.length - 1] = (byte) i;
            Mutation m = new Mutation(data);
            m.put("cf", "cq", "value");
            batchWriter.addMutation(m);
        }
    }
    // Close the BatchWriter, flush the table, and sleep briefly so the table has time to split if it is going to
    batchWriter.close();
    conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
    Thread.sleep(500);
    // Make sure all the data that was put in the table is still correct
    int count = 0;
    int extra = 10;
    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
        for (Entry<Key, Value> entry : scanner) {
            if (extra == 10) {
                extra = 0;
                for (int i = 0; i < data.length - 1; i++) {
                    data[i] = (byte) count;
                }
                count += spacing;
            }
            Key k = entry.getKey();
            data[data.length - 1] = (byte) extra;
            String expected = new String(data, UTF_8);
            Assert.assertEquals(expected, k.getRow().toString());
            Assert.assertEquals("cf", k.getColumnFamily().toString());
            Assert.assertEquals("cq", k.getColumnQualifier().toString());
            Assert.assertEquals("value", entry.getValue().toString());
            extra++;
        }
    }
    Assert.assertEquals(10, extra);
    Assert.assertEquals(max, count);
    // Make sure no splits occurred in the table
    Assert.assertEquals(0, conn.tableOperations().listSplits(tableName).size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key)
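
To make the row layout concrete: every row automaticSplit() writes is (length - 1) copies of the byte j followed by a single trailing byte i, so rows sharing a prefix differ only in their last byte. A small sketch of building one such row, with j = 3 and i = 7 chosen arbitrarily:

import java.util.Arrays;

// Same sizing as the test: 2 bytes past the default TABLE_MAX_END_ROW_SIZE
byte[] row = new byte[(int) (ConfigurationTypeHelper.getFixedMemoryAsBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
Arrays.fill(row, 0, row.length - 1, (byte) 3); // prefix: all 3s
row[row.length - 1] = (byte) 7;                // trailing byte
Mutation m = new Mutation(row);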

Example 30 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class BatchWriterIT, method test.

@Test
public void test() throws Exception {
    // Exercise the BatchWriter with a buffer of size zero
    String table = getUniqueNames(1)[0];
    Connector c = getConnector();
    c.tableOperations().create(table);
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(0);
    BatchWriter writer = c.createBatchWriter(table, config);
    Mutation m = new Mutation("row");
    m.put("cf", "cq", new Value("value".getBytes()));
    writer.addMutation(m);
    writer.close();
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
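
Setting max memory to zero leaves the client with no room to buffer, so each addMutation call is sent to the tablet servers immediately; useful for exercising the write path in tests, but wasteful for bulk loading. Since the setters return the config, the setup can be inlined, as in this sketch reusing c and table from the test:

// Zero buffer: every mutation is flushed as soon as it is added
BatchWriter writer = c.createBatchWriter(table, new BatchWriterConfig().setMaxMemory(0));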

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)282 BatchWriter (org.apache.accumulo.core.client.BatchWriter)246 Mutation (org.apache.accumulo.core.data.Mutation)224 Test (org.junit.Test)171 Value (org.apache.accumulo.core.data.Value)166 Connector (org.apache.accumulo.core.client.Connector)142 Scanner (org.apache.accumulo.core.client.Scanner)121 Key (org.apache.accumulo.core.data.Key)121 Text (org.apache.hadoop.io.Text)119 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)50 Entry (java.util.Map.Entry)42 Range (org.apache.accumulo.core.data.Range)42 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)41 BatchScanner (org.apache.accumulo.core.client.BatchScanner)36 Authorizations (org.apache.accumulo.core.security.Authorizations)36 AccumuloException (org.apache.accumulo.core.client.AccumuloException)35 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)32 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)29 MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException)29 HashMap (java.util.HashMap)24