
Example 41 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class KeyValueEqualityIT method testEquality.

@Test
public void testEquality() throws Exception {
    Connector conn = this.getConnector();
    final BatchWriterConfig config = new BatchWriterConfig();
    final String[] tables = getUniqueNames(2);
    final String table1 = tables[0], table2 = tables[1];
    final TableOperations tops = conn.tableOperations();
    tops.create(table1);
    tops.create(table2);
    final BatchWriter bw1 = conn.createBatchWriter(table1, config), bw2 = conn.createBatchWriter(table2, config);
    for (int row = 0; row < 100; row++) {
        Mutation m = new Mutation(Integer.toString(row));
        for (int col = 0; col < 10; col++) {
            m.put(Integer.toString(col), "", System.currentTimeMillis(), Integer.toString(col * 2));
        }
        bw1.addMutation(m);
        bw2.addMutation(m);
    }
    bw1.close();
    bw2.close();
    Iterator<Entry<Key, Value>> t1 = conn.createScanner(table1, Authorizations.EMPTY).iterator(), t2 = conn.createScanner(table2, Authorizations.EMPTY).iterator();
    while (t1.hasNext() && t2.hasNext()) {
        // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode properly
        Entry<Key, Value> e1 = t1.next(), e2 = t2.next();
        Assert.assertEquals(e1, e2);
        Assert.assertEquals(e1.hashCode(), e2.hashCode());
    }
    Assert.assertFalse("table1 had more data to read", t1.hasNext());
    Assert.assertFalse("table2 had more data to read", t2.hasNext());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Entry(java.util.Map.Entry) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
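The getConnector() call in these examples comes from the Accumulo integration-test harness. Outside of a test, a Connector is typically obtained from an Instance; a minimal sketch, assuming a hypothetical instance name, ZooKeeper quorum, and credentials (substitute your own):

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class ConnectorDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical instance name and ZooKeeper quorum.
        Instance instance = new ZooKeeperInstance("test-instance", "localhost:2181");
        // Hypothetical principal and password.
        Connector conn = instance.getConnector("root", new PasswordToken("secret"));
        System.out.println(conn.tableOperations().list());
    }
}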

Example 42 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class LargeSplitRowIT method userAddedSplit.

// User added split
@Test(timeout = 60 * 1000)
public void userAddedSplit() throws Exception {
    log.info("User added split");
    // make a table and lower the TABLE_MAX_END_ROW_SIZE property
    final String tableName = getUniqueNames(1)[0];
    final Connector conn = getConnector();
    conn.tableOperations().create(tableName);
    conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
    // Create a BatchWriter and add a mutation to the table
    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("Row");
    m.put("cf", "cq", "value");
    batchWriter.addMutation(m);
    batchWriter.close();
    // Create a split point that is too large to be an end row and fill it with all 'm'
    SortedSet<Text> partitionKeys = new TreeSet<>();
    byte[] data = new byte[(int) (ConfigurationTypeHelper.getFixedMemoryAsBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
    for (int i = 0; i < data.length; i++) {
        data[i] = 'm';
    }
    partitionKeys.add(new Text(data));
    // Try to add the split point that is too large; if the split is created, the test fails.
    try {
        conn.tableOperations().addSplits(tableName, partitionKeys);
        Assert.fail();
    } catch (AccumuloServerException e) {
        // expected: the split point exceeds TABLE_MAX_END_ROW_SIZE
    }
    // Make sure that the information that was written to the table before we tried to add the split point is still correct
    int counter = 0;
    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
        for (Entry<Key, Value> entry : scanner) {
            counter++;
            Key k = entry.getKey();
            Assert.assertEquals("Row", k.getRow().toString());
            Assert.assertEquals("cf", k.getColumnFamily().toString());
            Assert.assertEquals("cq", k.getColumnQualifier().toString());
            Assert.assertEquals("value", entry.getValue().toString());
        }
    }
    // Make sure there is only one line in the table
    Assert.assertEquals(1, counter);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) AccumuloServerException(org.apache.accumulo.core.client.impl.AccumuloServerException) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
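The oversized split row above is sized two bytes past the default TABLE_MAX_END_ROW_SIZE, so it necessarily exceeds the limit. A minimal sketch of how ConfigurationTypeHelper resolves such memory-size strings to byte counts, assuming the usual 1024-based suffix handling:

import org.apache.accumulo.core.conf.ConfigurationTypeHelper;

public class MemorySizeDemo {
    public static void main(String[] args) {
        // A bare number is taken as bytes.
        System.out.println(ConfigurationTypeHelper.getFixedMemoryAsBytes("1000")); // 1000
        // K/M/G suffixes are binary multiples, so "10K" is 10 * 1024 bytes.
        System.out.println(ConfigurationTypeHelper.getFixedMemoryAsBytes("10K")); // 10240
    }
}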

Example 43 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class LargeSplitRowIT method automaticSplitLater.

@Test(timeout = 60 * 1000)
public void automaticSplitLater() throws Exception {
    log.info("Split later");
    automaticSplit(15, 1);
    final Connector conn = getConnector();
    // Find the table created by automaticSplit(15, 1) above: the only non-system table.
    String tableName = "";
    java.util.Iterator<String> iterator = conn.tableOperations().list().iterator();
    while (iterator.hasNext()) {
        String curr = iterator.next();
        if (!curr.startsWith(Namespace.ACCUMULO + ".")) {
            tableName = curr;
        }
    }
    // Create a BatchWriter and fill the table with enough rows to push it past the split threshold
    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
    byte[] data = new byte[10];
    // Fill each row with j's except the last byte, which cycles through 0 to 24 for every j value
    for (int j = 15; j < 150; j += 1) {
        for (int i = 0; i < data.length - 1; i++) {
            data[i] = (byte) j;
        }
        for (int i = 0; i < 25; i++) {
            data[data.length - 1] = (byte) i;
            Mutation m = new Mutation(data);
            m.put("cf", "cq", "value");
            batchWriter.addMutation(m);
        }
    }
    // Flush the BatchWriter and the table, then wait below for a split to occur
    batchWriter.close();
    conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
    // Make sure a split occurs
    while (conn.tableOperations().listSplits(tableName).size() == 0) {
        Thread.sleep(250);
    }
    Assert.assertTrue(0 < conn.tableOperations().listSplits(tableName).size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
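The listSplits poll above loops until a split appears, with only the @Test timeout as a backstop. A bounded variant of the same wait, as a sketch that would drop into the method above (the 30-second deadline is an arbitrary choice, not taken from the original test):

    // Wait up to 30 seconds for the tablet to split, failing fast otherwise.
    long deadline = System.currentTimeMillis() + 30 * 1000;
    while (conn.tableOperations().listSplits(tableName).isEmpty()) {
        if (System.currentTimeMillis() > deadline) {
            Assert.fail("table " + tableName + " never split");
        }
        Thread.sleep(250);
    }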

Example 44 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class LargeSplitRowIT method automaticSplit.

private void automaticSplit(int max, int spacing) throws Exception {
    // make a table and lower the relevant table properties
    final String tableName = getUniqueNames(1)[0];
    final Connector conn = getConnector();
    conn.tableOperations().create(tableName);
    conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
    conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
    conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
    conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
    // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
    BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
    byte[] data = new byte[(int) (ConfigurationTypeHelper.getFixedMemoryAsBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
    // Fill each row with j's except the last byte, which cycles through 0 to 9 for every j value
    for (int j = 0; j < max; j += spacing) {
        for (int i = 0; i < data.length - 1; i++) {
            data[i] = (byte) j;
        }
        for (int i = 0; i < 10; i++) {
            data[data.length - 1] = (byte) i;
            Mutation m = new Mutation(data);
            m.put("cf", "cq", "value");
            batchWriter.addMutation(m);
        }
    }
    // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time for the table to split if need be.
    batchWriter.close();
    conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
    Thread.sleep(500);
    // Make sure all the data that was put in the table is still correct
    int count = 0;
    int extra = 10;
    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
        for (Entry<Key, Value> entry : scanner) {
            if (extra == 10) {
                extra = 0;
                for (int i = 0; i < data.length - 1; i++) {
                    data[i] = (byte) count;
                }
                count += spacing;
            }
            Key k = entry.getKey();
            data[data.length - 1] = (byte) extra;
            String expected = new String(data, UTF_8);
            Assert.assertEquals(expected, k.getRow().toString());
            Assert.assertEquals("cf", k.getColumnFamily().toString());
            Assert.assertEquals("cq", k.getColumnQualifier().toString());
            Assert.assertEquals("value", entry.getValue().toString());
            extra++;
        }
    }
    Assert.assertEquals(10, extra);
    Assert.assertEquals(max, count);
    // Make sure no splits occurred in the table
    Assert.assertEquals(0, conn.tableOperations().listSplits(tableName).size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key)
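The verification pass above scans the whole table. When only part of a table needs checking, a Range (also prominent in the aggregation list below) can narrow the scan. A minimal sketch against a hypothetical row id, dropping into the same context as the method above:

    // Restrict the scan to exactly one row instead of the full table.
    try (Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY)) {
        scanner.setRange(Range.exact("someRow"));
        for (Entry<Key, Value> entry : scanner) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }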

Example 45 with Connector

use of org.apache.accumulo.core.client.Connector in project accumulo by apache.

the class BalanceIT method testBalance.

@Test
public void testBalance() throws Exception {
    String tableName = getUniqueNames(1)[0];
    Connector c = getConnector();
    log.info("Creating table");
    c.tableOperations().create(tableName);
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = 0; i < 10; i++) {
        splits.add(new Text("" + i));
    }
    log.info("Adding splits");
    c.tableOperations().addSplits(tableName, splits);
    log.info("Waiting for balance");
    c.instanceOperations().waitForBalance();
}
Also used : Connector(org.apache.accumulo.core.client.Connector) TreeSet(java.util.TreeSet) Text(org.apache.hadoop.io.Text) Test(org.junit.Test)
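waitForBalance() blocks until the master reports that tablets are evenly assigned. As a follow-up sketch, assuming the same Connector c and slf4j log as in the test above, InstanceOperations can list the tablet servers the tablets were balanced across:

    // Log the live tablet servers participating in the balance.
    for (String tserver : c.instanceOperations().getTabletServers()) {
        log.info("tablet server: {}", tserver);
    }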

Aggregations

Connector (org.apache.accumulo.core.client.Connector): 622 uses
Test (org.junit.Test): 415 uses
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 171 uses
Value (org.apache.accumulo.core.data.Value): 162 uses
Text (org.apache.hadoop.io.Text): 160 uses
Scanner (org.apache.accumulo.core.client.Scanner): 158 uses
Mutation (org.apache.accumulo.core.data.Mutation): 152 uses
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 143 uses
Key (org.apache.accumulo.core.data.Key): 139 uses
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 101 uses
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 87 uses
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 83 uses
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 75 uses
Range (org.apache.accumulo.core.data.Range): 74 uses
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 65 uses
Authorizations (org.apache.accumulo.core.security.Authorizations): 60 uses
HashSet (java.util.HashSet): 57 uses
Instance (org.apache.accumulo.core.client.Instance): 55 uses
ArrayList (java.util.ArrayList): 53 uses
Entry (java.util.Map.Entry): 53 uses