Search in sources :

Example 21 with BatchWriter

use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class BatchWriterFlushIT, method runFlushTest.

/**
 * Writes four batches of NUM_TO_FLUSH mutations to {@code tableName}, flushing
 * after each batch and verifying through a scanner that every flushed row is
 * immediately visible (10 random spot-checks plus a full range scan per batch).
 * Finally closes the writer and verifies that adding a mutation to a closed
 * BatchWriter throws IllegalStateException.
 *
 * @param tableName the table to write to and scan back
 * @throws Exception if a flushed row is missing, a scan returns extra data,
 *         or adding to the closed writer does not fail
 */
private void runFlushTest(String tableName) throws Exception {
    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    try (Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY)) {
        Random r = new Random();
        for (int i = 0; i < 4; i++) {
            // write one batch of NUM_TO_FLUSH consecutive rows
            for (int j = 0; j < NUM_TO_FLUSH; j++) {
                int row = i * NUM_TO_FLUSH + j;
                Mutation m = new Mutation(new Text(String.format("r_%10d", row)));
                m.put(new Text("cf"), new Text("cq"), new Value(("" + row).getBytes()));
                bw.addMutation(m);
            }
            bw.flush();
            // spot-check 10 random rows from the batch that was just flushed
            for (int k = 0; k < 10; k++) {
                int rowToLookup = r.nextInt(NUM_TO_FLUSH) + i * NUM_TO_FLUSH;
                scanner.setRange(new Range(new Text(String.format("r_%10d", rowToLookup))));
                Iterator<Entry<Key, Value>> iter = scanner.iterator();
                if (!iter.hasNext())
                    throw new Exception(" row " + rowToLookup + " not found after flush");
                Entry<Key, Value> entry = iter.next();
                if (iter.hasNext())
                    throw new Exception("Scanner returned too much");
                verifyEntry(rowToLookup, entry);
            }
            // scan all data just flushed
            scanner.setRange(new Range(new Text(String.format("r_%10d", i * NUM_TO_FLUSH)), true, new Text(String.format("r_%10d", (i + 1) * NUM_TO_FLUSH)), false));
            Iterator<Entry<Key, Value>> iter = scanner.iterator();
            for (int j = 0; j < NUM_TO_FLUSH; j++) {
                int row = i * NUM_TO_FLUSH + j;
                if (!iter.hasNext())
                    throw new Exception("Scan stopped prematurely at " + row);
                Entry<Key, Value> entry = iter.next();
                verifyEntry(row, entry);
            }
            if (iter.hasNext())
                throw new Exception("Scanner returned too much");
        }
        bw.close();
        // adding a mutation after close() must fail with IllegalStateException
        boolean caught = false;
        try {
            bw.addMutation(new Mutation(new Text("foobar")));
        } catch (IllegalStateException ise) {
            caught = true;
        }
        if (!caught) {
            throw new Exception("Adding to closed batch writer did not fail");
        }
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Entry(java.util.Map.Entry) Random(java.util.Random) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key)

Example 22 with BatchWriter

use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class DeletedTablesDontFlushIT, method test.

/**
 * Attaches a deliberately slow minor-compaction iterator to a fresh table,
 * writes one mutation with 100 entries, then deletes the table. The delete
 * is expected to return quickly (deleted tables should not be flushed, which
 * would otherwise be slowed down by the iterator).
 */
@Test
public void test() throws Exception {
    Connector conn = getConnector();
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);

    // make minor compactions slow so an unwanted flush-on-delete would be visible
    IteratorSetting slow = new IteratorSetting(100, SlowIterator.class);
    SlowIterator.setSleepTime(slow, 1000);
    conn.tableOperations().attachIterator(table, slow, EnumSet.of(IteratorScope.minc));
    // let the configuration change propagate through zookeeper
    UtilWaitThread.sleep(1000);

    // one mutation, 100 columns, empty values
    Mutation m = new Mutation("xyzzy");
    for (int col = 0; col < 100; col++) {
        m.put("cf", "" + col, new Value(new byte[] {}));
    }
    BatchWriter writer = conn.createBatchWriter(table, new BatchWriterConfig());
    writer.addMutation(m);
    writer.close();

    // should go fast
    conn.tableOperations().delete(table);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Mutation(org.apache.accumulo.core.data.Mutation) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Test(org.junit.Test)

Example 23 with BatchWriter

use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class CreateAndUseIT, method verifyDataIsPresent.

/**
 * Writes 256 rows to a freshly created, pre-split table and scans them back,
 * asserting that every expected row and value appears in order.
 */
@Test
public void verifyDataIsPresent() throws Exception {
    Text family = new Text("cf1");
    Text qualifier = new Text("cq1");
    String tableName = getUniqueNames(1)[0];
    getConnector().tableOperations().create(tableName);
    getConnector().tableOperations().addSplits(tableName, splits);

    // write row (i << 8) - 16 with value i, for i in [1, 256]
    BatchWriter writer = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    for (int i = 1; i <= 256; i++) {
        Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
        m.put(family, qualifier, new Value(Integer.toString(i).getBytes(UTF_8)));
        writer.addMutation(m);
    }
    writer.close();

    // read everything back and verify rows/values arrive in the expected order
    try (Scanner scanner1 = getConnector().createScanner(tableName, Authorizations.EMPTY)) {
        int expected = 1;
        for (Entry<Key, Value> entry : scanner1) {
            Assert.assertEquals(String.format("%08x", (expected << 8) - 16), entry.getKey().getRow().toString());
            Assert.assertEquals(Integer.toString(expected), entry.getValue().toString());
            expected++;
        }
        // 256 entries leave the counter at 257
        Assert.assertEquals("Did not see expected number of rows", 257, expected);
    }
}
Also used : BatchScanner(org.apache.accumulo.core.client.BatchScanner) Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 24 with BatchWriter

use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class DeleteEverythingIT, method run.

/**
 * Writes a single entry and flushes it into an RFile, then deletes that entry
 * and verifies the table scans as empty both before and after a major
 * compaction (triggered by lowering TABLE_MAJC_RATIO).
 */
@Test
public void run() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("foo"));
    m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.flush();
    // force a minor compaction so the entry lands in an RFile
    getConnector().tableOperations().flush(tableName, null, null, true);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
    // now delete the entry
    m = new Mutation(new Text("foo"));
    m.putDelete(new Text("bar"), new Text("1910"));
    bw.addMutation(m);
    bw.flush();
    try (Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY)) {
        scanner.setRange(new Range());
        int count = Iterators.size(scanner.iterator());
        assertEquals("count == " + count, 0, count);
        getConnector().tableOperations().flush(tableName, null, null, true);
        // lower the compaction ratio so a major compaction runs, then wait for it
        getConnector().tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
        sleepUninterruptibly(4, TimeUnit.SECONDS);
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
        bw.close();
        // after major compaction the table must still be empty; use assertEquals
        // for consistency with the check above instead of throwing a bare Exception
        count = Iterables.size(scanner);
        assertEquals("count == " + count, 0, count);
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Range(org.apache.accumulo.core.data.Range) Test(org.junit.Test)

Example 25 with BatchWriter

use of org.apache.accumulo.core.client.BatchWriter in project accumulo by apache.

The class KeyValueEqualityIT, method testEquality.

/**
 * Writes identical data to two tables, then scans both in lockstep and asserts
 * that the Entry&lt;Key, Value&gt; objects returned (KeyValue instances) compare
 * equal and produce equal hash codes, and that both scans are the same length.
 */
@Test
public void testEquality() throws Exception {
    Connector conn = this.getConnector();
    final BatchWriterConfig config = new BatchWriterConfig();
    final String[] tables = getUniqueNames(2);
    final String table1 = tables[0], table2 = tables[1];
    final TableOperations tops = conn.tableOperations();
    tops.create(table1);
    tops.create(table2);
    // write the same 100 rows x 10 columns to both tables
    final BatchWriter bw1 = conn.createBatchWriter(table1, config), bw2 = conn.createBatchWriter(table2, config);
    for (int row = 0; row < 100; row++) {
        Mutation m = new Mutation(Integer.toString(row));
        for (int col = 0; col < 10; col++) {
            m.put(Integer.toString(col), "", System.currentTimeMillis(), Integer.toString(col * 2));
        }
        bw1.addMutation(m);
        bw2.addMutation(m);
    }
    bw1.close();
    bw2.close();
    // use try-with-resources so the scanners are closed (the original leaked both)
    try (Scanner s1 = conn.createScanner(table1, Authorizations.EMPTY);
        Scanner s2 = conn.createScanner(table2, Authorizations.EMPTY)) {
        Iterator<Entry<Key, Value>> t1 = s1.iterator(), t2 = s2.iterator();
        while (t1.hasNext() && t2.hasNext()) {
            // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode properly
            Entry<Key, Value> e1 = t1.next(), e2 = t2.next();
            Assert.assertEquals(e1, e2);
            Assert.assertEquals(e1.hashCode(), e2.hashCode());
        }
        Assert.assertFalse("table1 had more data to read", t1.hasNext());
        Assert.assertFalse("table2 had more data to read", t2.hasNext());
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Entry(java.util.Map.Entry) TableOperations(org.apache.accumulo.core.client.admin.TableOperations) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Aggregations

BatchWriter (org.apache.accumulo.core.client.BatchWriter)402 Mutation (org.apache.accumulo.core.data.Mutation)360 Test (org.junit.Test)264 Value (org.apache.accumulo.core.data.Value)250 BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)246 Text (org.apache.hadoop.io.Text)194 Key (org.apache.accumulo.core.data.Key)179 Scanner (org.apache.accumulo.core.client.Scanner)174 Connector (org.apache.accumulo.core.client.Connector)169 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)81 Authorizations (org.apache.accumulo.core.security.Authorizations)68 Range (org.apache.accumulo.core.data.Range)61 Entry (java.util.Map.Entry)51 Map (java.util.Map)50 BatchScanner (org.apache.accumulo.core.client.BatchScanner)46 MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException)44 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)40 HashMap (java.util.HashMap)38 ArrayList (java.util.ArrayList)36 Status (org.apache.accumulo.server.replication.proto.Replication.Status)32