Search in sources :

Example 21 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class BadIteratorMincIT, method test.

// Verifies that a broken minor-compaction (minc) iterator does not make a table
// unusable: data must remain scannable from memory while minc fails, minc must
// succeed once the iterator is removed, and table deletion must not hang even
// with the bad iterator re-attached while a flush is in flight.
@Test
public void test() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Attach BadIterator to the minc scope so minor compactions fail.
    IteratorSetting is = new IteratorSetting(30, BadIterator.class);
    c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("r1"));
    m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.close();
    // Request a flush (non-blocking: wait=false) and give it time to attempt minc.
    c.tableOperations().flush(tableName, null, null, false);
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    // minc should fail, so there should be no files
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
    // try to scan table
    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
        int count = Iterators.size(scanner.iterator());
        assertEquals("Did not see expected # entries " + count, 1, count);
        // remove the bad iterator
        c.tableOperations().removeIterator(tableName, BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
        sleepUninterruptibly(5, TimeUnit.SECONDS);
        // minc should complete
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
        // The single entry must still be visible after the successful minc.
        count = Iterators.size(scanner.iterator());
        if (count != 1)
            throw new Exception("Did not see expected # entries " + count);
        // now try putting bad iterator back and deleting the table
        c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
        bw = c.createBatchWriter(tableName, new BatchWriterConfig());
        m = new Mutation(new Text("r2"));
        m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
        bw.addMutation(m);
        bw.close();
        // make sure property is given time to propagate
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
        c.tableOperations().flush(tableName, null, null, false);
        // make sure the flush has time to start
        sleepUninterruptibly(1, TimeUnit.SECONDS);
        // this should not hang
        c.tableOperations().delete(tableName);
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)

Example 22 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class BatchWriterFlushIT, method runFlushTest.

/**
 * Writes four batches of NUM_TO_FLUSH mutations, calling flush() after each
 * batch, and verifies through a scanner that every flushed row is immediately
 * visible: 10 random rows are spot-checked, then the whole batch range is
 * scanned. Finally confirms that adding a mutation to a closed BatchWriter
 * throws IllegalStateException.
 *
 * @param tableName table to write to and scan; must already exist
 * @throws Exception if any flushed data is missing or extra data is returned
 */
private void runFlushTest(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException, Exception {
    BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
    try (Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY)) {
        Random r = new Random();
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < NUM_TO_FLUSH; j++) {
                int row = i * NUM_TO_FLUSH + j;
                Mutation m = new Mutation(new Text(String.format("r_%10d", row)));
                // Explicit charset: bare getBytes() uses the platform default,
                // which is not portable (other examples in this file use UTF_8).
                m.put(new Text("cf"), new Text("cq"), new Value(("" + row).getBytes(java.nio.charset.StandardCharsets.UTF_8)));
                bw.addMutation(m);
            }
            bw.flush();
            // Spot-check 10 random rows from the batch that was just flushed.
            for (int k = 0; k < 10; k++) {
                int rowToLookup = r.nextInt(NUM_TO_FLUSH) + i * NUM_TO_FLUSH;
                scanner.setRange(new Range(new Text(String.format("r_%10d", rowToLookup))));
                Iterator<Entry<Key, Value>> iter = scanner.iterator();
                if (!iter.hasNext())
                    throw new Exception(" row " + rowToLookup + " not found after flush");
                Entry<Key, Value> entry = iter.next();
                if (iter.hasNext())
                    throw new Exception("Scanner returned too much");
                verifyEntry(rowToLookup, entry);
            }
            // scan all data just flushed
            scanner.setRange(new Range(new Text(String.format("r_%10d", i * NUM_TO_FLUSH)), true, new Text(String.format("r_%10d", (i + 1) * NUM_TO_FLUSH)), false));
            Iterator<Entry<Key, Value>> iter = scanner.iterator();
            for (int j = 0; j < NUM_TO_FLUSH; j++) {
                int row = i * NUM_TO_FLUSH + j;
                if (!iter.hasNext())
                    throw new Exception("Scan stopped prematurely at " + row);
                Entry<Key, Value> entry = iter.next();
                verifyEntry(row, entry);
            }
            if (iter.hasNext())
                throw new Exception("Scanner returned too much");
        }
        bw.close();
        // test adding a mutation to a closed batch writer
        boolean caught = false;
        try {
            bw.addMutation(new Mutation(new Text("foobar")));
        } catch (IllegalStateException ise) {
            caught = true;
        }
        if (!caught) {
            throw new Exception("Adding to closed batch writer did not fail");
        }
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Entry(java.util.Map.Entry) Random(java.util.Random) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key)

Example 23 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class DeletedTablesDontFlushIT, method test.

// A table whose minor compactions are artificially slowed must still delete
// quickly: deleting the table should not wait on an in-progress slow minc.
@Test
public void test() throws Exception {
    Connector conn = getConnector();
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    // Attach an iterator that sleeps 1s per entry during minor compactions.
    IteratorSetting slow = new IteratorSetting(100, SlowIterator.class);
    SlowIterator.setSleepTime(slow, 1000);
    conn.tableOperations().attachIterator(table, slow, EnumSet.of(IteratorScope.minc));
    // let the configuration change propagate through zookeeper
    UtilWaitThread.sleep(1000);
    Mutation mutation = new Mutation("xyzzy");
    for (int col = 0; col < 100; col++) {
        mutation.put("cf", Integer.toString(col), new Value(new byte[] {}));
    }
    BatchWriter writer = conn.createBatchWriter(table, new BatchWriterConfig());
    writer.addMutation(mutation);
    writer.close();
    // should go fast
    conn.tableOperations().delete(table);
}
Also used : Connector(org.apache.accumulo.core.client.Connector) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Mutation(org.apache.accumulo.core.data.Mutation) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Test(org.junit.Test)

Example 24 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class CreateAndUseIT, method verifyDataIsPresent.

// Creates a pre-split table, writes 256 rows with computed hex row ids, and
// scans the whole table back, checking each row id and value in order.
@Test
public void verifyDataIsPresent() throws Exception {
    Text family = new Text("cf1");
    Text qualifier = new Text("cq1");
    String table = getUniqueNames(1)[0];
    getConnector().tableOperations().create(table);
    getConnector().tableOperations().addSplits(table, splits);
    BatchWriter writer = getConnector().createBatchWriter(table, new BatchWriterConfig());
    for (int i = 1; i <= 256; i++) {
        // Row id is the 8-digit hex encoding of (i << 8) - 16.
        String rowId = String.format("%08x", (i << 8) - 16);
        Mutation mutation = new Mutation(new Text(rowId));
        mutation.put(family, qualifier, new Value(Integer.toString(i).getBytes(UTF_8)));
        writer.addMutation(mutation);
    }
    writer.close();
    try (Scanner scanner1 = getConnector().createScanner(table, Authorizations.EMPTY)) {
        int expected = 1;
        for (Entry<Key, Value> entry : scanner1) {
            Assert.assertEquals(String.format("%08x", (expected << 8) - 16), entry.getKey().getRow().toString());
            Assert.assertEquals(Integer.toString(expected), entry.getValue().toString());
            expected++;
        }
        Assert.assertEquals("Did not see expected number of rows", 257, expected);
    }
}
Also used : BatchScanner(org.apache.accumulo.core.client.BatchScanner) Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 25 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

The class DeleteEverythingIT, method run.

/**
 * Verifies that deleting every entry in a table works end to end: one value is
 * written and flushed to disk, then deleted; the table must scan as empty, and
 * after lowering TABLE_MAJC_RATIO a major compaction should remove the files
 * (checked via checkRFiles).
 */
@Test
public void run() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    // Consistency fix: use the single Connector `c` throughout instead of the
    // original's mix of `c` and repeated getConnector() calls.
    BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation(new Text("foo"));
    m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
    bw.addMutation(m);
    bw.flush();
    // Blocking flush (wait=true) so the entry is on disk before the file check.
    c.tableOperations().flush(tableName, null, null, true);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
    m = new Mutation(new Text("foo"));
    m.putDelete(new Text("bar"), new Text("1910"));
    bw.addMutation(m);
    bw.flush();
    try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
        scanner.setRange(new Range());
        int count = Iterators.size(scanner.iterator());
        assertEquals("count == " + count, 0, count);
        c.tableOperations().flush(tableName, null, null, true);
        // Lower the compaction ratio so a major compaction runs and drops the
        // now fully-deleted data files.
        c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
        sleepUninterruptibly(4, TimeUnit.SECONDS);
        FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
        bw.close();
        count = Iterables.size(scanner);
        if (count != 0) {
            throw new Exception("count == " + count);
        }
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Range(org.apache.accumulo.core.data.Range) Test(org.junit.Test)

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig)282 BatchWriter (org.apache.accumulo.core.client.BatchWriter)246 Mutation (org.apache.accumulo.core.data.Mutation)224 Test (org.junit.Test)171 Value (org.apache.accumulo.core.data.Value)166 Connector (org.apache.accumulo.core.client.Connector)142 Scanner (org.apache.accumulo.core.client.Scanner)121 Key (org.apache.accumulo.core.data.Key)121 Text (org.apache.hadoop.io.Text)119 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)50 Entry (java.util.Map.Entry)42 Range (org.apache.accumulo.core.data.Range)42 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)41 BatchScanner (org.apache.accumulo.core.client.BatchScanner)36 Authorizations (org.apache.accumulo.core.security.Authorizations)36 AccumuloException (org.apache.accumulo.core.client.AccumuloException)35 PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken)32 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)29 MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException)29 HashMap (java.util.HashMap)24