
Example 1 with BatchScanOptions

Use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.

From the class SimpleProxyBase, method tableNotFound.

@Test
public void tableNotFound() throws Exception {
    final String doesNotExist = "doesNotExists";
    try {
        client.addConstraint(creds, doesNotExist, NumericValueConstraint.class.getName());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.addSplits(creds, doesNotExist, Collections.<ByteBuffer>emptySet());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
    try {
        client.attachIterator(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.cancelCompaction(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.checkIteratorConflicts(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.clearLocatorCache(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        final String TABLE_TEST = getUniqueNames(1)[0];
        client.cloneTable(creds, doesNotExist, TABLE_TEST, false, null, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.compactTable(creds, doesNotExist, null, null, null, true, false, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createBatchScanner(creds, doesNotExist, new BatchScanOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createScanner(creds, doesNotExist, new ScanOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createWriter(creds, doesNotExist, new WriterOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.deleteRows(creds, doesNotExist, null, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.deleteTable(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.exportTable(creds, doesNotExist, "/tmp");
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.flushTable(creds, doesNotExist, null, null, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getIteratorSetting(creds, doesNotExist, "foo", IteratorScope.SCAN);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getLocalityGroups(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getMaxRow(creds, doesNotExist, Collections.<ByteBuffer>emptySet(), null, false, null, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getTableProperties(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.grantTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.hasTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
        Path base = cluster.getTemporaryPath();
        Path importDir = new Path(base, "importDir");
        Path failuresDir = new Path(base, "failuresDir");
        assertTrue(cluster.getFileSystem().mkdirs(importDir));
        assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
        client.importDirectory(creds, doesNotExist, importDir.toString(), failuresDir.toString(), true);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.listConstraints(creds, doesNotExist);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.listSplits(creds, doesNotExist, 10000);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.mergeTablets(creds, doesNotExist, null, null);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.offlineTable(creds, doesNotExist, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.onlineTable(creds, doesNotExist, false);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.removeConstraint(creds, doesNotExist, 0);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.removeIterator(creds, doesNotExist, "name", EnumSet.allOf(IteratorScope.class));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.removeTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.renameTable(creds, doesNotExist, "someTableName");
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.revokeTablePermission(creds, "root", doesNotExist, TablePermission.ALTER_TABLE);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.setTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey(), "0");
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.splitRangeByTablets(creds, doesNotExist, client.getRowRange(ByteBuffer.wrap("row".getBytes(UTF_8))), 10);
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.updateAndFlush(creds, doesNotExist, new HashMap<ByteBuffer, List<ColumnUpdate>>());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.getDiskUsage(creds, Collections.singleton(doesNotExist));
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.testTableClassLoad(creds, doesNotExist, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
    try {
        client.createConditionalWriter(creds, doesNotExist, new ConditionalWriterOptions());
        fail("exception not thrown");
    } catch (TableNotFoundException ex) {
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BatchScanOptions(org.apache.accumulo.proxy.thrift.BatchScanOptions) SortedKeyValueIterator(org.apache.accumulo.core.iterators.SortedKeyValueIterator) ByteBuffer(java.nio.ByteBuffer) TableNotFoundException(org.apache.accumulo.proxy.thrift.TableNotFoundException) IteratorSetting(org.apache.accumulo.proxy.thrift.IteratorSetting) ConditionalWriterOptions(org.apache.accumulo.proxy.thrift.ConditionalWriterOptions) WriterOptions(org.apache.accumulo.proxy.thrift.WriterOptions) ScanOptions(org.apache.accumulo.proxy.thrift.ScanOptions) ArrayList(java.util.ArrayList) List(java.util.List) IteratorScope(org.apache.accumulo.proxy.thrift.IteratorScope) VersioningIterator(org.apache.accumulo.core.iterators.user.VersioningIterator) NumericValueConstraint(org.apache.accumulo.test.constraints.NumericValueConstraint) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) SlowIterator(org.apache.accumulo.test.functional.SlowIterator) Test(org.junit.Test)
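
The test above repeats the same try/fail("exception not thrown")/catch idiom for every proxy call. If the project is on JUnit 4.13 or later (an assumption; the excerpt only shows org.junit.Test), the same checks can be written more compactly with Assert.assertThrows. A minimal sketch, reusing the client, creds, and doesNotExist names from the test above and covering three of the calls:

import static org.junit.Assert.assertThrows;

@Test
public void tableNotFoundCompact() throws Exception {
    final String doesNotExist = "doesNotExists";
    // assertThrows fails the test if the call does not throw TableNotFoundException.
    assertThrows(TableNotFoundException.class,
        () -> client.createBatchScanner(creds, doesNotExist, new BatchScanOptions()));
    assertThrows(TableNotFoundException.class,
        () -> client.createScanner(creds, doesNotExist, new ScanOptions()));
    assertThrows(TableNotFoundException.class,
        () -> client.deleteTable(creds, doesNotExist));
}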

Example 2 with BatchScanOptions

Use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.

From the class TestProxyReadWrite, method readWriteBatchOneShotWithFullColumn.

/**
 * Insert 100000 cells whose rows are [0..99999] (zero-padded). Set a columnFamily + columnQualifier so only the entries with the specified column
 * come back (there should be 50,000, since the writes alternate between cf0/cq0 and cf1/cq1).
 */
@Test
public void readWriteBatchOneShotWithFullColumn() throws Exception {
    int maxInserts = 100000;
    Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
    String format = "%1$05d";
    for (int i = 0; i < maxInserts; i++) {
        addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
        if (i % 1000 == 0 || i == maxInserts - 1) {
            tpc.proxy().updateAndFlush(userpass, testtable, mutations);
            mutations.clear();
        }
    }
    BatchScanOptions options = new BatchScanOptions();
    ScanColumn sc = new ScanColumn();
    sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
    sc.colQualifier = ByteBuffer.wrap("cq0".getBytes());
    options.columns = Collections.singletonList(sc);
    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
    int i = 0;
    boolean hasNext = true;
    int k = 1000;
    while (hasNext) {
        ScanResult kvList = tpc.proxy().nextK(cookie, k);
        i += kvList.getResultsSize();
        hasNext = kvList.isMore();
    }
    assertEquals(50000, i);
}
Also used : ScanResult(org.apache.accumulo.proxy.thrift.ScanResult) HashMap(java.util.HashMap) BatchScanOptions(org.apache.accumulo.proxy.thrift.BatchScanOptions) List(java.util.List) ScanColumn(org.apache.accumulo.proxy.thrift.ScanColumn) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
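
BatchScanOptions is a plain Thrift struct, so the column restriction above can be combined with the range restriction from Example 4 below on a single scanner. A sketch under that assumption (field names as shown in the examples on this page; scanner cleanup via the proxy's closeScanner call, which is not part of the original test):

BatchScanOptions options = new BatchScanOptions();

// Restrict to column family cf0, as in this example.
ScanColumn col = new ScanColumn();
col.colFamily = ByteBuffer.wrap("cf0".getBytes());
options.columns = Collections.singletonList(col);

// And to rows strictly below "5", as in Example 4 below.
Key stop = new Key();
stop.setRow("5".getBytes());
options.ranges = Collections.singletonList(new Range(null, false, stop, false));

String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
try {
    ScanResult batch = tpc.proxy().nextK(cookie, 1000);
    // ... consume batch.getResults(), looping while batch.isMore() ...
} finally {
    tpc.proxy().closeScanner(cookie);
}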

Example 3 with BatchScanOptions

Use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.

From the class TestProxyReadWrite, method readWriteBatchOneShotWithColumnFamilyOnly.

/**
 * Insert 100000 cells whose rows are [0..99999] (zero-padded). Set a columnFamily so only the entries with the specified column family come back
 * (there should be 50,000, since the writes alternate between cf0 and cf1).
 */
@Test
public void readWriteBatchOneShotWithColumnFamilyOnly() throws Exception {
    int maxInserts = 100000;
    Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
    String format = "%1$05d";
    for (int i = 0; i < maxInserts; i++) {
        addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
        if (i % 1000 == 0 || i == maxInserts - 1) {
            tpc.proxy().updateAndFlush(userpass, testtable, mutations);
            mutations.clear();
        }
    }
    BatchScanOptions options = new BatchScanOptions();
    ScanColumn sc = new ScanColumn();
    sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
    options.columns = Collections.singletonList(sc);
    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
    int i = 0;
    boolean hasNext = true;
    int k = 1000;
    while (hasNext) {
        ScanResult kvList = tpc.proxy().nextK(cookie, k);
        i += kvList.getResultsSize();
        hasNext = kvList.isMore();
    }
    assertEquals(50000, i);
}
Also used : ScanResult(org.apache.accumulo.proxy.thrift.ScanResult) HashMap(java.util.HashMap) BatchScanOptions(org.apache.accumulo.proxy.thrift.BatchScanOptions) List(java.util.List) ScanColumn(org.apache.accumulo.proxy.thrift.ScanColumn) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
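
The addMutation helper used by these tests is not shown in the excerpt. A plausible sketch of it (hypothetical; the real helper in TestProxyReadWrite may differ) builds a proxy ColumnUpdate and appends it to the per-row list in the mutations map:

// Hypothetical sketch of the addMutation helper referenced above; constructor and setter
// follow the proxy Thrift ColumnUpdate struct used elsewhere in these tests.
private static void addMutation(Map<ByteBuffer, List<ColumnUpdate>> mutations,
        String row, String cf, String cq, String value) {
    ColumnUpdate update = new ColumnUpdate(
        ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()));
    update.setValue(ByteBuffer.wrap(value.getBytes()));
    mutations.computeIfAbsent(ByteBuffer.wrap(row.getBytes()), k -> new ArrayList<>())
        .add(update);
}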

Example 4 with BatchScanOptions

Use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.

From the class TestProxyReadWrite, method readWriteBatchOneShotWithRange.

/**
 * Insert 100000 cells whose rows are [0..99999] (zero-padded). Set a range from -Inf up to row "5" (exclusive) so only the zero-padded rows 00000..49999
 * come back (there should be 50,000).
 */
@Test
public void readWriteBatchOneShotWithRange() throws Exception {
    int maxInserts = 100000;
    Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
    String format = "%1$05d";
    for (int i = 0; i < maxInserts; i++) {
        addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
        if (i % 1000 == 0 || i == maxInserts - 1) {
            tpc.proxy().updateAndFlush(userpass, testtable, mutations);
            mutations.clear();
        }
    }
    Key stop = new Key();
    stop.setRow("5".getBytes());
    BatchScanOptions options = new BatchScanOptions();
    options.ranges = Collections.singletonList(new Range(null, false, stop, false));
    String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
    int i = 0;
    boolean hasNext = true;
    int k = 1000;
    while (hasNext) {
        ScanResult kvList = tpc.proxy().nextK(cookie, k);
        i += kvList.getResultsSize();
        hasNext = kvList.isMore();
    }
    assertEquals(50000, i);
}
Also used : ScanResult(org.apache.accumulo.proxy.thrift.ScanResult) HashMap(java.util.HashMap) BatchScanOptions(org.apache.accumulo.proxy.thrift.BatchScanOptions) List(java.util.List) Range(org.apache.accumulo.proxy.thrift.Range) ByteBuffer(java.nio.ByteBuffer) Key(org.apache.accumulo.proxy.thrift.Key) Test(org.junit.Test)
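
Each of these read/write tests drains the scanner with the same nextK/isMore loop. A small helper (a sketch, not part of the original class; it assumes the proxy's closeScanner call for releasing the server-side scanner identified by the cookie) could centralize the counting and the cleanup:

// Sketch of a drain-and-close helper for proxy scanners; not part of the original test class.
private int countAll(String cookie, int batchSize) throws Exception {
    int total = 0;
    try {
        boolean more = true;
        while (more) {
            ScanResult batch = tpc.proxy().nextK(cookie, batchSize);
            total += batch.getResultsSize();
            more = batch.isMore();
        }
    } finally {
        // Release the scanner on the proxy even if nextK fails part-way through.
        tpc.proxy().closeScanner(cookie);
    }
    return total;
}

With that in place, the loop in each test collapses to assertEquals(50000, countAll(cookie, 1000)).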

Aggregations

ByteBuffer (java.nio.ByteBuffer): 4
List (java.util.List): 4
BatchScanOptions (org.apache.accumulo.proxy.thrift.BatchScanOptions): 4
Test (org.junit.Test): 4
HashMap (java.util.HashMap): 3
ScanResult (org.apache.accumulo.proxy.thrift.ScanResult): 3
ScanColumn (org.apache.accumulo.proxy.thrift.ScanColumn): 2
ArrayList (java.util.ArrayList): 1
SortedKeyValueIterator (org.apache.accumulo.core.iterators.SortedKeyValueIterator): 1
VersioningIterator (org.apache.accumulo.core.iterators.user.VersioningIterator): 1
MiniAccumuloClusterImpl (org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl): 1
ConditionalWriterOptions (org.apache.accumulo.proxy.thrift.ConditionalWriterOptions): 1
IteratorScope (org.apache.accumulo.proxy.thrift.IteratorScope): 1
IteratorSetting (org.apache.accumulo.proxy.thrift.IteratorSetting): 1
Key (org.apache.accumulo.proxy.thrift.Key): 1
Range (org.apache.accumulo.proxy.thrift.Range): 1
ScanOptions (org.apache.accumulo.proxy.thrift.ScanOptions): 1
TableNotFoundException (org.apache.accumulo.proxy.thrift.TableNotFoundException): 1
WriterOptions (org.apache.accumulo.proxy.thrift.WriterOptions): 1
NumericValueConstraint (org.apache.accumulo.test.constraints.NumericValueConstraint): 1