use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.
the class SimpleProxyBase method tableNotFound.
@Test
public void tableNotFound() throws Exception {
  final String doesNotExist = "doesNotExists";
  try {
    client.addConstraint(creds, doesNotExist, NumericValueConstraint.class.getName());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.addSplits(creds, doesNotExist, Collections.<ByteBuffer>emptySet());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
  try {
    client.attachIterator(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.cancelCompaction(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.checkIteratorConflicts(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.clearLocatorCache(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    final String TABLE_TEST = getUniqueNames(1)[0];
    client.cloneTable(creds, doesNotExist, TABLE_TEST, false, null, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.compactTable(creds, doesNotExist, null, null, null, true, false, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createBatchScanner(creds, doesNotExist, new BatchScanOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createScanner(creds, doesNotExist, new ScanOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createWriter(creds, doesNotExist, new WriterOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.deleteRows(creds, doesNotExist, null, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.deleteTable(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.exportTable(creds, doesNotExist, "/tmp");
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.flushTable(creds, doesNotExist, null, null, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getIteratorSetting(creds, doesNotExist, "foo", IteratorScope.SCAN);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getLocalityGroups(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getMaxRow(creds, doesNotExist, Collections.<ByteBuffer>emptySet(), null, false, null, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getTableProperties(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.grantTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.hasTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
    Path base = cluster.getTemporaryPath();
    Path importDir = new Path(base, "importDir");
    Path failuresDir = new Path(base, "failuresDir");
    assertTrue(cluster.getFileSystem().mkdirs(importDir));
    assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
    client.importDirectory(creds, doesNotExist, importDir.toString(), failuresDir.toString(), true);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.listConstraints(creds, doesNotExist);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.listSplits(creds, doesNotExist, 10000);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.mergeTablets(creds, doesNotExist, null, null);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.offlineTable(creds, doesNotExist, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.onlineTable(creds, doesNotExist, false);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.removeConstraint(creds, doesNotExist, 0);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.removeIterator(creds, doesNotExist, "name", EnumSet.allOf(IteratorScope.class));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.removeTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.renameTable(creds, doesNotExist, "someTableName");
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.revokeTablePermission(creds, "root", doesNotExist, TablePermission.ALTER_TABLE);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.setTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey(), "0");
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.splitRangeByTablets(creds, doesNotExist, client.getRowRange(ByteBuffer.wrap("row".getBytes(UTF_8))), 10);
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.updateAndFlush(creds, doesNotExist, new HashMap<ByteBuffer, List<ColumnUpdate>>());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.getDiskUsage(creds, Collections.singleton(doesNotExist));
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.testTableClassLoad(creds, doesNotExist, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
  try {
    client.createConditionalWriter(creds, doesNotExist, new ConditionalWriterOptions());
    fail("exception not thrown");
  } catch (TableNotFoundException ex) {}
}
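Every call above follows the same try/fail/catch shape. A minimal sketch of how that boilerplate could be factored out, assuming Java 8+ and the same static imports as the test; the ThrowingCall interface and expectTableNotFound helper are hypothetical names, not part of the Accumulo test base:

// Hypothetical functional interface so proxy calls can be passed as lambdas.
@FunctionalInterface
interface ThrowingCall {
  void run() throws Exception;
}

// Hypothetical helper: runs the call and asserts it throws TableNotFoundException.
static void expectTableNotFound(ThrowingCall call) throws Exception {
  try {
    call.run();
    fail("exception not thrown");
  } catch (TableNotFoundException expected) {
    // expected: the table does not exist
  }
}

// Usage, condensing two of the blocks above:
// expectTableNotFound(() -> client.deleteTable(creds, doesNotExist));
// expectTableNotFound(() -> client.listConstraints(creds, doesNotExist));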
use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.
the class TestProxyReadWrite method readWriteBatchOneShotWithFullColumn.
/**
 * Insert 100,000 cells whose rows are [0..99999], zero-padded. Set a column family and column qualifier so only the entries with the specified column
 * come back (there should be 50,000).
 */
@Test
public void readWriteBatchOneShotWithFullColumn() throws Exception {
  int maxInserts = 100000;
  Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
  String format = "%1$05d";
  for (int i = 0; i < maxInserts; i++) {
    addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
    if (i % 1000 == 0 || i == maxInserts - 1) {
      tpc.proxy().updateAndFlush(userpass, testtable, mutations);
      mutations.clear();
    }
  }
  BatchScanOptions options = new BatchScanOptions();
  ScanColumn sc = new ScanColumn();
  sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
  sc.colQualifier = ByteBuffer.wrap("cq0".getBytes());
  options.columns = Collections.singletonList(sc);
  String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
  int i = 0;
  boolean hasNext = true;
  int k = 1000;
  while (hasNext) {
    ScanResult kvList = tpc.proxy().nextK(cookie, k);
    i += kvList.getResultsSize();
    hasNext = kvList.isMore();
  }
  assertEquals(50000, i);
}
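The cookie returned by createBatchScanner identifies server-side scanner state that lives until it is closed or times out. A short sketch of draining and then releasing the scanner with the proxy's closeScanner call, reusing the tpc/userpass/testtable fixtures from the test above:

String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
try {
  boolean more = true;
  while (more) {
    ScanResult batch = tpc.proxy().nextK(cookie, 1000);
    // process batch.getResults() here
    more = batch.isMore();
  }
} finally {
  // Release the server-side scanner instead of waiting for it to time out.
  tpc.proxy().closeScanner(cookie);
}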
use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.
the class TestProxyReadWrite method readWriteBatchOneShotWithColumnFamilyOnly.
/**
 * Insert 100,000 cells whose rows are [0..99999], zero-padded. Set a column family so only the entries with the specified column family come back
 * (there should be 50,000).
 */
@Test
public void readWriteBatchOneShotWithColumnFamilyOnly() throws Exception {
  int maxInserts = 100000;
  Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
  String format = "%1$05d";
  for (int i = 0; i < maxInserts; i++) {
    addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
    if (i % 1000 == 0 || i == maxInserts - 1) {
      tpc.proxy().updateAndFlush(userpass, testtable, mutations);
      mutations.clear();
    }
  }
  BatchScanOptions options = new BatchScanOptions();
  ScanColumn sc = new ScanColumn();
  sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
  options.columns = Collections.singletonList(sc);
  String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
  int i = 0;
  boolean hasNext = true;
  int k = 1000;
  while (hasNext) {
    ScanResult kvList = tpc.proxy().nextK(cookie, k);
    i += kvList.getResultsSize();
    hasNext = kvList.isMore();
  }
  assertEquals(50000, i);
}
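The same family-only filter works with a plain single-threaded scanner as well, since ScanOptions in the proxy API also carries a columns list; a minimal sketch reusing the fixtures above:

ScanColumn family = new ScanColumn();
family.colFamily = ByteBuffer.wrap("cf0".getBytes());
// No colQualifier set, so every qualifier in family cf0 matches.

ScanOptions scanOptions = new ScanOptions();
scanOptions.columns = Collections.singletonList(family);
String scanCookie = tpc.proxy().createScanner(userpass, testtable, scanOptions);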
use of org.apache.accumulo.proxy.thrift.BatchScanOptions in project accumulo by apache.
the class TestProxyReadWrite method readWriteBatchOneShotWithRange.
/**
 * Insert 100,000 cells whose rows are [0..99999], zero-padded. Set a range so only the entries from -Inf up to (but not including) row 5 come back
 * (there should be 50,000).
 */
@Test
public void readWriteBatchOneShotWithRange() throws Exception {
  int maxInserts = 100000;
  Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
  String format = "%1$05d";
  for (int i = 0; i < maxInserts; i++) {
    addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
    if (i % 1000 == 0 || i == maxInserts - 1) {
      tpc.proxy().updateAndFlush(userpass, testtable, mutations);
      mutations.clear();
    }
  }
  Key stop = new Key();
  stop.setRow("5".getBytes());
  BatchScanOptions options = new BatchScanOptions();
  options.ranges = Collections.singletonList(new Range(null, false, stop, false));
  String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
  int i = 0;
  boolean hasNext = true;
  int k = 1000;
  while (hasNext) {
    ScanResult kvList = tpc.proxy().nextK(cookie, k);
    i += kvList.getResultsSize();
    hasNext = kvList.isMore();
  }
  assertEquals(50000, i);
}
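BatchScanOptions.ranges is a list, so several disjoint ranges can be scanned in one pass, and each Range may bound both ends. A minimal sketch of a fully bounded range using the same proxy thrift Key/Range types; the row values here are illustrative:

Key start = new Key();
start.setRow("20000".getBytes());
Key stop = new Key();
stop.setRow("50000".getBytes());

BatchScanOptions bounded = new BatchScanOptions();
// Rows in [20000, 50000): start inclusive, stop exclusive.
bounded.ranges = Collections.singletonList(new Range(start, true, stop, false));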