Use of org.apache.accumulo.proxy.thrift.ScanResult in project accumulo by apache.
In class TestProxyReadWrite, method asynchReadWrite.
@Test
public void asynchReadWrite() throws Exception {
    int maxInserts = 10000;
    Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
    String format = "%1$05d";
    String writer = tpc.proxy().createWriter(userpass, testtable, null);
    for (int i = 0; i < maxInserts; i++) {
        addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
        if (i % 1000 == 0 || i == maxInserts - 1) {
            // Push the accumulated batch through the shared writer, then start a new batch
            tpc.proxy().update(writer, mutations);
            mutations.clear();
        }
    }
    tpc.proxy().flush(writer);
    tpc.proxy().closeWriter(writer);
    // Keep only rows whose zero-padded value ends in an even digit
    String regex = ".*[02468]";
    org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
    RegExFilter.setRegexs(is, regex, null, null, null, false);
    IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
    ScanOptions opts = new ScanOptions();
    opts.iterators = Collections.singletonList(pis);
    String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
    int i = 0;
    boolean hasNext = true;
    int k = 1000;
    int numRead = 0;
    while (hasNext) {
        ScanResult kvList = tpc.proxy().nextK(cookie, k);
        for (KeyValue kv : kvList.getResults()) {
            assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
            numRead++;
            i += 2;
        }
        hasNext = kvList.isMore();
    }
    assertEquals(maxInserts / 2, numRead);
}
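The nextK/isMore loop above is the standard pagination pattern for the proxy API; here is a minimal sketch of it factored into a reusable helper, assuming the thrift-generated AccumuloProxy.Client, the java.util collection imports, and a cookie obtained from createScanner(...):

// A sketch, not from the Accumulo sources: drain a proxy scanner into a list,
// fetching up to batchSize entries per round trip until ScanResult.isMore()
// reports that no batches remain.
private static List<KeyValue> drainScanner(AccumuloProxy.Client client, String cookie, int batchSize) throws Exception {
    List<KeyValue> all = new ArrayList<>();
    ScanResult batch;
    do {
        batch = client.nextK(cookie, batchSize);
        all.addAll(batch.getResults());
    } while (batch.isMore());
    // Scanners hold server-side state, so release the cookie when finished
    client.closeScanner(cookie);
    return all;
}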
Use of org.apache.accumulo.proxy.thrift.ScanResult in project accumulo by apache.
In class TestProxyReadWrite, method readWriteBatchOneShotWithFilterIterator.
/**
 * Insert 10000 cells whose rows are [0..9999] (zero-padded). Filter the results so only the even-numbered rows come back.
 */
@Test
public void readWriteBatchOneShotWithFilterIterator() throws Exception {
    int maxInserts = 10000;
    Map<ByteBuffer, List<ColumnUpdate>> mutations = new HashMap<>();
    String format = "%1$05d";
    for (int i = 0; i < maxInserts; i++) {
        addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
        if (i % 1000 == 0 || i == maxInserts - 1) {
            // One-shot write: each batch is sent and flushed with no shared writer
            tpc.proxy().updateAndFlush(userpass, testtable, mutations);
            mutations.clear();
        }
    }
    // Keep only rows whose zero-padded value ends in an even digit
    String regex = ".*[02468]";
    org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
    RegExFilter.setRegexs(is, regex, null, null, null, false);
    IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
    ScanOptions opts = new ScanOptions();
    opts.iterators = Collections.singletonList(pis);
    String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
    int i = 0;
    boolean hasNext = true;
    int k = 1000;
    while (hasNext) {
        ScanResult kvList = tpc.proxy().nextK(cookie, k);
        for (KeyValue kv : kvList.getResults()) {
            assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
            i += 2;
        }
        hasNext = kvList.isMore();
    }
}
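For contrast, the same filter can be expressed directly as a proxy thrift IteratorSetting, skipping the core-to-proxy conversion step. This is a hedged sketch: the literal "rowRegex" option key is assumed to match RegExFilter.ROW_REGEX, so prefer the setRegexs(...) helper whenever the core client classes are on the classpath.

// A sketch under stated assumptions: the thrift IteratorSetting carries a
// priority, a name, the iterator class name, and an options map; the
// "rowRegex" key is an assumption documented above.
IteratorSetting pis = new IteratorSetting(50, "rowFilter", RegExFilter.class.getName(),
        Collections.singletonMap("rowRegex", ".*[02468]"));
ScanOptions opts = new ScanOptions();
opts.iterators = Collections.singletonList(pis);
String cookie = tpc.proxy().createScanner(userpass, testtable, opts);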
Use of org.apache.accumulo.proxy.thrift.ScanResult in project accumulo by apache.
In class SimpleProxyBase, method bulkImport.
@Test
public void bulkImport() throws Exception {
    MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
    FileSystem fs = cluster.getFileSystem();
    Path base = cluster.getTemporaryPath();
    Path dir = new Path(base, "test");
    assertTrue(fs.mkdirs(dir));
    // Write an RFile
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder().forFile(filename, fs, fs.getConf())
            .withTableConfiguration(DefaultConfiguration.getInstance()).build();
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")), new Value("value".getBytes(UTF_8)));
    writer.close();
    // Create failures directory
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    // Run the bulk import
    client.importDirectory(creds, tableName, dir + "/bulk/import", dir + "/bulk/fail", true);
    // Make sure we find the data
    String scanner = client.createScanner(creds, tableName, null);
    ScanResult more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
    assertEquals(1, more.results.size());
    ByteBuffer maxRow = client.getMaxRow(creds, tableName, null, null, false, null, false);
    assertEquals(s2bb("a"), maxRow);
}
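importDirectory moves any RFiles the tablet servers cannot load into the failures directory rather than throwing, so a small extra check is possible here; a sketch reusing the fs and dir handles above:

// A sketch: after a clean bulk import the failures directory should be empty;
// anything left behind is a file that could not be loaded.
FileStatus[] failed = fs.listStatus(new Path(dir + "/bulk/fail"));
assertEquals(0, failed.length);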
Use of org.apache.accumulo.proxy.thrift.ScanResult in project accumulo by apache.
In class SimpleProxyBase, method countFiles.
// Scan the metadata table for file entries belonging to the given table
private int countFiles(String table) throws Exception {
    Map<String, String> tableIdMap = client.tableIdMap(creds);
    String tableId = tableIdMap.get(table);
    // Tablet rows for a table sort between "<tableId>;" and "<tableId><",
    // where "<tableId><" is the row of the table's default (last) tablet
    Key start = new Key();
    start.row = s2bb(tableId + ";");
    Key end = new Key();
    end.row = s2bb(tableId + "<");
    end = client.getFollowing(end, PartialKey.ROW);
    ScanOptions opt = new ScanOptions();
    opt.range = new Range(start, true, end, false);
    opt.columns = Collections.singletonList(new ScanColumn(s2bb("file")));
    String scanner = client.createScanner(creds, MetadataTable.NAME, opt);
    int result = 0;
    while (true) {
        ScanResult more = client.nextK(scanner, 100);
        result += more.getResults().size();
        if (!more.more)
            break;
    }
    return result;
}
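The start and end rows above encode the metadata naming scheme: each tablet's row is its table id plus ';' plus the tablet's end row, and the table's last tablet uses '<' instead. A sketch factoring that convention into a helper, assuming the same class context (client, s2bb):

// A sketch: build the half-open metadata range covering every tablet of a table
private Range tabletRange(String tableId) throws Exception {
    Key start = new Key();
    start.row = s2bb(tableId + ";");
    Key end = new Key();
    end.row = s2bb(tableId + "<");
    // getFollowing bumps the bound just past the default tablet's row
    end = client.getFollowing(end, PartialKey.ROW);
    return new Range(start, true, end, false);
}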
Use of org.apache.accumulo.proxy.thrift.ScanResult in project accumulo by apache.
In class SimpleProxyBase, method testDelete.
@Test
public void testDelete() throws Exception {
    client.updateAndFlush(creds, tableName, mutation("row0", "cf", "cq", "value"));
    assertScan(new String[][] { { "row0", "cf", "cq", "value" } }, tableName);
    // An update with deleteCell=false must not delete the cell; the row still scans back
    ColumnUpdate upd = new ColumnUpdate(s2bb("cf"), s2bb("cq"));
    upd.setDeleteCell(false);
    Map<ByteBuffer, List<ColumnUpdate>> notDelete = Collections.singletonMap(s2bb("row0"), Collections.singletonList(upd));
    client.updateAndFlush(creds, tableName, notDelete);
    String scanner = client.createScanner(creds, tableName, null);
    ScanResult entries = client.nextK(scanner, 10);
    client.closeScanner(scanner);
    assertFalse(entries.more);
    assertEquals("Results: " + entries.results, 1, entries.results.size());
    // The same update with deleteCell=true removes the cell entirely
    upd = new ColumnUpdate(s2bb("cf"), s2bb("cq"));
    upd.setDeleteCell(true);
    Map<ByteBuffer, List<ColumnUpdate>> delete = Collections.singletonMap(s2bb("row0"), Collections.singletonList(upd));
    client.updateAndFlush(creds, tableName, delete);
    assertScan(new String[][] {}, tableName);
}
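The mutation(...) helper is defined elsewhere in the test base class; a hedged sketch of a plausible implementation, building the single-row map that updateAndFlush expects:

// A sketch, not the actual SimpleProxyBase helper: pack one cell into the
// Map<row, updates> shape consumed by updateAndFlush(...)
private Map<ByteBuffer, List<ColumnUpdate>> mutation(String row, String cf, String cq, String value) {
    ColumnUpdate upd = new ColumnUpdate(s2bb(cf), s2bb(cq));
    upd.setValue(s2bb(value));
    return Collections.singletonMap(s2bb(row), Collections.singletonList(upd));
}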