Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by Apache.
The class AccumuloInputFormatIT, method testCorrectRangeInputSplits.
@Test
public void testCorrectRangeInputSplits() throws Exception {
  JobConf job = new JobConf();
  String table = getUniqueNames(1)[0];
  Authorizations auths = new Authorizations("foo");
  Collection<Pair<Text, Text>> fetchColumns = Collections.singleton(new Pair<>(new Text("foo"), new Text("bar")));
  boolean isolated = true, localIters = true;
  Level level = Level.WARN;
  try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
    accumuloClient.tableOperations().create(table);
    ClientInfo ci = getClientInfo();
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setZooKeeperInstance(job, ci.getInstanceName(), ci.getZooKeepers());
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setConnectorInfo(job, ci.getPrincipal(), ci.getAuthenticationToken());
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setInputTableName(job, table);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setScanAuthorizations(job, auths);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setScanIsolation(job, isolated);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setLocalIterators(job, localIters);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.fetchColumns(job, fetchColumns);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setLogLevel(job, level);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat aif = new org.apache.accumulo.core.client.mapred.AccumuloInputFormat();
    InputSplit[] splits = aif.getSplits(job, 1);
    assertEquals(1, splits.length);
    InputSplit split = splits[0];
    assertEquals(org.apache.accumulo.core.client.mapred.RangeInputSplit.class, split.getClass());
    org.apache.accumulo.core.client.mapred.RangeInputSplit risplit = (org.apache.accumulo.core.client.mapred.RangeInputSplit) split;
    assertEquals(table, risplit.getTableName());
    assertEquals(isolated, risplit.isIsolatedScan());
    assertEquals(localIters, risplit.usesLocalIterators());
    assertEquals(fetchColumns, risplit.getFetchedColumns());
    assertEquals(level, risplit.getLogLevel());
  }
}
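The test never sets explicit ranges, so the unsplit table yields exactly one RangeInputSplit covering everything. As a minimal sketch (not part of the original test, reusing the job and aif variables configured above and assuming java.util.List/ArrayList and org.apache.accumulo.core.data.Range are imported), explicit ranges can be supplied through the same deprecated mapred API; with range auto-adjustment disabled, each configured range generally becomes its own split:

// Sketch only: feed explicit ranges to the deprecated mapred AccumuloInputFormat.
// Assumes the JobConf 'job' and the 'aif' instance configured in the test above.
List<Range> ranges = new ArrayList<>();
ranges.add(new Range("a", "k"));
ranges.add(new Range("l", "z"));
org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setRanges(job, ranges);
// Without auto-adjustment, ranges are not clipped or merged at tablet boundaries,
// so one split is produced per configured range.
org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setAutoAdjustRanges(job, false);
InputSplit[] rangedSplits = aif.getSplits(job, 1);
assertEquals(2, rangedSplits.length);   // one split per configured range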
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by Apache.
The class AccumuloOutputFormatIT, method testMapred.
// Prevent regression of ACCUMULO-3709.
@Test
public void testMapred() throws Exception {
  Properties props = getClientProperties();
  try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
    // create a table and put some data in it
    client.tableOperations().create(testName.getMethodName());
    JobConf job = new JobConf();
    BatchWriterConfig batchConfig = new BatchWriterConfig();
    // no flushes!!!!!
    batchConfig.setMaxLatency(0, TimeUnit.MILLISECONDS);
    // use a single thread to ensure our update session times out
    batchConfig.setMaxWriteThreads(1);
    // set the max memory so that we ensure we don't flush on the write.
    batchConfig.setMaxMemory(Long.MAX_VALUE);
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat outputFormat = new org.apache.accumulo.core.client.mapred.AccumuloOutputFormat();
    ClientInfo ci = ClientInfo.from(props);
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setZooKeeperInstance(job, ci.getInstanceName(), ci.getZooKeepers());
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setConnectorInfo(job, ci.getPrincipal(), ci.getAuthenticationToken());
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setBatchWriterOptions(job, batchConfig);
    RecordWriter<Text, Mutation> writer = outputFormat.getRecordWriter(null, job, "Test", null);
    try {
      for (int i = 0; i < 3; i++) {
        Mutation m = new Mutation(new Text(String.format("%08d", i)));
        for (int j = 0; j < 3; j++) {
          m.put("cf1", "cq" + j, i + "_" + j);
        }
        writer.write(new Text(testName.getMethodName()), m);
      }
    } catch (Exception e) {
      e.printStackTrace();
      // we don't want the exception to come from write
    }
    client.securityOperations().revokeTablePermission("root", testName.getMethodName(), TablePermission.WRITE);
    var ex = assertThrows(IOException.class, () -> writer.close(null));
    log.info(ex.getMessage(), ex);
    assertTrue(ex.getCause() instanceof MutationsRejectedException);
  }
}
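Because the batch writer options above use an effectively unlimited buffer and no latency-based flush, nothing reaches the tablet servers until close(); revoking WRITE first is what forces the rejection at close time. A minimal sketch of the same failure path (not part of the test) using a plain BatchWriter instead of the mapred output format, assuming an AccumuloClient named client and an existing, writable table name in table:

// Sketch only: buffer everything, revoke WRITE, then watch close() reject the mutations.
BatchWriter bw = client.createBatchWriter(table, new BatchWriterConfig()
    .setMaxLatency(0, TimeUnit.MILLISECONDS)   // no time-based flush
    .setMaxMemory(Long.MAX_VALUE));            // no size-based flush
Mutation m = new Mutation("row1");
m.put("cf1", "cq1", "value1");
bw.addMutation(m);
client.securityOperations().revokeTablePermission("root", table, TablePermission.WRITE);
assertThrows(MutationsRejectedException.class, bw::close);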
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by Apache.
The class TokenFileIT, method testMR.
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN", justification = "path provided by test")
@Test
public void testMR() throws Exception {
  String[] tableNames = getUniqueNames(2);
  String table1 = tableNames[0];
  String table2 = tableNames[1];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
    c.tableOperations().create(table1);
    c.tableOperations().create(table2);
    try (BatchWriter bw = c.createBatchWriter(table1)) {
      for (int i = 0; i < 100; i++) {
        Mutation m = new Mutation(new Text(String.format("%09x", i + 1)));
        m.put("", "", String.format("%09x", i));
        bw.addMutation(m);
      }
    }
    File tf = folder.newFile("root_test.pw");
    try (PrintStream out = new PrintStream(tf)) {
      String outString = new Credentials(getAdminPrincipal(), getAdminToken()).serialize();
      out.println(outString);
    }
    Configuration conf = cluster.getServerContext().getHadoopConf();
    conf.set("hadoop.tmp.dir", new File(tf.getAbsolutePath()).getParent());
    conf.set("mapreduce.framework.name", "local");
    conf.set("mapreduce.cluster.local.dir", new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
    assertEquals(0, ToolRunner.run(conf, new MRTokenFileTester(), new String[] { tf.getAbsolutePath(), table1, table2 }));
    // e1 is a field of the test class, set by the MRTokenFileTester mapper when a
    // map-side assertion fails; its declaration is not part of this excerpt.
    if (e1 != null) {
      e1.printStackTrace();
    }
    assertNull(e1);
    try (Scanner scanner = c.createScanner(table2, new Authorizations())) {
      Iterator<Entry<Key, Value>> iter = scanner.iterator();
      assertTrue(iter.hasNext());
      Entry<Key, Value> entry = iter.next();
      assertEquals(Integer.parseInt(new String(entry.getValue().get())), 100);
      assertFalse(iter.hasNext());
    }
  }
}
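The MRTokenFileTester driver invoked through ToolRunner is not included in this excerpt. A hypothetical sketch of the part that makes this a token-file test: the mapred connector-info setters also accept a token-file path (distributed with the job) in place of an in-memory AuthenticationToken, which is why the serialized Credentials are written to tf above. The class name, exact setters, and wiring below are assumptions for illustration, not the actual MRTokenFileTester code:

// Hypothetical sketch of a driver that authenticates via the token file.
JobConf job = new JobConf(conf);
// Overloads that take a token-file path (assumed here) let the job read
// credentials from the distributed file at runtime instead of serializing a token.
org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setConnectorInfo(job, getAdminPrincipal(), tf.getAbsolutePath());
org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setInputTableName(job, table1);
org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setConnectorInfo(job, getAdminPrincipal(), tf.getAbsolutePath());
org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setDefaultTableName(job, table2);
// The job then counts the 100 rows read from table1 and writes that count as a
// single entry into table2, which the scanner at the end of the test verifies.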
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by Apache.
The class SummaryIT, method tooLargeTest.
@Test
public void tooLargeTest() throws Exception {
  final String table = getUniqueNames(1)[0];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
    NewTableConfiguration ntc = new NewTableConfiguration();
    SummarizerConfiguration sc1 = SummarizerConfiguration.builder(BigSummarizer.class).build();
    ntc.enableSummarization(sc1);
    c.tableOperations().create(table, ntc);
    try (BatchWriter bw = c.createBatchWriter(table)) {
      write(bw, "a_large", "f1", "q1", "v1");
      write(bw, "v_small", "f1", "q1", "v2");
    }
    c.tableOperations().flush(table, null, null, true);
    Summary summary = c.tableOperations().summaries(table).retrieve().get(0);
    assertEquals(1, summary.getFileStatistics().getLarge());
    assertEquals(0, summary.getFileStatistics().getMissing());
    assertEquals(0, summary.getFileStatistics().getExtra());
    assertEquals(0, summary.getFileStatistics().getDeleted());
    assertEquals(1, summary.getFileStatistics().getInaccurate());
    assertEquals(1, summary.getFileStatistics().getTotal());
    assertEquals(Collections.emptyMap(), summary.getStatistics());
    // create a situation where one tablet has summary data and one does not,
    // because the summary data was too large
    c.tableOperations().addSplits(table, new TreeSet<>(Collections.singleton(new Text("m"))));
    c.tableOperations().compact(table, new CompactionConfig().setWait(true));
    summary = c.tableOperations().summaries(table).retrieve().get(0);
    assertEquals(1, summary.getFileStatistics().getLarge());
    assertEquals(0, summary.getFileStatistics().getMissing());
    assertEquals(0, summary.getFileStatistics().getExtra());
    assertEquals(0, summary.getFileStatistics().getDeleted());
    assertEquals(1, summary.getFileStatistics().getInaccurate());
    assertEquals(2, summary.getFileStatistics().getTotal());
    HashMap<String, Long> expected = new HashMap<>();
    for (int i = 0; i < 10; i++) {
      expected.put(String.format("%09x", i), i * 19L);
    }
    assertEquals(expected, summary.getStatistics());
  }
}
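BigSummarizer is referenced but not included in this listing. A hypothetical sketch of a Summarizer with the behavior this test depends on: for some files it emits so many distinct counters that the serialized summary data exceeds the size limit, leaving those statistics marked inaccurate. The class name, trigger condition, and counter counts below are illustrative assumptions, not the real BigSummarizer:

// Hypothetical sketch of a summarizer in the spirit of BigSummarizer.
public class HugeSummarizer implements Summarizer {

  @Override
  public Collector collector(SummarizerConfiguration sc) {
    return new Collector() {
      private boolean sawLargeRow = false;

      @Override
      public void accept(Key k, Value v) {
        // Illustrative trigger: rows ending in "_large" cause oversized summary output.
        if (k.getRow().toString().endsWith("_large")) {
          sawLargeRow = true;
        }
      }

      @Override
      public void summarize(StatisticConsumer consumer) {
        // Emitting a very large number of distinct counters is what pushes the
        // serialized summary past the configured per-file size limit.
        int numCounters = sawLargeRow ? 100_000 : 10;
        for (int i = 0; i < numCounters; i++) {
          consumer.accept(String.format("%09x", i), i);
        }
      }
    };
  }

  @Override
  public Combiner combiner(SummarizerConfiguration sc) {
    // Sum counters with the same name when merging per-file summaries.
    return (m1, m2) -> m2.forEach((name, count) -> m1.merge(name, count, Long::sum));
  }
}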
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by Apache.
The class TabletIT, method createTableTest.
@Test
public void createTableTest() throws Exception {
  try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
    String tableName = getUniqueNames(1)[0];
    createTableTest(client, tableName, false);
    createTableTest(client, tableName, true);
  }
}
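The private createTableTest(client, tableName, readOnly) helper is not included in this listing. A hypothetical sketch of the shape such a helper usually takes, so the two calls above make sense: the first pass (readOnly == false) creates and populates the table, the second pass only re-reads the existing data. Split points, row counts, and column names below are illustrative assumptions, and the usual Accumulo imports (Text, TreeSet, NewTableConfiguration, BatchWriter, Scanner, Authorizations) are assumed:

// Hypothetical sketch of the helper (not the actual TabletIT code).
private void createTableTest(AccumuloClient client, String tableName, boolean readOnly) throws Exception {
  if (!readOnly) {
    // First pass: create the table with a few split points and load some data.
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = 1; i < 10; i++) {
      splits.add(new Text(String.format("%d", i)));
    }
    client.tableOperations().create(tableName, new NewTableConfiguration().withSplits(splits));
    try (BatchWriter bw = client.createBatchWriter(tableName)) {
      for (int i = 0; i < 100; i++) {
        Mutation m = new Mutation(new Text(String.format("%02d", i)));
        m.put("cf", "cq", "value" + i);
        bw.addMutation(m);
      }
    }
  }
  // Both passes read the data back across all tablets and verify the row count.
  int count = 0;
  try (Scanner scanner = client.createScanner(tableName, Authorizations.EMPTY)) {
    for (Entry<Key, Value> unused : scanner) {
      count++;
    }
  }
  assertEquals(100, count);
}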