Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class Shell, method getDefaultPrompt.
public String getDefaultPrompt() {
  Objects.requireNonNull(accumuloClient);
  ClientInfo info = ClientInfo.from(accumuloClient.properties());
  return accumuloClient.whoami() + "@" + info.getInstanceName()
      + (getTableName().isEmpty() ? "" : " ") + getTableName() + "> ";
}
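For context, the same ClientInfo.from pattern works outside the shell. A minimal sketch, assuming an Accumulo 2.x client and a client.properties file on disk; the PromptDemo class and the file path are hypothetical:

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.clientImpl.ClientInfo;

public class PromptDemo {
  public static void main(String[] args) throws Exception {
    // "client.properties" is a placeholder; point it at a real client config file.
    try (AccumuloClient client = Accumulo.newClient().from("client.properties").build()) {
      // ClientInfo is derived from the client's properties, not from the cluster.
      ClientInfo info = ClientInfo.from(client.properties());
      // Mirrors the shell's prompt format for a session with no table selected.
      System.out.println(client.whoami() + "@" + info.getInstanceName() + "> ");
    }
  }
}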
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class Shell, method printInfo.
public void printInfo() throws IOException {
  ClientInfo info = ClientInfo.from(accumuloClient.properties());
  writer.print("\n" + SHELL_DESCRIPTION + "\n- \n- version: " + Constants.VERSION + "\n"
      + "- instance name: " + info.getInstanceName() + "\n- instance id: "
      + accumuloClient.instanceOperations().getInstanceId() + "\n- \n"
      + "- type 'help' for a list of available commands\n- \n");
  writer.flush();
}
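The banner mixes two sources: the instance name comes from ClientInfo, parsed locally out of the client properties, while the instance id is resolved against the running instance via instanceOperations(). A minimal sketch separating the two, again assuming an Accumulo 2.x client; BannerDemo and the properties path are hypothetical:

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.clientImpl.ClientInfo;

public class BannerDemo {
  public static void main(String[] args) throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from("client.properties").build()) {
      // Local: read straight out of the client properties.
      ClientInfo info = ClientInfo.from(client.properties());
      System.out.println("instance name: " + info.getInstanceName());
      System.out.println("zookeepers: " + info.getZooKeepers());
      // Remote: resolved against the running instance.
      System.out.println("instance id: " + client.instanceOperations().getInstanceId());
    }
  }
}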
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class AccumuloInputFormatIT, method testCorrectRangeInputSplits.
@Test
public void testCorrectRangeInputSplits() throws Exception {
  JobConf job = new JobConf();
  String table = getUniqueNames(1)[0];
  Authorizations auths = new Authorizations("foo");
  Collection<Pair<Text, Text>> fetchColumns =
      Collections.singleton(new Pair<>(new Text("foo"), new Text("bar")));
  boolean isolated = true, localIters = true;
  Level level = Level.WARN;
  try (AccumuloClient accumuloClient = Accumulo.newClient().from(getClientProps()).build()) {
    accumuloClient.tableOperations().create(table);
    ClientInfo ci = getClientInfo();
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setZooKeeperInstance(job,
        ci.getInstanceName(), ci.getZooKeepers());
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setConnectorInfo(job,
        ci.getPrincipal(), ci.getAuthenticationToken());
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setInputTableName(job, table);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setScanAuthorizations(job, auths);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setScanIsolation(job, isolated);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setLocalIterators(job, localIters);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.fetchColumns(job, fetchColumns);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setLogLevel(job, level);
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat aif =
        new org.apache.accumulo.core.client.mapred.AccumuloInputFormat();
    InputSplit[] splits = aif.getSplits(job, 1);
    assertEquals(1, splits.length);
    InputSplit split = splits[0];
    assertEquals(org.apache.accumulo.core.client.mapred.RangeInputSplit.class, split.getClass());
    org.apache.accumulo.core.client.mapred.RangeInputSplit risplit =
        (org.apache.accumulo.core.client.mapred.RangeInputSplit) split;
    assertEquals(table, risplit.getTableName());
    assertEquals(isolated, risplit.isIsolatedScan());
    assertEquals(localIters, risplit.usesLocalIterators());
    assertEquals(fetchColumns, risplit.getFetchedColumns());
    assertEquals(level, risplit.getLogLevel());
  }
}
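The eight setter calls above reduce to a small amount of ClientInfo-to-JobConf wiring. A sketch that condenses the connection-related part into a helper, using the deprecated mapred classes exactly as the test does; the InputFormatWiring class and its configure method are hypothetical:

import org.apache.accumulo.core.clientImpl.ClientInfo;
import org.apache.hadoop.mapred.JobConf;

public class InputFormatWiring {
  // Copies connection details out of ClientInfo into the JobConf, which is
  // all the deprecated mapred AccumuloInputFormat needs to locate the instance.
  static void configure(JobConf job, ClientInfo ci, String table) throws Exception {
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setZooKeeperInstance(job,
        ci.getInstanceName(), ci.getZooKeepers());
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setConnectorInfo(job,
        ci.getPrincipal(), ci.getAuthenticationToken());
    org.apache.accumulo.core.client.mapred.AccumuloInputFormat.setInputTableName(job, table);
  }
}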
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class AccumuloOutputFormatIT, method testMapred.
// Prevent regression of ACCUMULO-3709.
@Test
public void testMapred() throws Exception {
  Properties props = getClientProperties();
  try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
    // create a table and put some data in it
    client.tableOperations().create(testName.getMethodName());
    JobConf job = new JobConf();
    BatchWriterConfig batchConfig = new BatchWriterConfig();
    // no flushes!!!!!
    batchConfig.setMaxLatency(0, TimeUnit.MILLISECONDS);
    // use a single thread to ensure our update session times out
    batchConfig.setMaxWriteThreads(1);
    // set the max memory so that we ensure we don't flush on the write.
    batchConfig.setMaxMemory(Long.MAX_VALUE);
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat outputFormat =
        new org.apache.accumulo.core.client.mapred.AccumuloOutputFormat();
    ClientInfo ci = ClientInfo.from(props);
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setZooKeeperInstance(job,
        ci.getInstanceName(), ci.getZooKeepers());
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setConnectorInfo(job,
        ci.getPrincipal(), ci.getAuthenticationToken());
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setBatchWriterOptions(job,
        batchConfig);
    RecordWriter<Text, Mutation> writer = outputFormat.getRecordWriter(null, job, "Test", null);
    try {
      for (int i = 0; i < 3; i++) {
        Mutation m = new Mutation(new Text(String.format("%08d", i)));
        for (int j = 0; j < 3; j++) {
          m.put("cf1", "cq" + j, i + "_" + j);
        }
        writer.write(new Text(testName.getMethodName()), m);
      }
    } catch (Exception e) {
      e.printStackTrace();
      // we don't want the exception to come from write
    }
    client.securityOperations().revokeTablePermission("root", testName.getMethodName(),
        TablePermission.WRITE);
    var ex = assertThrows(IOException.class, () -> writer.close(null));
    log.info(ex.getMessage(), ex);
    assertTrue(ex.getCause() instanceof MutationsRejectedException);
  }
}
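The BatchWriterConfig above deliberately defers every flush, so the revoked WRITE permission only surfaces when close() pushes the buffered mutations to the tablet servers. For contrast, a happy-path sketch of the same wiring and write-then-close sequence, assuming the destination table exists and is writable; OutputFormatDemo and the table name "demo" are hypothetical:

import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.clientImpl.ClientInfo;
import org.apache.accumulo.core.data.Mutation;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;

public class OutputFormatDemo {
  static void writeOne(ClientInfo ci) throws Exception {
    JobConf job = new JobConf();
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setZooKeeperInstance(job,
        ci.getInstanceName(), ci.getZooKeepers());
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setConnectorInfo(job,
        ci.getPrincipal(), ci.getAuthenticationToken());
    // default config: flushes happen as buffers fill, unlike the test above
    org.apache.accumulo.core.client.mapred.AccumuloOutputFormat.setBatchWriterOptions(job,
        new BatchWriterConfig());
    RecordWriter<Text, Mutation> writer =
        new org.apache.accumulo.core.client.mapred.AccumuloOutputFormat()
            .getRecordWriter(null, job, "Demo", null);
    Mutation m = new Mutation(new Text("row1"));
    m.put("cf1", "cq1", "value1");
    writer.write(new Text("demo"), m); // "demo" is the destination table
    writer.close(null); // final flush; rejected mutations surface here as IOException
  }
}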
Use of org.apache.accumulo.core.clientImpl.ClientInfo in project accumulo by apache.
The class RecoveryWithEmptyRFileIT, method replaceMissingRFile.
@Test
public void replaceMissingRFile() throws Exception {
  log.info("Ingest some data, verify it was stored properly, replace an"
      + " underlying rfile with an empty one and verify we can scan.");
  Properties props = getClientProperties();
  ClientInfo info = ClientInfo.from(props);
  try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
    String tableName = getUniqueNames(1)[0];
    ReadWriteIT.ingest(client, info, ROWS, COLS, 50, 0, tableName);
    ReadWriteIT.verify(client, info, ROWS, COLS, 50, 0, tableName);
    client.tableOperations().flush(tableName, null, null, true);
    client.tableOperations().offline(tableName, true);
    log.debug("Replacing rfile(s) with empty");
    try (Scanner meta = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      String tableId = client.tableOperations().tableIdMap().get(tableName);
      meta.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
      meta.fetchColumnFamily(DataFileColumnFamily.NAME);
      boolean foundFile = false;
      for (Entry<Key, Value> entry : meta) {
        foundFile = true;
        Path rfile = new Path(entry.getKey().getColumnQualifier().toString());
        log.debug("Removing rfile '{}'", rfile);
        cluster.getFileSystem().delete(rfile, false);
        Process processInfo = cluster.exec(CreateEmpty.class, rfile.toString()).getProcess();
        assertEquals(0, processInfo.waitFor());
      }
      assertTrue(foundFile);
    }
    log.trace("invalidate cached file handles by issuing a compaction");
    client.tableOperations().online(tableName, true);
    client.tableOperations().compact(tableName, null, null, false, true);
    log.debug("make sure we can still scan");
    try (Scanner scan = client.createScanner(tableName, Authorizations.EMPTY)) {
      scan.setRange(new Range());
      long cells = 0L;
      for (Entry<Key, Value> entry : scan) {
        if (entry != null)
          cells++;
      }
      assertEquals(0L, cells);
    }
  }
}
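Two details are worth calling out. The metadata range from tableId + ";" to tableId + "<" covers every tablet row for the table, since metadata rows are the table id followed by the tablet's end row, with "<" marking the default tablet. And CreateEmpty is the utility that writes a structurally valid rfile containing no entries, which is why the final scan succeeds and returns zero cells. A sketch of invoking it directly rather than through cluster.exec(), assuming the class lives at org.apache.accumulo.core.file.rfile.CreateEmpty as in Accumulo 2.x; the path below is a placeholder:

import org.apache.accumulo.core.file.rfile.CreateEmpty;

public class ReplaceWithEmpty {
  public static void main(String[] args) throws Exception {
    // CreateEmpty.main() writes a valid, empty rfile at each path given; the
    // test runs the same class in a separate JVM via cluster.exec(). The path
    // here stands in for the tablet file that was just deleted.
    CreateEmpty.main(new String[] {"/accumulo/tables/1/default_tablet/F0000000.rf"});
  }
}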