Use of org.apache.accumulo.core.client.ClientConfiguration in project incubator-rya by apache: the CopyTool class, method setupAccumuloInput.
@Override
protected void setupAccumuloInput(final Job job) throws AccumuloSecurityException {
    // File-based import short-circuits all Accumulo input configuration.
    if (useCopyFileImport) {
        try {
            FileInputFormat.setInputPaths(job, localCopyFileImportDir);
        } catch (final IOException ioe) {
            // Best-effort: log and continue, matching the tool's error-handling style.
            log.error("Failed to set copy file import directory", ioe);
        }
        return;
    }

    // Choose the input format: direct HDFS file reads vs. the standard Accumulo scan path.
    if (hdfsInput) {
        job.setInputFormatClass(AccumuloHDFSFileInputFormat.class);
    } else {
        job.setInputFormatClass(AccumuloInputFormat.class);
    }

    // Connection, table, and authorization wiring for the input format.
    AbstractInputFormat.setConnectorInfo(job, userName, new PasswordToken(pwd));
    final String inputTable = RdfCloudTripleStoreUtils.layoutPrefixToTable(rdfTableLayout, tablePrefix);
    InputFormatBase.setInputTableName(job, inputTable);
    AbstractInputFormat.setScanAuthorizations(job, authorizations);

    // Mock instances are used for testing; otherwise connect through ZooKeeper.
    if (mock) {
        AbstractInputFormat.setMockInstance(job, instance);
    } else {
        AbstractInputFormat.setZooKeeperInstance(job, new ClientConfiguration().withInstance(instance).withZkHosts(zk));
    }

    // Optional age-off filter drops entries older than the configured TTL.
    if (ttl != null) {
        final IteratorSetting ageOffSetting = new IteratorSetting(1, "fi", AgeOffFilter.class);
        AgeOffFilter.setTTL(ageOffSetting, Long.valueOf(ttl));
        InputFormatBase.addIterator(job, ageOffSetting);
    }

    // Optional start-time filter limits the scan to entries at/after startTime.
    if (startTime != null) {
        InputFormatBase.addIterator(job, getStartTimeSetting(startTime));
    }

    // Always attach the common regex filters shared across Rya copy jobs.
    for (final IteratorSetting regexSetting : AccumuloRyaUtils.COMMON_REG_EX_FILTER_SETTINGS) {
        InputFormatBase.addIterator(job, regexSetting);
    }
}
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache: the Shell class, method config.
/**
 * Configures the shell using the provided options. Not for client use.
 * <p>
 * Parses the command-line arguments, resolves the user and authentication token,
 * loads the client configuration, connects to the instance, and registers all
 * shell commands into {@code commandFactory} / {@code commandGrouping}.
 *
 * @param args
 *          raw command-line arguments for the shell
 * @return true if the shell was successfully configured, false otherwise.
 * @throws IOException
 *           if problems occur creating the ConsoleReader
 */
public boolean config(String... args) throws IOException {
    // Create a ConsoleReader only if one has not already been set on this Shell.
    if (this.reader == null)
        this.reader = new ConsoleReader();
    // Parse command-line options with JCommander.
    ShellOptionsJC options = new ShellOptionsJC();
    JCommander jc = new JCommander();
    jc.setProgramName("accumulo shell");
    jc.addObject(options);
    try {
        jc.parse(args);
    } catch (ParameterException e) {
        // Bad arguments: show usage and report configuration failure.
        jc.usage();
        exitCode = 1;
        return false;
    }
    if (options.isHelpEnabled()) {
        jc.usage();
        // Not an error
        exitCode = 0;
        return false;
    }
    if (options.getUnrecognizedOptions() != null) {
        logError("Unrecognized Options: " + options.getUnrecognizedOptions().toString());
        jc.usage();
        exitCode = 1;
        return false;
    }
    // Apply general shell settings parsed from the options.
    setDebugging(options.isDebugEnabled());
    authTimeout = TimeUnit.MINUTES.toNanos(options.getAuthTimeout());
    disableAuthTimeout = options.isAuthTimeoutDisabled();
    // Load the client configuration; note that a failure here returns true
    // (the shell stays up and the exception is printed to the user).
    ClientConfiguration clientConf;
    try {
        clientConf = options.getClientConfiguration();
    } catch (Exception e) {
        printException(e);
        return true;
    }
    // With SASL (Kerberos) there is no cached password to re-prompt for,
    // so the authorization timeout is disabled.
    if (Boolean.parseBoolean(clientConf.get(ClientProperty.INSTANCE_RPC_SASL_ENABLED))) {
        log.debug("SASL is enabled, disabling authorization timeout");
        disableAuthTimeout = true;
    }
    // get the options that were parsed
    final String user;
    try {
        user = options.getUsername();
    } catch (Exception e) {
        printException(e);
        return true;
    }
    String password = options.getPassword();
    tabCompletion = !options.isTabCompletionDisabled();
    // Use a ZK, or HdfsZK Accumulo instance
    setInstance(options);
    // AuthenticationToken options
    try {
        token = options.getAuthenticationToken();
    } catch (Exception e) {
        printException(e);
        return true;
    }
    Map<String, String> loginOptions = options.getTokenProperties();
    // process default parameters if unspecified
    try {
        final boolean hasToken = (token != null);
        // --pass and --tokenClass are mutually exclusive ways of authenticating.
        if (hasToken && password != null) {
            throw new ParameterException("Can not supply '--pass' option with '--tokenClass' option");
        }
        // Restore terminal echo on JVM exit in case we die mid password prompt.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                reader.getTerminal().setEchoEnabled(true);
            }
        });
        if (hasToken) {
            // implied hasTokenOptions
            // Fully qualified name so we don't shadow java.util.Properties
            org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties props;
            // and line wrap it because the package name is so long
            props = new org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Properties();
            if (!loginOptions.isEmpty()) {
                props.putAllStrings(loginOptions);
            }
            token.init(props);
        } else {
            // Read password if the user explicitly asked for it, or didn't specify anything at all
            if ("stdin".equals(password) || password == null) {
                password = reader.readLine("Password: ", '*');
            }
            if (password == null) {
                // User cancel, e.g. Ctrl-D pressed
                throw new ParameterException("No password or token option supplied");
            } else {
                this.token = new PasswordToken(password);
            }
        }
        // Enable distributed tracing unless this is a fake (mock) shell.
        if (!options.isFake()) {
            DistributedTrace.enable(InetAddress.getLocalHost().getHostName(), "shell", clientConf);
        }
        this.setTableName("");
        connector = instance.getConnector(user, token);
    } catch (Exception e) {
        printException(e);
        exitCode = 1;
        return false;
    }
    // decide whether to execute commands from a file and quit
    if (options.getExecFile() != null) {
        execFile = options.getExecFile();
        verbose = false;
    } else if (options.getExecFileVerbose() != null) {
        execFile = options.getExecFileVerbose();
        verbose = true;
    }
    execCommand = options.getExecCommand();
    if (execCommand != null) {
        verbose = false;
    }
    // Register every shell command, grouped by category for help output.
    rootToken = new Token();
    Command[] dataCommands = { new DeleteCommand(), new DeleteManyCommand(), new DeleteRowsCommand(), new EGrepCommand(), new FormatterCommand(), new InterpreterCommand(), new GrepCommand(), new ImportDirectoryCommand(), new InsertCommand(), new MaxRowCommand(), new ScanCommand() };
    Command[] debuggingCommands = { new ClasspathCommand(), new DebugCommand(), new ListScansCommand(), new ListCompactionsCommand(), new TraceCommand(), new PingCommand(), new ListBulkCommand() };
    Command[] execCommands = { new ExecfileCommand(), new HistoryCommand(), new ExtensionCommand(), new ScriptCommand() };
    Command[] exitCommands = { new ByeCommand(), new ExitCommand(), new QuitCommand() };
    Command[] helpCommands = { new AboutCommand(), new HelpCommand(), new InfoCommand(), new QuestionCommand() };
    Command[] iteratorCommands = { new DeleteIterCommand(), new DeleteScanIterCommand(), new ListIterCommand(), new SetIterCommand(), new SetScanIterCommand(), new SetShellIterCommand(), new ListShellIterCommand(), new DeleteShellIterCommand() };
    Command[] otherCommands = { new HiddenCommand() };
    Command[] permissionsCommands = { new GrantCommand(), new RevokeCommand(), new SystemPermissionsCommand(), new TablePermissionsCommand(), new UserPermissionsCommand(), new NamespacePermissionsCommand() };
    Command[] stateCommands = { new AuthenticateCommand(), new ClsCommand(), new ClearCommand(), new FateCommand(), new NoTableCommand(), new SleepCommand(), new TableCommand(), new UserCommand(), new WhoAmICommand() };
    Command[] tableCommands = { new CloneTableCommand(), new ConfigCommand(), new CreateTableCommand(), new DeleteTableCommand(), new DropTableCommand(), new DUCommand(), new ExportTableCommand(), new ImportTableCommand(), new OfflineCommand(), new OnlineCommand(), new RenameTableCommand(), new TablesCommand(), new NamespacesCommand(), new CreateNamespaceCommand(), new DeleteNamespaceCommand(), new RenameNamespaceCommand(), new SummariesCommand() };
    Command[] tableControlCommands = { new AddSplitsCommand(), new CompactCommand(), new ConstraintCommand(), new FlushCommand(), new GetGroupsCommand(), new GetSplitsCommand(), new MergeCommand(), new SetGroupsCommand() };
    Command[] userCommands = { new AddAuthsCommand(), new CreateUserCommand(), new DeleteUserCommand(), new DropUserCommand(), new GetAuthsCommand(), new PasswdCommand(), new SetAuthsCommand(), new UsersCommand(), new DeleteAuthsCommand() };
    commandGrouping.put("-- Writing, Reading, and Removing Data --", dataCommands);
    commandGrouping.put("-- Debugging Commands -------------------", debuggingCommands);
    commandGrouping.put("-- Shell Execution Commands -------------", execCommands);
    commandGrouping.put("-- Exiting Commands ---------------------", exitCommands);
    commandGrouping.put("-- Help Commands ------------------------", helpCommands);
    commandGrouping.put("-- Iterator Configuration ---------------", iteratorCommands);
    commandGrouping.put("-- Permissions Administration Commands --", permissionsCommands);
    commandGrouping.put("-- Shell State Commands -----------------", stateCommands);
    commandGrouping.put("-- Table Administration Commands --------", tableCommands);
    commandGrouping.put("-- Table Control Commands ---------------", tableControlCommands);
    commandGrouping.put("-- User Administration Commands ---------", userCommands);
    // Flatten every grouped command into the name -> command factory map.
    for (Command[] cmds : commandGrouping.values()) {
        for (Command cmd : cmds) commandFactory.put(cmd.getName(), cmd);
    }
    // Hidden/other commands are registered but not shown in any group.
    for (Command cmd : otherCommands) {
        commandFactory.put(cmd.getName(), cmd);
    }
    return true;
}
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache: the DynamicThreadPoolsIT class, method test.
@Test
public void test() throws Exception {
    // One source table plus 14 clones to generate many concurrent compactions.
    final String[] tables = getUniqueNames(15);
    final String firstTable = tables[0];
    final Connector conn = getConnector();
    conn.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");

    // Ingest 500k rows into the first table, authenticating per cluster config.
    final TestIngest.Opts ingestOpts = new TestIngest.Opts();
    ingestOpts.rows = 500 * 1000;
    ingestOpts.createTable = true;
    ingestOpts.setTableName(firstTable);
    final ClientConfiguration clientConf = cluster.getClientConfig();
    if (clientConf.hasSasl()) {
        ingestOpts.updateKerberosCredentials(clientConf);
    } else {
        ingestOpts.setPrincipal(getAdminPrincipal());
    }
    TestIngest.ingest(conn, ingestOpts, new BatchWriterOpts());
    conn.tableOperations().flush(firstTable, null, null, true);

    // Clone the ingested table into all remaining names.
    for (int i = 1; i < tables.length; i++) {
        conn.tableOperations().clone(firstTable, tables[i], true, null, null);
    }

    // time between checks of the thread pool sizes
    sleepUninterruptibly(11, TimeUnit.SECONDS);

    // Kick off non-blocking compactions on every clone.
    final Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
    for (int i = 1; i < tables.length; i++) {
        conn.tableOperations().compact(tables[i], null, null, true, false);
    }

    // Poll master stats until we observe more than 3 running major compactions.
    for (int attempt = 0; attempt < 30; attempt++) {
        int running = 0;
        MasterClientService.Iface client = null;
        MasterMonitorInfo stats = null;
        while (true) {
            try {
                client = MasterClient.getConnectionWithRetry(new ClientContext(conn.getInstance(), creds, clientConf));
                stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(conn.getInstance()));
                break;
            } catch (ThriftNotActiveServiceException e) {
                // Let it loop, fetching a new location
                sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            } finally {
                if (client != null) {
                    MasterClient.close(client);
                }
            }
        }
        // Sum running major compactions across all tablet servers and tables.
        for (TabletServerStatus server : stats.tServerInfo) {
            for (TableInfo table : server.tableMap.values()) {
                running += table.majors.running;
            }
        }
        System.out.println("count " + running);
        if (running > 3) {
            return;
        }
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
    fail("Could not observe higher number of threads after changing the config");
}
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache: the BulkSplitOptimizationIT class, method testBulkSplitOptimization.
@Test
public void testBulkSplitOptimization() throws Exception {
    // Create a table whose split threshold is high enough that bulk import
    // alone produces no splits.
    final Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "1000");
    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");

    // Generate RFiles on the cluster filesystem and bulk-import them.
    FileSystem fs = cluster.getFileSystem();
    Path testDir = new Path(getUsableDir(), "testmf");
    FunctionalTestUtils.createRFiles(c, fs, testDir.toString(), ROWS, SPLITS, 8);
    FileStatus[] stats = fs.listStatus(testDir);
    System.out.println("Number of generated files: " + stats.length);
    FunctionalTestUtils.bulkImport(c, fs, tableName, testDir.toString());

    // Immediately after import: no splits yet, all data in one tablet.
    FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
    FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);

    // initiate splits
    getConnector().tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");
    sleepUninterruptibly(2, TimeUnit.SECONDS);

    // wait until over split threshold -- should be 78 splits
    while (getConnector().tableOperations().listSplits(tableName).size() < 75) {
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
    FunctionalTestUtils.checkSplits(c, tableName, 50, 100);

    // Verify the ingested data is all present after splitting.
    VerifyIngest.Opts opts = new VerifyIngest.Opts();
    opts.timestamp = 1;
    opts.dataSize = 50;
    opts.random = 56;
    opts.rows = 100000;
    opts.startRow = 0;
    opts.cols = 1;
    opts.setTableName(tableName);
    AuthenticationToken adminToken = getAdminToken();
    if (adminToken instanceof PasswordToken) {
        // Fix: cast the token we already fetched instead of calling
        // getAdminToken() a second time, which could return a different instance.
        PasswordToken token = (PasswordToken) adminToken;
        opts.setPassword(new Password(new String(token.getPassword(), UTF_8)));
        opts.setPrincipal(getAdminPrincipal());
    } else if (adminToken instanceof KerberosToken) {
        ClientConfiguration clientConf = cluster.getClientConfig();
        opts.updateKerberosCredentials(clientConf);
    } else {
        Assert.fail("Unknown token type");
    }
    VerifyIngest.verifyIngest(c, opts, new ScannerOpts());

    // ensure each tablet does not have all map files, should be ~2.5 files per tablet
    FunctionalTestUtils.checkRFiles(c, tableName, 50, 100, 1, 4);
}
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache: the TransportCachingIT class, method testCachedTransport.
@Test
public void testCachedTransport() {
    // Build a client context from the cluster's client configuration.
    Connector conn = getConnector();
    Instance instance = conn.getInstance();
    ClientConfiguration clientConf = cluster.getClientConfig();
    ClientContext context = new ClientContext(instance, new Credentials(getAdminPrincipal(), getAdminToken()), clientConf);
    long rpcTimeout = ConfigurationTypeHelper.getTimeInMillis(Property.GENERAL_RPC_TIMEOUT.getDefaultValue());
    // create list of servers
    ArrayList<ThriftTransportKey> servers = new ArrayList<>();
    // add tservers
    ZooCache zc = new ZooCacheFactory().getZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
    for (String tserver : zc.getChildren(ZooUtil.getRoot(instance) + Constants.ZTSERVERS)) {
        String path = ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/" + tserver;
        byte[] data = ZooUtil.getLockData(zc, path);
        if (data != null) {
            String strData = new String(data, UTF_8);
            // Skip the master's lock entry; only real tserver addresses are wanted.
            if (!strData.equals("master"))
                servers.add(new ThriftTransportKey(new ServerServices(strData).getAddress(Service.TSERV_CLIENT), rpcTimeout, context));
        }
    }
    ThriftTransportPool pool = ThriftTransportPool.getInstance();
    TTransport first = null;
    while (null == first) {
        try {
            // Get a transport (cached or not)
            first = pool.getAnyTransport(servers, true).getSecond();
        } catch (TTransportException e) {
            log.warn("Failed to obtain transport to {}", servers);
        }
    }
    assertNotNull(first);
    // Return it to unreserve it
    pool.returnTransport(first);
    TTransport second = null;
    while (null == second) {
        try {
            // Get a cached transport (should be the first)
            second = pool.getAnyTransport(servers, true).getSecond();
        } catch (TTransportException e) {
            log.warn("Failed obtain 2nd transport to {}", servers);
        }
    }
    // We should get the same transport
    assertTrue("Expected the first and second to be the same instance", first == second);
    // Return the 2nd
    pool.returnTransport(second);
    TTransport third = null;
    while (null == third) {
        try {
            // Get a non-cached transport
            third = pool.getAnyTransport(servers, false).getSecond();
        } catch (TTransportException e) {
            // Fix: this log message previously said "2nd transport" (copy-paste
            // error) even though it is the third transport being fetched.
            log.warn("Failed to obtain 3rd transport to {}", servers);
        }
    }
    assertFalse("Expected second and third transport to be different instances", second == third);
    pool.returnTransport(third);
}
Aggregations