Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.
The class CredentialsIT, method createLocalUser.
@Before
public void createLocalUser() throws AccumuloException, AccumuloSecurityException {
  Connector conn = getConnector();
  inst = conn.getInstance();
  ClientConfiguration clientConf = cluster.getClientConfig();
  ClusterUser user = getUser(0);
  username = user.getPrincipal();
  saslEnabled = clientConf.hasSasl();
  // Create the user if it doesn't exist
  Set<String> users = conn.securityOperations().listLocalUsers();
  if (!users.contains(username)) {
    PasswordToken passwdToken = null;
    if (!saslEnabled) {
      password = user.getPassword();
      passwdToken = new PasswordToken(password);
    }
    conn.securityOperations().createLocalUser(username, passwdToken);
  }
}
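The snippet above only provisions the user; a follow-up step in such a test is typically to authenticate as that user. The fragment below is a hypothetical sketch, not part of CredentialsIT as shown: it reuses the inst, username, password and saslEnabled values assigned above and assumes user.getKeytab() is available in the SASL case.

// Hypothetical follow-up: obtain a Connector for the newly created user.
// Uses org.apache.hadoop.security.UserGroupInformation and the Accumulo client API.
AuthenticationToken token;
if (saslEnabled) {
  // With SASL/Kerberos enabled, log in from the user's keytab and let the token
  // pick up the current Kerberos login instead of using a password.
  UserGroupInformation.loginUserFromKeytab(username, user.getKeytab().getAbsolutePath());
  token = new KerberosToken();
} else {
  token = new PasswordToken(password);
}
Connector userConn = inst.getConnector(username, token);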
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.
The class CompactionIT, method test.
@Test
public void test() throws Exception {
  final Connector c = getConnector();
  final String tableName = getUniqueNames(1)[0];
  c.tableOperations().create(tableName);
  c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
  FileSystem fs = getFileSystem();
  Path root = new Path(cluster.getTemporaryPath(), getClass().getName());
  Path testrf = new Path(root, "testrf");
  FunctionalTestUtils.createRFiles(c, fs, testrf.toString(), 500000, 59, 4);
  FunctionalTestUtils.bulkImport(c, fs, tableName, testrf.toString());
  int beforeCount = countFiles(c);
  final AtomicBoolean fail = new AtomicBoolean(false);
  final ClientConfiguration clientConf = cluster.getClientConfig();
  final int THREADS = 5;
  for (int count = 0; count < THREADS; count++) {
    ExecutorService executor = Executors.newFixedThreadPool(THREADS);
    final int span = 500000 / 59;
    for (int i = 0; i < 500000; i += 500000 / 59) {
      final int finalI = i;
      Runnable r = new Runnable() {
        @Override
        public void run() {
          try {
            VerifyIngest.Opts opts = new VerifyIngest.Opts();
            opts.startRow = finalI;
            opts.rows = span;
            opts.random = 56;
            opts.dataSize = 50;
            opts.cols = 1;
            opts.setTableName(tableName);
            if (clientConf.hasSasl()) {
              opts.updateKerberosCredentials(clientConf);
            } else {
              opts.setPrincipal(getAdminPrincipal());
              PasswordToken passwordToken = (PasswordToken) getAdminToken();
              opts.setPassword(new Password(new String(passwordToken.getPassword(), UTF_8)));
            }
            VerifyIngest.verifyIngest(c, opts, new ScannerOpts());
          } catch (Exception ex) {
            log.warn("Got exception verifying data", ex);
            fail.set(true);
          }
        }
      };
      executor.execute(r);
    }
    executor.shutdown();
    executor.awaitTermination(defaultTimeoutSeconds(), TimeUnit.SECONDS);
    assertFalse("Failed to successfully run all threads, Check the test output for error", fail.get());
  }
  int finalCount = countFiles(c);
  assertTrue(finalCount < beforeCount);
  try {
    getClusterControl().adminStopAll();
  } finally {
    // Make sure the internal state in the cluster is reset (e.g. processes in MAC)
    getCluster().stop();
    if (ClusterType.STANDALONE == getClusterType()) {
      // Then restart things for the next test if it's a standalone
      getCluster().start();
    }
  }
}
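The test asserts that background major compactions, encouraged by the 1.0 majc ratio, reduce the file count while the verification threads scan the table. The countFiles(c) helper itself is not shown above; the sketch below is one assumed way such a count could be taken, by counting file entries in the accumulo.metadata table, and may differ from the helper actually used by the test.

// Assumed implementation sketch, not the original helper: count the per-tablet
// file entries recorded in the metadata table.
private int countFiles(Connector c) throws Exception {
  Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
  return Iterators.size(scanner.iterator()); // com.google.common.collect.Iterators
}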
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.
The class SimpleProxyBase, method setUpProxy.
/**
 * Does the actual test setup, invoked by the concrete test class
 */
public static void setUpProxy() throws Exception {
  assertNotNull("Implementations must initialize the TProtocolFactory", factory);
  Connector c = SharedMiniClusterBase.getConnector();
  Instance inst = c.getInstance();
  waitForAccumulo(c);
  hostname = InetAddress.getLocalHost().getCanonicalHostName();
  Properties props = new Properties();
  props.put("instance", inst.getInstanceName());
  props.put("zookeepers", inst.getZooKeepers());
  final String tokenClass;
  if (isKerberosEnabled()) {
    tokenClass = KerberosToken.class.getName();
    TestingKdc kdc = getKdc();
    // Create a principal+keytab for the proxy
    proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
    hostname = InetAddress.getLocalHost().getCanonicalHostName();
    // Set the primary because the client needs to know it
    proxyPrimary = "proxy";
    // Qualify with an instance
    proxyPrincipal = proxyPrimary + "/" + hostname;
    kdc.createPrincipal(proxyKeytab, proxyPrincipal);
    // Tack on the realm too
    proxyPrincipal = kdc.qualifyUser(proxyPrincipal);
    props.setProperty("kerberosPrincipal", proxyPrincipal);
    props.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
    props.setProperty("thriftServerType", "sasl");
    // Enable kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    // Login for the Proxy itself
    UserGroupInformation.loginUserFromKeytab(proxyPrincipal, proxyKeytab.getAbsolutePath());
    // User for tests
    ClusterUser user = kdc.getRootUser();
    clientPrincipal = user.getPrincipal();
    clientKeytab = user.getKeytab();
  } else {
    clientPrincipal = "root";
    tokenClass = PasswordToken.class.getName();
    props.put("password", SharedMiniClusterBase.getRootPassword());
    hostname = "localhost";
  }
  props.put("tokenClass", tokenClass);
  ClientConfiguration clientConfig = SharedMiniClusterBase.getCluster().getClientConfig();
  String clientConfPath = new File(SharedMiniClusterBase.getCluster().getConfig().getConfDir(), "client.conf").getAbsolutePath();
  props.put("clientConfigurationFile", clientConfPath);
  proxyPort = PortUtils.getRandomFreePort();
  proxyServer = Proxy.createProxyServer(HostAndPort.fromParts(hostname, proxyPort), factory, props, clientConfig).server;
  while (!proxyServer.isServing())
    sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}
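Once proxyServer.isServing() returns true, a Thrift client can connect to the proxy at hostname:proxyPort. The fragment below is a rough, hypothetical sketch for the password (non-Kerberos) case only; the framed transport and the "password" login property are assumptions for illustration, not taken from the original test.

// Rough client-side sketch (assumptions noted above): open a framed Thrift transport
// to the proxy and log in as root with the cluster's root password.
TSocket socket = new TSocket(hostname, proxyPort);
TTransport transport = new TFramedTransport(socket);
transport.open();
AccumuloProxy.Client proxyClient = new AccumuloProxy.Client(factory.getProtocol(transport));
ByteBuffer loginToken = proxyClient.login("root", Collections.singletonMap("password", SharedMiniClusterBase.getRootPassword()));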
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.
The class InputConfigurator, method getTabletLocator.
/**
 * Initializes an Accumulo {@link TabletLocator} based on the configuration.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param tableId
 *          The table id for which to initialize the {@link TabletLocator}
 * @return an Accumulo tablet locator
 * @throws TableNotFoundException
 *           if the table name set on the configuration doesn't exist
 * @since 1.6.0
 */
public static TabletLocator getTabletLocator(Class<?> implementingClass, Configuration conf, Table.ID tableId) throws TableNotFoundException {
  String instanceType = conf.get(enumToConfKey(implementingClass, InstanceOpts.TYPE));
  if ("MockInstance".equals(instanceType))
    return DeprecationUtil.makeMockLocator();
  Instance instance = getInstance(implementingClass, conf);
  ClientConfiguration clientConf = getClientConfiguration(implementingClass, conf);
  ClientContext context = new ClientContext(instance, new Credentials(getPrincipal(implementingClass, conf), getAuthenticationToken(implementingClass, conf)), clientConf);
  return TabletLocator.getLocator(context, tableId);
}
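The keys read above are expected to already be present in the Hadoop configuration; callers do not pass connection settings at lookup time. The fragment below is a hedged sketch of how framework code such as an Accumulo input format might use the result, assuming conf is already populated and tableId was resolved elsewhere (for example via Tables.getTableId).

// Hypothetical caller-side fragment: fetch the locator for the configured table and
// clear any stale cache entries before mapping ranges to tablets.
TabletLocator locator = InputConfigurator.getTabletLocator(AccumuloInputFormat.class, conf, tableId);
locator.invalidateCache();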
Use of org.apache.accumulo.core.client.ClientConfiguration in project accumulo by apache.
The class ConfiguratorBase, method getClientConfiguration.
/**
 * Obtain a {@link ClientConfiguration} based on the configuration.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @return A {@link ClientConfiguration}
 * @since 1.7.0
 */
public static ClientConfiguration getClientConfiguration(Class<?> implementingClass, Configuration conf) {
  String clientConfigString = conf.get(enumToConfKey(implementingClass, InstanceOpts.CLIENT_CONFIG));
  if (null != clientConfigString) {
    return ClientConfiguration.deserialize(clientConfigString);
  }
  String instanceName = conf.get(enumToConfKey(implementingClass, InstanceOpts.NAME));
  String zookeepers = conf.get(enumToConfKey(implementingClass, InstanceOpts.ZOO_KEEPERS));
  ClientConfiguration clientConf = ClientConfiguration.loadDefault();
  if (null != instanceName) {
    clientConf.withInstance(instanceName);
  }
  if (null != zookeepers) {
    clientConf.withZkHosts(zookeepers);
  }
  return clientConf;
}
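In a MapReduce job, the serialized client configuration that this method deserializes is normally written into the job configuration through the public input format setters rather than set by hand. The sketch below shows that driver-side setup; the instance name, ZooKeeper host, principal, token and table name are placeholder values, not taken from the surrounding code.

// Hypothetical driver-side setup that populates the configuration keys read back by
// getClientConfiguration(...) and getTabletLocator(...).
Job job = Job.getInstance();
ClientConfiguration clientConf = ClientConfiguration.loadDefault().withInstance("myInstance").withZkHosts("zkhost:2181");
AccumuloInputFormat.setZooKeeperInstance(job, clientConf);
AccumuloInputFormat.setConnectorInfo(job, "mapreduceUser", new PasswordToken("secret"));
AccumuloInputFormat.setInputTableName(job, "mytable");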