Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo by apache.
The class KerberosIT, method startMac.
@Before
public void startMac() throws Exception {
  MiniClusterHarness harness = new MiniClusterHarness();
  mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {
    @Override
    public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
      Map<String,String> site = cfg.getSiteConfig();
      site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
      cfg.setSiteConfig(site);
    }
  });
  mac.getConfig().setNumTservers(1);
  mac.start();
  // Enable Kerberos auth
  Configuration conf = new Configuration(false);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
}
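A setup like this is normally paired with a teardown that stops the mini cluster after each test method. A minimal sketch of such a companion teardown, assuming the same mac field; the actual KerberosIT teardown may differ:

@After
public void stopMac() throws Exception {
  // Stop the MiniAccumuloCluster started in startMac(), if it came up at all
  if (mac != null) {
    mac.stop();
  }
}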
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo by apache.
The class KerberosProxyIT, method startMac.
@Before
public void startMac() throws Exception {
  MiniClusterHarness harness = new MiniClusterHarness();
  mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"),
      new MiniClusterConfigurationCallback() {
        @Override
        public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
          cfg.setNumTservers(1);
          Map<String,String> siteCfg = cfg.getSiteConfig();
          // Allow the proxy to impersonate the "root" Accumulo user and our one special user.
          siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION.getKey(),
              proxyPrincipal + ":" + kdc.getRootUser().getPrincipal() + ","
                  + kdc.qualifyUser(PROXIED_USER1) + "," + kdc.qualifyUser(PROXIED_USER2));
          siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION.getKey(), "*");
          cfg.setSiteConfig(siteCfg);
        }
      }, kdc);
  mac.start();
  MiniAccumuloConfigImpl cfg = mac.getConfig();
  // Generate the proxy configuration and start the proxy
  proxyProcess = startProxy(cfg);
  // Enable Kerberos auth
  Configuration conf = new Configuration(false);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  boolean success = false;
  ClusterUser rootUser = kdc.getRootUser();
  // Rely on the JUnit timeout rule to abort if the proxy never comes up
  while (!success) {
    UserGroupInformation ugi;
    try {
      ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(),
          rootUser.getKeytab().getAbsolutePath());
    } catch (IOException ex) {
      log.info("Login as root is failing", ex);
      Thread.sleep(3000);
      continue;
    }
    TSocket socket = new TSocket(hostname, proxyPort);
    log.info("Connecting to proxy with server primary '{}' running on {}", proxyPrimary, hostname);
    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
        Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);
    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
    try {
      // The UGI transport will perform the doAs for us
      ugiTransport.open();
      success = true;
    } catch (TTransportException e) {
      Throwable cause = e.getCause();
      if (cause instanceof ConnectException) {
        log.info("Proxy not yet up, waiting");
        Thread.sleep(3000);
        proxyProcess = checkProxyAndRestart(proxyProcess, cfg);
        continue;
      }
    } finally {
      ugiTransport.close();
    }
  }
  assertTrue("Failed to connect to the proxy repeatedly", success);
}
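The checkProxyAndRestart helper used in the retry loop is not shown in this listing. One plausible shape for it, assuming startProxy returns a java.lang.Process as the proxyProcess field suggests; the real KerberosProxyIT helper may differ:

// Hypothetical sketch: restart the proxy only if its process has died.
private Process checkProxyAndRestart(Process proxy, MiniAccumuloConfigImpl cfg) throws Exception {
  try {
    // exitValue() throws IllegalThreadStateException while the process is still running
    int exitCode = proxy.exitValue();
    log.info("Proxy exited with code {}, restarting it", exitCode);
    return startProxy(cfg);
  } catch (IllegalThreadStateException e) {
    // Still running; keep waiting on the existing process
    return proxy;
  }
}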
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project incubator-rya by apache.
The class GeoWaveGTQueryTest, method setup.
@BeforeClass
public static void setup() throws AccumuloException, AccumuloSecurityException, IOException, InterruptedException {
  tempAccumuloDir = Files.createTempDir();
  accumulo = MiniAccumuloClusterFactory.newAccumuloCluster(
      new MiniAccumuloConfigImpl(tempAccumuloDir, ACCUMULO_PASSWORD), GeoWaveGTQueryTest.class);
  accumulo.start();
  dataStore = new AccumuloDataStore(new BasicAccumuloOperations(accumulo.getZooKeepers(),
      accumulo.getInstanceName(), ACCUMULO_USER, ACCUMULO_PASSWORD, TABLE_NAMESPACE));
  ingestCannedData();
}
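Since this setup runs once per class and creates a temp directory, a matching class-level teardown would stop the cluster and remove the directory. A minimal sketch, assuming the same static fields and org.apache.commons.io.FileUtils; the actual test's cleanup may differ:

@AfterClass
public static void cleanup() throws IOException, InterruptedException {
  // Stop the mini cluster started in setup(), then remove its temp directory
  if (accumulo != null) {
    accumulo.stop();
  }
  FileUtils.deleteDirectory(tempAccumuloDir);
}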
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo by apache.
The class MiniClusterHarness, method create.
public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token,
    MiniClusterConfigurationCallback configCallback, TestingKdc kdc) throws Exception {
  requireNonNull(token);
  checkArgument(token instanceof PasswordToken || token instanceof KerberosToken,
      "A PasswordToken or KerberosToken is required");
  String rootPasswd;
  if (token instanceof PasswordToken) {
    rootPasswd = new String(((PasswordToken) token).getPassword(), UTF_8);
  } else {
    rootPasswd = UUID.randomUUID().toString();
  }
  File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName);
  MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd);
  // Enable native maps by default
  cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
  cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
  Configuration coreSite = new Configuration(false);
  // Set up SSL and credential providers if the properties request them
  configureForEnvironment(cfg, getClass(), AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc);
  // Invoke the callback for tests to configure MAC before it starts
  configCallback.configureMiniCluster(cfg, coreSite);
  MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg);
  // Write out any configuration items to a file so HDFS will pick them up automatically (from the classpath)
  if (coreSite.size() > 0) {
    File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml");
    if (csFile.exists())
      throw new RuntimeException(csFile + " already exists");
    OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile));
    coreSite.writeXml(out);
    out.close();
  }
  return miniCluster;
}
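To show how this factory method is meant to be driven, here is a hypothetical caller sketch. The test name, token password, and tserver count are illustrative, and passing null for the TestingKdc is an assumption for a non-Kerberos run that should be verified against the harness:

// Hypothetical usage of the create(...) overload shown above.
MiniClusterHarness harness = new MiniClusterHarness();
MiniAccumuloClusterImpl cluster = harness.create("MyIT", "myTestMethod", new PasswordToken("secret"),
    new MiniClusterConfigurationCallback() {
      @Override
      public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
        // Example tweak applied before the cluster starts
        cfg.setNumTservers(2);
      }
    }, null); // assumption: no KDC wanted for this run
try {
  cluster.start();
  // ... run assertions against the running mini cluster ...
} finally {
  cluster.stop();
}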
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo by apache.
The class ExistingMacIT, method testExistingInstance.
@Test
public void testExistingInstance() throws Exception {
  Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
  conn.tableOperations().create("table1");
  BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
  Mutation m1 = new Mutation("00081");
  m1.put("math", "sqroot", "9");
  m1.put("math", "sq", "6560");
  bw.addMutation(m1);
  bw.close();
  conn.tableOperations().flush("table1", null, null, true);
  // TODO use constants
  conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
  conn.tableOperations().flush(RootTable.NAME, null, null, true);
  Set<Entry<ServerType,Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
  for (Entry<ServerType,Collection<ProcessReference>> entry : procs) {
    if (entry.getKey() == ServerType.ZOOKEEPER)
      continue;
    for (ProcessReference pr : entry.getValue()) {
      getCluster().killProcess(entry.getKey(), pr);
    }
  }
  final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
  final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(
      getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
  IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(getCluster().getZooKeepers(),
      (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
  final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstance().getInstanceID();
  while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
    log.debug("Accumulo services still have their ZK locks held");
    Thread.sleep(1000);
  }
  File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
  FileUtils.deleteQuietly(hadoopConfDir);
  assertTrue(hadoopConfDir.mkdirs());
  createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
  createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
  File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
  FileUtils.deleteQuietly(testDir2);
  MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
  macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
  MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
  accumulo2.start();
  conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
  try (Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY)) {
    int sum = 0;
    for (Entry<Key,Value> entry : scanner) {
      sum += Integer.parseInt(entry.getValue().toString());
    }
    Assert.assertEquals(6569, sum);
  }
  accumulo2.stop();
}
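The createEmptyConfig helper referenced above is not shown in this listing. One plausible implementation writes an empty Hadoop configuration document to the given file; the real helper in ExistingMacIT may differ:

// Hypothetical sketch of the helper used to seed empty core-site.xml / hdfs-site.xml files.
private static void createEmptyConfig(File file) throws IOException {
  // Loading no defaults means the written XML contains no properties
  Configuration conf = new Configuration(false);
  try (OutputStream out = new BufferedOutputStream(new FileOutputStream(file))) {
    conf.writeXml(out);
  }
}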