Use of org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster in project accumulo by apache.
The class ReadWriteIT, method sunnyDay.
@Test
public void sunnyDay() throws Exception {
  // Start accumulo, create a table, insert some data, verify we can read it out.
  // Shutdown cleanly.
  log.debug("Starting Monitor");
  cluster.getClusterControl().startAllServers(ServerType.MONITOR);
  Connector connector = getConnector();
  String tableName = getUniqueNames(1)[0];
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
  verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
  String monitorLocation = null;
  while (null == monitorLocation) {
    monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
    if (null == monitorLocation) {
      log.debug("Could not fetch monitor HTTP address from zookeeper");
      Thread.sleep(2000);
    }
  }
  String scheme = "http://";
  if (getCluster() instanceof StandaloneAccumuloCluster) {
    StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
    File accumuloSite = new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo-site.xml");
    if (accumuloSite.isFile()) {
      Configuration conf = new Configuration(false);
      conf.addResource(new Path(accumuloSite.toURI()));
      String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
      if (null != monitorSslKeystore) {
        log.info("Setting scheme to HTTPS since monitor ssl keystore configuration was observed in {}", accumuloSite);
        scheme = "https://";
        SSLContext ctx = SSLContext.getInstance("SSL");
        TrustManager[] tm = new TrustManager[] { new TestTrustManager() };
        ctx.init(new KeyManager[0], tm, new SecureRandom());
        SSLContext.setDefault(ctx);
        HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
        HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
      }
    } else {
      log.info("{} is not a normal file, not checking for monitor running with SSL", accumuloSite);
    }
  }
  URL url = new URL(scheme + monitorLocation);
  log.debug("Fetching web page {}", url);
  String result = FunctionalTestUtils.readAll(url.openStream());
  assertTrue(result.length() > 100);
  log.debug("Stopping accumulo cluster");
  ClusterControl control = cluster.getClusterControl();
  control.adminStopAll();
  ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
  ZooCache zcache = new ZooCache(zreader, null);
  byte[] masterLockData;
  do {
    masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
    if (null != masterLockData) {
      log.info("Master lock is still held");
      Thread.sleep(1000);
    }
  } while (null != masterLockData);
  control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
  control.stopAllServers(ServerType.MONITOR);
  control.stopAllServers(ServerType.TRACER);
  log.debug("success!");
  // Restarting everything
  cluster.start();
}
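The HTTPS branch above installs a TestTrustManager and a TestHostnameVerifier so the test can fetch the monitor page even when it is served with a self-signed certificate. Those inner classes are not shown on this page; a minimal sketch of such trust-all helpers follows (the class names come from the test, but the bodies here are an assumption, and this pattern is acceptable only in test code):

// Sketch only: the real inner classes in ReadWriteIT may differ in detail.
// Requires javax.net.ssl.{X509TrustManager, HostnameVerifier, SSLSession}
// and java.security.cert.X509Certificate.

// A trust manager that accepts any certificate chain. Test use only.
private static class TestTrustManager implements X509TrustManager {
  @Override
  public void checkClientTrusted(X509Certificate[] chain, String authType) {
    // accept everything
  }

  @Override
  public void checkServerTrusted(X509Certificate[] chain, String authType) {
    // accept everything
  }

  @Override
  public X509Certificate[] getAcceptedIssuers() {
    return new X509Certificate[0];
  }
}

// A hostname verifier that accepts any host, so the certificate CN need not match.
private static class TestHostnameVerifier implements HostnameVerifier {
  @Override
  public boolean verify(String hostname, SSLSession session) {
    return true;
  }
}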
Use of org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster in project accumulo by apache.
The class AccumuloClusterHarness, method setupCluster.
@Before
public void setupCluster() throws Exception {
  // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
  Assume.assumeTrue(canRunTest(type));
  switch (type) {
    case MINI:
      MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
      // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
      MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
      cluster = impl;
      // MAC makes a ClientConf for us, just set it
      ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
      // Login as the "root" user
      if (null != krb) {
        ClusterUser rootUser = krb.getRootUser();
        // Log in the 'client' user
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
      }
      break;
    case STANDALONE:
      StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
      ClientConfiguration clientConf = conf.getClientConf();
      StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers());
      // If these are provided in the configuration, pass them into the cluster
      standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
      standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
      standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
      standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
      standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix());
      standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix());
      // For SASL, we need to get the Hadoop configuration files as well; otherwise UGI will log in as SIMPLE instead of KERBEROS
      Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
      if (clientConf.hasSasl()) {
        UserGroupInformation.setConfiguration(hadoopConfiguration);
        // Login as the admin user to start the tests
        UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
      }
      // Set the implementation
      cluster = standaloneCluster;
      break;
    default:
      throw new RuntimeException("Unhandled type");
  }
  if (type.isDynamic()) {
    cluster.start();
  } else {
    log.info("Removing tables which appear to be from a previous test run");
    cleanupTables();
    log.info("Removing users which appear to be from a previous test run");
    cleanupUsers();
  }
  switch (type) {
    case MINI:
      if (null != krb) {
        final String traceTable = Property.TRACE_TABLE.getDefaultValue();
        final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
        // Login as the trace user
        UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
        // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
        Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());
        // Then, log back in as the "root" user and do the grant
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
        conn = getConnector();
        // Create the trace table
        conn.tableOperations().create(traceTable);
        // The trace user (the same kerberos principal as the system user, but using a normal KerberosToken) needs
        // to be able to read, write and alter the trace table
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
      }
      break;
    default:
      // Nothing to do for other cluster types
  }
}
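Subclasses get this cluster lifecycle for free: they extend AccumuloClusterHarness and use accessors such as getConnector() and getUniqueNames(), and the same test then runs against either a MiniAccumuloCluster or a standalone deployment. A hypothetical minimal test is sketched below (ExampleIT and its body are illustrative, not from the project):

// Hypothetical example; the class name and test body are not from the Accumulo source.
import static org.junit.Assert.assertFalse;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.harness.AccumuloClusterHarness;
import org.junit.Test;

public class ExampleIT extends AccumuloClusterHarness {

  @Test
  public void createAndScanEmptyTable() throws Exception {
    // getConnector() and getUniqueNames() come from the harness and behave
    // the same way for the MINI and STANDALONE cluster types.
    Connector conn = getConnector();
    String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    // A newly created table should contain no entries.
    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
      assertFalse(scanner.iterator().hasNext());
    }
  }
}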
Use of org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster in project accumulo by apache.
The class ReadWriteIT, method verifyLocalityGroupsInRFile.
private void verifyLocalityGroupsInRFile(final Connector connector, final String tableName) throws Exception, AccumuloException, AccumuloSecurityException, TableNotFoundException {
  ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
  verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
  connector.tableOperations().flush(tableName, null, null, true);
  try (BatchScanner bscanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
    String tableId = connector.tableOperations().tableIdMap().get(tableName);
    bscanner.setRanges(Collections.singletonList(new Range(new Text(tableId + ";"), new Text(tableId + "<"))));
    bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    boolean foundFile = false;
    for (Entry<Key, Value> entry : bscanner) {
      foundFile = true;
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      PrintStream oldOut = System.out;
      try (PrintStream newOut = new PrintStream(baos)) {
        System.setOut(newOut);
        List<String> args = new ArrayList<>();
        args.add(entry.getKey().getColumnQualifier().toString());
        if (ClusterType.STANDALONE == getClusterType() && cluster.getClientConfig().hasSasl()) {
          args.add("--config");
          StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) cluster;
          String hadoopConfDir = sac.getHadoopConfDir();
          args.add(new Path(hadoopConfDir, "core-site.xml").toString());
          args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
        }
        log.info("Invoking PrintInfo with {}", args);
        PrintInfo.main(args.toArray(new String[args.size()]));
        newOut.flush();
        String stdout = baos.toString();
        assertTrue(stdout.contains("Locality group : g1"));
        assertTrue(stdout.contains("families : [colf]"));
      } finally {
        System.setOut(oldOut);
      }
    }
    assertTrue(foundFile);
  }
}
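The assertions above expect an RFile with a locality group named g1 containing the column family colf, so the table must have been configured accordingly before the flush. A sketch of that configuration using the public tableOperations() API (the exact call site in ReadWriteIT is not shown on this page):

// Sketch: configure locality group "g1" over family "colf" so that, after a
// flush, PrintInfo reports "Locality group : g1" and "families : [colf]".
// Uses java.util.{Collections, Map, Set} and org.apache.hadoop.io.Text.
Map<String, Set<Text>> groups = Collections.singletonMap("g1",
    Collections.singleton(new Text("colf")));
connector.tableOperations().setLocalityGroups(tableName, groups);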