Search in sources :

Example 1 with StandaloneAccumuloCluster

Use of org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster in the Apache Accumulo project.

From the class ReadWriteIT, method sunnyDay:

/**
 * End-to-end smoke test: starts the monitor, ingests and verifies data, fetches the monitor web
 * page (over HTTPS when a monitor SSL keystore is configured on a standalone cluster), waits for a
 * clean shutdown (master lock released), then restarts the cluster.
 *
 * @throws Exception on any ingest, verification, I/O, or cluster-control failure
 */
@Test
public void sunnyDay() throws Exception {
    // Start accumulo, create a table, insert some data, verify we can read it out.
    // Shutdown cleanly.
    log.debug("Starting Monitor");
    cluster.getClusterControl().startAllServers(ServerType.MONITOR);
    Connector connector = getConnector();
    String tableName = getUniqueNames(1)[0];
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
    // Poll ZooKeeper until the monitor has advertised its HTTP address. The surrounding test
    // timeout bounds this loop; each miss waits 2s before retrying.
    String monitorLocation = null;
    while (null == monitorLocation) {
        monitorLocation = MonitorUtil.getLocation(connector.getInstance());
        if (null == monitorLocation) {
            log.debug("Could not fetch monitor HTTP address from zookeeper");
            Thread.sleep(2000);
        }
    }
    String scheme = "http://";
    if (getCluster() instanceof StandaloneAccumuloCluster) {
        // A standalone cluster may have been deployed with the monitor behind SSL; detect that by
        // inspecting its accumulo-site.xml for a monitor keystore.
        StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
        File accumuloSite = new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo-site.xml");
        if (accumuloSite.isFile()) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path(accumuloSite.toURI()));
            String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
            if (null != monitorSslKeystore) {
                log.info("Setting scheme to HTTPS since monitor ssl keystore configuration was observed in {}", accumuloSite);
                scheme = "https://";
                // Trust-all context so the test can talk to the monitor's (possibly self-signed)
                // certificate. "TLS" is the correct algorithm name; "SSL" (SSLv3) is obsolete and
                // disabled on modern JDKs.
                SSLContext ctx = SSLContext.getInstance("TLS");
                TrustManager[] tm = new TrustManager[] { new TestTrustManager() };
                ctx.init(new KeyManager[0], tm, new SecureRandom());
                SSLContext.setDefault(ctx);
                HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
                HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
            }
        } else {
            log.info("{} is not a normal file, not checking for monitor running with SSL", accumuloSite);
        }
    }
    URL url = new URL(scheme + monitorLocation);
    log.debug("Fetching web page {}", url);
    String result = FunctionalTestUtils.readAll(url.openStream());
    // A real monitor page is substantially larger than 100 characters; this guards against an
    // empty or error response without being brittle about exact content.
    assertTrue(result.length() > 100);
    log.debug("Stopping accumulo cluster");
    ClusterControl control = cluster.getClusterControl();
    control.adminStopAll();
    // Wait until the master releases its ZooKeeper lock, which signals the shutdown completed.
    ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
    ZooCache zcache = new ZooCache(zreader, null);
    byte[] masterLockData;
    do {
        masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
        if (null != masterLockData) {
            log.info("Master lock is still held");
            Thread.sleep(1000);
        }
    } while (null != masterLockData);
    control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
    control.stopAllServers(ServerType.MONITOR);
    control.stopAllServers(ServerType.TRACER);
    log.debug("success!");
    // Restarting everything
    cluster.start();
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Configuration(org.apache.hadoop.conf.Configuration) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) SecureRandom(java.security.SecureRandom) SSLContext(javax.net.ssl.SSLContext) StandaloneAccumuloCluster(org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster) ZooCache(org.apache.accumulo.fate.zookeeper.ZooCache) URL(java.net.URL) TrustManager(javax.net.ssl.TrustManager) X509TrustManager(javax.net.ssl.X509TrustManager) ZooReader(org.apache.accumulo.fate.zookeeper.ZooReader) File(java.io.File) ClusterControl(org.apache.accumulo.cluster.ClusterControl) Test(org.junit.Test)

Example 2 with StandaloneAccumuloCluster

Use of org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster in the Apache Accumulo project.

From the class AccumuloClusterHarness, method setupCluster:

/**
 * Instantiates and prepares the cluster under test before each test method. For MINI clusters a
 * fresh MiniAccumuloCluster is created (and started below, since MINI is dynamic); for STANDALONE
 * clusters an existing deployment is wrapped and leftover tables/users from prior runs are
 * removed. When Kerberos is in play, logs in the appropriate principals and (for MINI) creates
 * the trace table with the permissions the trace user needs.
 *
 * @throws Exception if cluster creation, login, cleanup, or table/permission setup fails
 */
@Before
public void setupCluster() throws Exception {
    // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
    Assume.assumeTrue(canRunTest(type));
    switch(type) {
        case MINI:
            MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
            // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
            MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
            cluster = impl;
            // MAC makes a ClientConf for us, just set it
            ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
            // Login as the "root" user
            if (null != krb) {
                ClusterUser rootUser = krb.getRootUser();
                // Log in the 'client' user
                UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
            }
            break;
        case STANDALONE:
            StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
            ClientConfiguration clientConf = conf.getClientConf();
            StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers());
            // If these are provided in the configuration, pass them into the cluster
            standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
            standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
            standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
            standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
            standaloneCluster.setServerCmdPrefix(conf.getServerCmdPrefix());
            standaloneCluster.setClientCmdPrefix(conf.getClientCmdPrefix());
            // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in as SIMPLE instead of KERBEROS
            Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
            if (clientConf.hasSasl()) {
                UserGroupInformation.setConfiguration(hadoopConfiguration);
                // Login as the admin user to start the tests
                UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
            }
            // Set the implementation
            cluster = standaloneCluster;
            break;
        default:
            throw new RuntimeException("Unhandled type: " + type);
    }
    if (type.isDynamic()) {
        cluster.start();
    } else {
        // Standalone clusters persist across runs, so scrub state left by previous tests.
        log.info("Removing tables which appear to be from a previous test run");
        cleanupTables();
        log.info("Removing users which appear to be from a previous test run");
        cleanupUsers();
    }
    switch(type) {
        case MINI:
            if (null != krb) {
                final String traceTable = Property.TRACE_TABLE.getDefaultValue();
                final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
                // Login as the system/trace user and open a connector as that user (ensures the
                // user will exist for us to assign permissions to)
                UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
                Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());
                // Then, log back in as the "root" user and do the grant
                UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
                conn = getConnector();
                // Create the trace table
                conn.tableOperations().create(traceTable);
                // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
                // to have the ability to read, write and alter the trace table
                conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
                conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
                conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
            }
            break;
        default:
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) AccumuloMiniClusterConfiguration(org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StandaloneAccumuloClusterConfiguration(org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration) AccumuloClusterPropertyConfiguration(org.apache.accumulo.harness.conf.AccumuloClusterPropertyConfiguration) AccumuloClusterConfiguration(org.apache.accumulo.harness.conf.AccumuloClusterConfiguration) AccumuloMiniClusterConfiguration(org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) StandaloneAccumuloClusterConfiguration(org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration) KerberosToken(org.apache.accumulo.core.client.security.tokens.KerberosToken) ClusterUser(org.apache.accumulo.cluster.ClusterUser) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) StandaloneAccumuloCluster(org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Before(org.junit.Before)

Example 3 with StandaloneAccumuloCluster

Use of org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster in the Apache Accumulo project.

From the class ReadWriteIT, method verifyLocalityGroupsInRFile:

/**
 * Ingests and flushes data, then locates the table's RFiles via the metadata table and runs
 * {@code PrintInfo} on each, asserting that the expected locality group ("g1" containing family
 * "colf") appears in the output. PrintInfo's stdout is captured by temporarily swapping
 * {@code System.out}.
 *
 * @param connector connector to the cluster under test
 * @param tableName table whose RFiles should be inspected
 * @throws Exception on ingest, verification, scan, or PrintInfo failure
 */
private void verifyLocalityGroupsInRFile(final Connector connector, final String tableName) throws Exception {
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
    // Flush so the data lands in RFiles referenced by the metadata table.
    connector.tableOperations().flush(tableName, null, null, true);
    try (BatchScanner bscanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
        String tableId = connector.tableOperations().tableIdMap().get(tableName);
        // Metadata rows for a table span [tableId; , tableId<]; the file column family lists its RFiles.
        bscanner.setRanges(Collections.singletonList(new Range(new Text(tableId + ";"), new Text(tableId + "<"))));
        bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        boolean foundFile = false;
        for (Entry<Key, Value> entry : bscanner) {
            foundFile = true;
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PrintStream oldOut = System.out;
            try (PrintStream newOut = new PrintStream(baos)) {
                // Redirect stdout so PrintInfo's report can be inspected; restored in finally.
                System.setOut(newOut);
                List<String> args = new ArrayList<>();
                args.add(entry.getKey().getColumnQualifier().toString());
                if (ClusterType.STANDALONE == getClusterType() && cluster.getClientConfig().hasSasl()) {
                    // With SASL on a standalone cluster, PrintInfo needs the Hadoop config files
                    // or UGI would log in as SIMPLE instead of KERBEROS.
                    args.add("--config");
                    StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) cluster;
                    String hadoopConfDir = sac.getHadoopConfDir();
                    args.add(new Path(hadoopConfDir, "core-site.xml").toString());
                    args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
                }
                log.info("Invoking PrintInfo with {}", args);
                PrintInfo.main(args.toArray(new String[0]));
                newOut.flush();
                String stdout = baos.toString();
                assertTrue(stdout.contains("Locality group           : g1"));
                assertTrue(stdout.contains("families        : [colf]"));
            } finally {
                System.setOut(oldOut);
            }
        }
        // At least one RFile must have been found, otherwise the assertions above never ran.
        assertTrue(foundFile);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) PrintStream(java.io.PrintStream) BatchScanner(org.apache.accumulo.core.client.BatchScanner) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Range(org.apache.accumulo.core.data.Range) StandaloneAccumuloCluster(org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key)

Aggregations

StandaloneAccumuloCluster (org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster)3 ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration)2 Connector (org.apache.accumulo.core.client.Connector)2 Configuration (org.apache.hadoop.conf.Configuration)2 Path (org.apache.hadoop.fs.Path)2 ByteArrayOutputStream (java.io.ByteArrayOutputStream)1 File (java.io.File)1 PrintStream (java.io.PrintStream)1 URL (java.net.URL)1 SecureRandom (java.security.SecureRandom)1 ArrayList (java.util.ArrayList)1 SSLContext (javax.net.ssl.SSLContext)1 TrustManager (javax.net.ssl.TrustManager)1 X509TrustManager (javax.net.ssl.X509TrustManager)1 ClusterControl (org.apache.accumulo.cluster.ClusterControl)1 ClusterUser (org.apache.accumulo.cluster.ClusterUser)1 BatchScanner (org.apache.accumulo.core.client.BatchScanner)1 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)1 KerberosToken (org.apache.accumulo.core.client.security.tokens.KerberosToken)1 Key (org.apache.accumulo.core.data.Key)1