Example 1 with ZooCache

Use of org.apache.accumulo.fate.zookeeper.ZooCache in project accumulo by apache.

Class AdminTest, method testCannotQualifySessionId.

@Test
public void testCannotQualifySessionId() {
    ZooCache zc = EasyMock.createMock(ZooCache.class);
    String root = "/accumulo/id/tservers";
    String server = "localhost:12345";
    String serverPath = root + "/" + server;
    EasyMock.expect(zc.getChildren(serverPath)).andReturn(Collections.emptyList());
    EasyMock.replay(zc);
    // A server that isn't in ZooKeeper. Can't qualify it, should return the original
    assertEquals(server, Admin.qualifyWithZooKeeperSessionId(root, zc, server));
    EasyMock.verify(zc);
}
Also used: ZooCache (org.apache.accumulo.fate.zookeeper.ZooCache), Test (org.junit.Test)
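
For context, here is a minimal, hypothetical sketch of the real lookup the mock above stands in for, built only from constructors and calls that appear elsewhere on this page (the ZooKeeper connect string and session timeout are placeholders, not values from the project):

// Hypothetical sketch: a real ZooCache lookup of a tserver path (placeholder values).
ZooReader reader = new ZooReader("localhost:2181", 30000);
ZooCache cache = new ZooCache(reader, null);
List<String> lockChildren = cache.getChildren("/accumulo/id/tservers/localhost:12345");
if (lockChildren == null || lockChildren.isEmpty()) {
    // No lock node for this server in ZooKeeper, so there is no session id to append;
    // Admin.qualifyWithZooKeeperSessionId returns the address unchanged, as the test asserts.
}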

Example 2 with ZooCache

Use of org.apache.accumulo.fate.zookeeper.ZooCache in project accumulo by apache.

Class AdminTest, method testQualifySessionId.

@Test
public void testQualifySessionId() {
    ZooCache zc = EasyMock.createMock(ZooCache.class);
    String root = "/accumulo/id/tservers";
    String server = "localhost:12345";
    final long session = 123456789L;
    String serverPath = root + "/" + server;
    EasyMock.expect(zc.getChildren(serverPath)).andReturn(Collections.singletonList("child"));
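    // The answer below fills in the ZcStat passed by the caller, simulating an ephemeral child node owned by the fake session.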
    EasyMock.expect(zc.get(EasyMock.eq(serverPath + "/child"), EasyMock.anyObject(ZcStat.class))).andAnswer(new IAnswer<byte[]>() {

        @Override
        public byte[] answer() throws Throwable {
            ZcStat stat = (ZcStat) EasyMock.getCurrentArguments()[1];
            stat.setEphemeralOwner(session);
            return new byte[0];
        }
    });
    EasyMock.replay(zc);
    assertEquals(server + "[" + Long.toHexString(session) + "]", Admin.qualifyWithZooKeeperSessionId(root, zc, server));
    EasyMock.verify(zc);
}
Also used: ZcStat (org.apache.accumulo.fate.zookeeper.ZooCache.ZcStat), ZooCache (org.apache.accumulo.fate.zookeeper.ZooCache), Test (org.junit.Test)
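
The assertion above fixes the expected output format: the server address followed by the ZooKeeper session id in hex, wrapped in brackets. Below is a rough sketch of that qualification logic. It is not the project's implementation (that lives in Admin.qualifyWithZooKeeperSessionId), and it assumes ZcStat has a no-argument constructor and a getEphemeralOwner() getter matching the setEphemeralOwner(long) used in the mock:

// Hypothetical sketch of the behavior the two tests above pin down.
static String qualify(ZooCache zc, String root, String server) {
    String serverPath = root + "/" + server;
    List<String> children = zc.getChildren(serverPath);
    if (children == null || children.isEmpty()) {
        // Example 1: server not registered in ZooKeeper, return the address as-is.
        return server;
    }
    ZcStat stat = new ZcStat();
    // Reading the child populates the stat, including the ephemeral owner (the session id).
    zc.get(serverPath + "/" + children.get(0), stat);
    // Assumes a getEphemeralOwner() accessor; Example 2 expects the hex form in brackets.
    return server + "[" + Long.toHexString(stat.getEphemeralOwner()) + "]";
}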

Example 3 with ZooCache

Use of org.apache.accumulo.fate.zookeeper.ZooCache in project accumulo by apache.

Class ReadWriteIT, method sunnyDay.

@Test
public void sunnyDay() throws Exception {
    // Start accumulo, create a table, insert some data, verify we can read it out.
    // Shutdown cleanly.
    log.debug("Starting Monitor");
    cluster.getClusterControl().startAllServers(ServerType.MONITOR);
    Connector connector = getConnector();
    String tableName = getUniqueNames(1)[0];
    ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
    verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
    String monitorLocation = null;
    while (null == monitorLocation) {
        monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
        if (null == monitorLocation) {
            log.debug("Could not fetch monitor HTTP address from zookeeper");
            Thread.sleep(2000);
        }
    }
    String scheme = "http://";
    if (getCluster() instanceof StandaloneAccumuloCluster) {
        StandaloneAccumuloCluster standaloneCluster = (StandaloneAccumuloCluster) getCluster();
        File accumuloSite = new File(standaloneCluster.getServerAccumuloConfDir(), "accumulo-site.xml");
        if (accumuloSite.isFile()) {
            Configuration conf = new Configuration(false);
            conf.addResource(new Path(accumuloSite.toURI()));
            String monitorSslKeystore = conf.get(Property.MONITOR_SSL_KEYSTORE.getKey());
            if (null != monitorSslKeystore) {
                log.info("Setting scheme to HTTPS since monitor ssl keystore configuration was observed in {}", accumuloSite);
                scheme = "https://";
                SSLContext ctx = SSLContext.getInstance("SSL");
                TrustManager[] tm = new TrustManager[] { new TestTrustManager() };
                ctx.init(new KeyManager[0], tm, new SecureRandom());
                SSLContext.setDefault(ctx);
                HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
                HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
            }
        } else {
            log.info("{} is not a normal file, not checking for monitor running with SSL", accumuloSite);
        }
    }
    URL url = new URL(scheme + monitorLocation);
    log.debug("Fetching web page {}", url);
    String result = FunctionalTestUtils.readAll(url.openStream());
    assertTrue(result.length() > 100);
    log.debug("Stopping accumulo cluster");
    ClusterControl control = cluster.getClusterControl();
    control.adminStopAll();
    ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
    ZooCache zcache = new ZooCache(zreader, null);
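    // Wait for the master to release its ZooKeeper lock, confirming the shutdown started by adminStopAll() has completed.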
    byte[] masterLockData;
    do {
        masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
        if (null != masterLockData) {
            log.info("Master lock is still held");
            Thread.sleep(1000);
        }
    } while (null != masterLockData);
    control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
    control.stopAllServers(ServerType.MONITOR);
    control.stopAllServers(ServerType.TRACER);
    log.debug("success!");
    // Restarting everything
    cluster.start();
}
Also used: Path (org.apache.hadoop.fs.Path), Connector (org.apache.accumulo.core.client.Connector), Configuration (org.apache.hadoop.conf.Configuration), NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration), ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration), SecureRandom (java.security.SecureRandom), SSLContext (javax.net.ssl.SSLContext), StandaloneAccumuloCluster (org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster), ZooCache (org.apache.accumulo.fate.zookeeper.ZooCache), URL (java.net.URL), TrustManager (javax.net.ssl.TrustManager), X509TrustManager (javax.net.ssl.X509TrustManager), ZooReader (org.apache.accumulo.fate.zookeeper.ZooReader), File (java.io.File), ClusterControl (org.apache.accumulo.cluster.ClusterControl), Test (org.junit.Test)

Example 4 with ZooCache

Use of org.apache.accumulo.fate.zookeeper.ZooCache in project accumulo by apache.

Class RestartIT, method restartMasterSplit.

@Test
public void restartMasterSplit() throws Exception {
    Connector c = getConnector();
    final String tableName = getUniqueNames(1)[0];
    final AuthenticationToken token = getAdminToken();
    final ClusterControl control = getCluster().getClusterControl();
    VOPTS.setTableName(tableName);
    c.tableOperations().create(tableName);
    c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
    final String[] args;
    if (token instanceof PasswordToken) {
        byte[] password = ((PasswordToken) token).getPassword();
        args = new String[] { "-u", getAdminPrincipal(), "-p", new String(password, UTF_8), "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName };
        OPTS.setPrincipal(getAdminPrincipal());
        VOPTS.setPrincipal(getAdminPrincipal());
    } else if (token instanceof KerberosToken) {
        ClusterUser rootUser = getAdminUser();
        args = new String[] { "-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName };
        ClientConfiguration clientConfig = cluster.getClientConfig();
        OPTS.updateKerberosCredentials(clientConfig);
        VOPTS.updateKerberosCredentials(clientConfig);
    } else {
        throw new RuntimeException("Unknown token");
    }
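    // Kick off TestIngest in the background; the master is stopped and restarted while it runs.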
    Future<Integer> ret = svc.submit(new Callable<Integer>() {

        @Override
        public Integer call() {
            try {
                return control.exec(TestIngest.class, args);
            } catch (Exception e) {
                log.error("Error running TestIngest", e);
                return -1;
            }
        }
    });
    control.stopAllServers(ServerType.MASTER);
    ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
    ZooCache zcache = new ZooCache(zreader, null);
    byte[] masterLockData;
    do {
        masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
        if (null != masterLockData) {
            log.info("Master lock is still held");
            Thread.sleep(1000);
        }
    } while (null != masterLockData);
    cluster.start();
    assertEquals(0, ret.get().intValue());
    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
}
Also used: Connector (org.apache.accumulo.core.client.Connector), AuthenticationToken (org.apache.accumulo.core.client.security.tokens.AuthenticationToken), KerberosToken (org.apache.accumulo.core.client.security.tokens.KerberosToken), ZooCache (org.apache.accumulo.fate.zookeeper.ZooCache), IOException (java.io.IOException), PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken), ZooReader (org.apache.accumulo.fate.zookeeper.ZooReader), TestIngest (org.apache.accumulo.test.TestIngest), ClusterUser (org.apache.accumulo.cluster.ClusterUser), ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration), ClusterControl (org.apache.accumulo.cluster.ClusterControl), Test (org.junit.Test)

Example 5 with ZooCache

Use of org.apache.accumulo.fate.zookeeper.ZooCache in project accumulo by apache.

Class RestartIT, method restartMasterRecovery.

@Test
public void restartMasterRecovery() throws Exception {
    Connector c = getConnector();
    String tableName = getUniqueNames(1)[0];
    c.tableOperations().create(tableName);
    OPTS.setTableName(tableName);
    VOPTS.setTableName(tableName);
    ClientConfiguration clientConfig = cluster.getClientConfig();
    if (clientConfig.hasSasl()) {
        OPTS.updateKerberosCredentials(clientConfig);
        VOPTS.updateKerberosCredentials(clientConfig);
    } else {
        OPTS.setPrincipal(getAdminPrincipal());
        VOPTS.setPrincipal(getAdminPrincipal());
    }
    TestIngest.ingest(c, OPTS, BWOPTS);
    ClusterControl control = getCluster().getClusterControl();
    // TODO implement a kill all too?
    // cluster.stop() would also stop ZooKeeper
    control.stopAllServers(ServerType.MASTER);
    control.stopAllServers(ServerType.TRACER);
    control.stopAllServers(ServerType.TABLET_SERVER);
    control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
    control.stopAllServers(ServerType.MONITOR);
    ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
    ZooCache zcache = new ZooCache(zreader, null);
    byte[] masterLockData;
    do {
        masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
        if (null != masterLockData) {
            log.info("Master lock is still held");
            Thread.sleep(1000);
        }
    } while (null != masterLockData);
    cluster.start();
    sleepUninterruptibly(5, TimeUnit.MILLISECONDS);
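    // Stop the freshly restarted master and wait again for its lock to be released before the final restart.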
    control.stopAllServers(ServerType.MASTER);
    masterLockData = new byte[0];
    do {
        masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
        if (null != masterLockData) {
            log.info("Master lock is still held");
            Thread.sleep(1000);
        }
    } while (null != masterLockData);
    cluster.start();
    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
}
Also used: Connector (org.apache.accumulo.core.client.Connector), ZooReader (org.apache.accumulo.fate.zookeeper.ZooReader), ZooCache (org.apache.accumulo.fate.zookeeper.ZooCache), ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration), ClusterControl (org.apache.accumulo.cluster.ClusterControl), Test (org.junit.Test)
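
Examples 3, 4, and 5 all repeat the same polling loop to wait for the master's ZooKeeper lock to be released before restarting the cluster. As a readability aid, here is that loop factored into a hypothetical helper; it uses only the calls already shown above, and the method name is an invention of this note, not project API:

// Hypothetical helper extracted from the loop repeated in Examples 3-5.
static void waitForMasterLockRelease(Instance instance) throws Exception {
    ZooReader zreader = new ZooReader(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
    ZooCache zcache = new ZooCache(zreader, null);
    byte[] masterLockData;
    do {
        masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK, null);
        if (null != masterLockData) {
            // The master still holds its lock; poll again in a second.
            Thread.sleep(1000);
        }
    } while (null != masterLockData);
}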

Aggregations

ZooCache (org.apache.accumulo.fate.zookeeper.ZooCache): 25
Instance (org.apache.accumulo.core.client.Instance): 7
Test (org.junit.Test): 6
ZooCacheFactory (org.apache.accumulo.fate.zookeeper.ZooCacheFactory): 5
ClientConfiguration (org.apache.accumulo.core.client.ClientConfiguration): 4
Connector (org.apache.accumulo.core.client.Connector): 4
ZooReader (org.apache.accumulo.fate.zookeeper.ZooReader): 4
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
ClusterControl (org.apache.accumulo.cluster.ClusterControl): 3
File (java.io.File): 2
NamespaceNotFoundException (org.apache.accumulo.core.client.NamespaceNotFoundException): 2
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 2
ServerServices (org.apache.accumulo.core.util.ServerServices): 2
PropCacheKey (org.apache.accumulo.server.conf.ZooCachePropertyAccessor.PropCacheKey): 2
Path (org.apache.hadoop.fs.Path): 2
TTransport (org.apache.thrift.transport.TTransport): 2
FileOutputStream (java.io.FileOutputStream): 1
ObjectOutputStream (java.io.ObjectOutputStream): 1
URL (java.net.URL): 1