
Example 1 with TestingServer

Use of org.apache.curator.test.TestingServer in the apache/hive project.

From the class TestServiceDiscovery, method setup:

@BeforeClass
public static void setup() throws Exception {
    server = new TestingServer();   // in-process ZooKeeper on a random free port, started immediately
    CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
    // Point a Curator client at the embedded server, retrying once on connection failure.
    client = builder.connectString(server.getConnectString()).retryPolicy(new RetryOneTime(1)).build();
    client.start();
}
Also used: TestingServer (org.apache.curator.test.TestingServer), RetryOneTime (org.apache.curator.retry.RetryOneTime), CuratorFrameworkFactory (org.apache.curator.framework.CuratorFrameworkFactory), BeforeClass (org.junit.BeforeClass)
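
The matching teardown is not shown on this page; a minimal sketch, assuming the same static server and client fields and JUnit 4's @AfterClass (not the actual Hive code):

@AfterClass
public static void teardown() throws Exception {
    // Close the Curator client before shutting down the embedded ZooKeeper server.
    if (client != null) {
        client.close();
    }
    if (server != null) {
        server.close();
    }
}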

Example 2 with TestingServer

Use of org.apache.curator.test.TestingServer in the apache/flink project.

From the class JobClientActorRecoveryITCase, method setup:

@BeforeClass
public static void setup() throws Exception {
    zkServer = new TestingServer();
    zkServer.start();
}
Also used: TestingServer (org.apache.curator.test.TestingServer), BeforeClass (org.junit.BeforeClass)
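
The no-arg TestingServer constructor binds a free port and, in current curator-test releases, also starts the server, so the explicit start() above is mostly defensive. A minimal sketch of the deferred-start variant, assuming the TestingServer(boolean start) overload is available on the classpath (not the actual Flink code):

@BeforeClass
public static void setup() throws Exception {
    // Allocate a free port but do not start the server yet.
    zkServer = new TestingServer(false);
    zkServer.start();
    // Whatever is under test reads the connection details from here, e.g. "127.0.0.1:54321".
    String connectString = zkServer.getConnectString();
}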

Example 3 with TestingServer

Use of org.apache.curator.test.TestingServer in the apache/hadoop project.

From the class TestRMStoreCommands, method testRemoveApplicationFromStateStoreCmdForZK:

@Test
public void testRemoveApplicationFromStateStoreCmdForZK() throws Exception {
    StateChangeRequestInfo req = new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
    try (TestingServer curatorTestingServer = TestZKRMStateStore.setupCuratorServer();
        CuratorFramework curatorFramework = TestZKRMStateStore.setupCuratorFramework(curatorTestingServer)) {
        Configuration conf = TestZKRMStateStore.createHARMConf("rm1,rm2", "rm1", 1234, false, curatorTestingServer);
        ResourceManager rm = new MockRM(conf);
        rm.start();
        rm.getRMContext().getRMAdminService().transitionToActive(req);
        rm.close();
        String appId = ApplicationId.newInstance(System.currentTimeMillis(), 1).toString();
        String appRootPath = YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH + "/" + ZKRMStateStore.ROOT_ZNODE_NAME + "/" + RMStateStore.RM_APP_ROOT;
        String appIdPath = appRootPath + "/" + appId;
        curatorFramework.create().forPath(appIdPath);
        assertEquals("Application node for " + appId + "should exist", appId, curatorFramework.getChildren().forPath(appRootPath).get(0));
        try {
            ResourceManager.removeApplication(conf, appId);
        } catch (Exception e) {
            fail("Exception should not be thrown while removing app from " + "rm state store.");
        }
        assertTrue("After remove app from store there should be no child nodes" + " in app root path", curatorFramework.getChildren().forPath(appRootPath).isEmpty());
    }
}
Also used: TestingServer (org.apache.curator.test.TestingServer), CuratorFramework (org.apache.curator.framework.CuratorFramework), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), StateChangeRequestInfo (org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo), Test (org.junit.Test)
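
Server and client construction is delegated here to helpers in TestZKRMStateStore whose bodies are not shown; a minimal sketch of what such helpers typically do (not the actual Hadoop implementation, and the RetryOneTime policy is an assumption):

static TestingServer setupCuratorServerSketch() throws Exception {
    // In-process ZooKeeper on a random free port, started immediately.
    return new TestingServer();
}

static CuratorFramework setupCuratorFrameworkSketch(TestingServer server) throws Exception {
    CuratorFramework framework = CuratorFrameworkFactory.builder()
        .connectString(server.getConnectString())
        .retryPolicy(new RetryOneTime(100))
        .build();
    framework.start();
    return framework;
}

Both TestingServer and CuratorFramework implement Closeable, which is what lets the try-with-resources block in the test clean them up automatically.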

Example 4 with TestingServer

Use of org.apache.curator.test.TestingServer in the Netflix/curator project.

From the class ZkPathUtilTest, method testToString:

public void testToString() throws Exception {
    _zkServer = new TestingServer(4711);
    _client = ZkTestSystem.createZkClient("localhost:4711");
    final String file1 = "/files/file1";
    final String file2 = "/files/file2";
    final String file3 = "/files/file2/file3";
    _client.createPersistent(file1, true);
    _client.createPersistent(file2, true);
    _client.createPersistent(file3, true);
    String stringRepresentation = ZkPathUtil.toString(_client);
    System.out.println(stringRepresentation);
    System.out.println("-------------------------");
    assertTrue(stringRepresentation.contains("file1"));
    assertTrue(stringRepresentation.contains("file2"));
    assertTrue(stringRepresentation.contains("file3"));
    // path filtering
    stringRepresentation = ZkPathUtil.toString(_client, "/", new ZkPathUtil.PathFilter() {

        @Override
        public boolean showChilds(String path) {
            return !file2.equals(path);
        }
    });
    assertTrue(stringRepresentation.contains("file1"));
    assertTrue(stringRepresentation.contains("file2"));
    assertFalse(stringRepresentation.contains("file3"));
    // start path
    stringRepresentation = ZkPathUtil.toString(_client, file2, ZkPathUtil.PathFilter.ALL);
    assertFalse(stringRepresentation.contains("file1"));
    assertTrue(stringRepresentation.contains("file2"));
    assertTrue(stringRepresentation.contains("file3"));
    _zkServer.close();
}
Also used: TestingServer (org.apache.curator.test.TestingServer)
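
Binding to the hardcoded port 4711 fails if something else on the machine already holds it; a minimal variant that lets TestingServer pick a free port, assuming the same test fields (not the original test code):

_zkServer = new TestingServer();   // picks a random free port
_client = ZkTestSystem.createZkClient(_zkServer.getConnectString());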

Example 5 with TestingServer

Use of org.apache.curator.test.TestingServer in the apache/flink project.

From the class KafkaTestEnvironmentImpl, method prepare:

@Override
public void prepare(int numKafkaServers, Properties additionalServerProperties, boolean secureMode) {
    // Increase the timeout, since establishing a secure ZK connection on Travis can take a long time.
    if (secureMode) {
        // Run only one Kafka server to avoid multiple ZK connections from many instances (Travis timeouts).
        numKafkaServers = 1;
        zkTimeout = String.valueOf(Integer.parseInt(zkTimeout) * 15);
    }
    this.additionalServerProperties = additionalServerProperties;
    this.secureMode = secureMode;
    File tempDir = new File(System.getProperty("java.io.tmpdir"));
    tmpZkDir = new File(tempDir, "kafkaITcase-zk-dir-" + (UUID.randomUUID().toString()));
    assertTrue("cannot create zookeeper temp dir", tmpZkDir.mkdirs());
    tmpKafkaParent = new File(tempDir, "kafkaITcase-kafka-dir-" + (UUID.randomUUID().toString()));
    assertTrue("cannot create kafka temp dir", tmpKafkaParent.mkdirs());
    tmpKafkaDirs = new ArrayList<>(numKafkaServers);
    for (int i = 0; i < numKafkaServers; i++) {
        File tmpDir = new File(tmpKafkaParent, "server-" + i);
        assertTrue("cannot create kafka temp dir", tmpDir.mkdir());
        tmpKafkaDirs.add(tmpDir);
    }
    zookeeper = null;
    brokers = null;
    try {
        LOG.info("Starting Zookeeper");
        zookeeper = new TestingServer(-1, tmpZkDir);
        zookeeperConnectionString = zookeeper.getConnectString();
        LOG.info("zookeeperConnectionString: {}", zookeeperConnectionString);
        LOG.info("Starting KafkaServer");
        brokers = new ArrayList<>(numKafkaServers);
        for (int i = 0; i < numKafkaServers; i++) {
            brokers.add(getKafkaServer(i, tmpKafkaDirs.get(i)));
            SocketServer socketServer = brokers.get(i).socketServer();
            if (secureMode) {
                brokerConnectionString += hostAndPortToUrlString(KafkaTestEnvironment.KAFKA_HOST, brokers.get(i).socketServer().boundPort(SecurityProtocol.SASL_PLAINTEXT)) + ",";
            } else {
                brokerConnectionString += hostAndPortToUrlString(KafkaTestEnvironment.KAFKA_HOST, brokers.get(i).socketServer().boundPort(SecurityProtocol.PLAINTEXT)) + ",";
            }
        }
        LOG.info("ZK and KafkaServer started.");
    } catch (Throwable t) {
        t.printStackTrace();
        fail("Test setup failed: " + t.getMessage());
    }
    LOG.info("brokerConnectionString --> {}", brokerConnectionString);
    standardProps = new Properties();
    standardProps.setProperty("zookeeper.connect", zookeeperConnectionString);
    standardProps.setProperty("bootstrap.servers", brokerConnectionString);
    standardProps.setProperty("group.id", "flink-tests");
    standardProps.setProperty("enable.auto.commit", "false");
    standardProps.setProperty("zookeeper.session.timeout.ms", zkTimeout);
    standardProps.setProperty("zookeeper.connection.timeout.ms", zkTimeout);
    // Read from the beginning ("earliest" is the Kafka 0.9+ value).
    standardProps.setProperty("auto.offset.reset", "earliest");
    // Force many small fetches (the test messages must be smaller than this limit).
    standardProps.setProperty("max.partition.fetch.bytes", "256");
}
Also used: TestingServer (org.apache.curator.test.TestingServer), SocketServer (kafka.network.SocketServer), Properties (java.util.Properties), File (java.io.File)
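
prepare() leaves the embedded ZooKeeper server and the Kafka brokers running; the environment is expected to stop them in reverse order when the test finishes. A minimal sketch of such a teardown, assuming the same zookeeper, brokers, and LOG fields and that brokers holds kafka.server.KafkaServer instances (not the actual Flink shutdown code):

public void shutdownSketch() {
    // Stop the Kafka brokers first, then the ZooKeeper server that backs them.
    if (brokers != null) {
        for (KafkaServer broker : brokers) {
            broker.shutdown();
            broker.awaitShutdown();
        }
        brokers = null;
    }
    if (zookeeper != null) {
        try {
            zookeeper.stop();
        } catch (IOException e) {
            LOG.warn("Error stopping the embedded ZooKeeper server", e);
        }
        zookeeper = null;
    }
    // The temp directories (tmpZkDir, tmpKafkaParent) would typically be deleted here as well.
}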

Aggregations

TestingServer (org.apache.curator.test.TestingServer): 150 usages
Before (org.junit.Before): 38 usages
Test (org.junit.Test): 30 usages
CuratorFramework (org.apache.curator.framework.CuratorFramework): 28 usages
File (java.io.File): 27 usages
Properties (java.util.Properties): 18 usages
KafkaConfig (kafka.server.KafkaConfig): 15 usages
RetryOneTime (org.apache.curator.retry.RetryOneTime): 15 usages
Test (org.testng.annotations.Test): 15 usages
ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry): 14 usages
IOException (java.io.IOException): 13 usages
Timing (org.apache.curator.test.Timing): 13 usages
HashMap (java.util.HashMap): 12 usages
ZkClient (org.I0Itec.zkclient.ZkClient): 12 usages
ServerSocket (java.net.ServerSocket): 11 usages
KafkaServerStartable (kafka.server.KafkaServerStartable): 11 usages
ZkUtils (kafka.utils.ZkUtils): 11 usages
ZkConnection (org.I0Itec.zkclient.ZkConnection): 11 usages
BeforeClass (org.junit.BeforeClass): 11 usages
TestingServerStarter (io.pravega.test.common.TestingServerStarter): 9 usages